2 * Copyright (c) 2000, 2001 Michael Smith
3 * Copyright (c) 2000 BSDi
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/mly/mly_pci.c,v 1.1.2.2 2001/03/05 20:17:24 msmith Exp $
28 * $DragonFly: src/sys/dev/raid/mly/Attic/mly_pci.c,v 1.5 2005/06/10 17:10:26 swildner Exp $
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
38 #include <sys/devicestat.h>
41 #include <machine/bus_memio.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
45 #include <sys/thread2.h>
47 #include <bus/pci/pcireg.h>
48 #include <bus/pci/pcivar.h>
/* Newbus device-interface entry points implemented in this file. */
54 static int mly_pci_probe(device_t dev);
55 static int mly_pci_attach(device_t dev);
56 static int mly_pci_detach(device_t dev);
57 static int mly_pci_shutdown(device_t dev);
58 static int mly_pci_suspend(device_t dev);
59 static int mly_pci_resume(device_t dev);
60 static void mly_pci_intr(void *arg);
/* DMA resource setup routines and their busdma load callbacks. */
62 static int mly_sg_map(struct mly_softc *sc);
63 static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
64 static int mly_mmbox_map(struct mly_softc *sc);
65 static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
/*
 * Newbus method table binding the generic device operations to the
 * mly_pci_* implementations below.
 * NOTE(review): the table's terminating sentinel entry and closing brace
 * are elided from this excerpt.
 */
67 static device_method_t mly_methods[] = {
68 /* Device interface */
69 DEVMETHOD(device_probe, mly_pci_probe),
70 DEVMETHOD(device_attach, mly_pci_attach),
71 DEVMETHOD(device_detach, mly_pci_detach),
72 DEVMETHOD(device_shutdown, mly_pci_shutdown),
73 DEVMETHOD(device_suspend, mly_pci_suspend),
74 DEVMETHOD(device_resume, mly_pci_resume),
/*
 * Driver declaration: each attached instance gets a softc of
 * sizeof(struct mly_softc), zeroed by mly_pci_attach().
 * NOTE(review): the driver name and method-table fields of this
 * initializer are elided from this excerpt.
 */
78 static driver_t mly_pci_driver = {
81 sizeof(struct mly_softc)
84 static devclass_t mly_devclass;
/* Register the "mly" driver on the pci bus. */
85 DRIVER_MODULE(mly, pci, mly_pci_driver, mly_devclass, 0, 0);
/*
 * Supported-adapter table: PCI vendor/device/subvendor/subdevice IDs,
 * the hardware-interface flavour (StrongARM vs. i960RX mailbox layout),
 * and a human-readable description.  The probe loop terminates on a
 * zero vendor entry; that sentinel and the struct declaration head are
 * elided from this excerpt.
 */
95 } mly_identifiers[] = {
96 {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 2000"},
97 {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 3000"},
98 {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX, "Mylex AcceleRAID 352"},
99 {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX, "Mylex AcceleRAID 170"},
100 {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX, "Mylex AcceleRAID 160"},
104 /********************************************************************************
105 ********************************************************************************
107 ********************************************************************************
108 ********************************************************************************/
/*
 * Probe: match the PCI IDs against mly_identifiers.  A zero subvendor in
 * a table entry wildcards the subsystem IDs.  On a match, set the device
 * description and return -10 so a more specific driver can still win.
 * NOTE(review): the function's declarations, fall-through return and
 * closing braces are elided from this excerpt.
 */
111 mly_pci_probe(device_t dev)
117 for (m = mly_identifiers; m->vendor != 0; m++) {
118 if ((m->vendor == pci_get_vendor(dev)) &&
119 (m->device == pci_get_device(dev)) &&
120 ((m->subvendor == 0) || ((m->subvendor == pci_get_subvendor(dev)) &&
121 (m->subdevice == pci_get_subdevice(dev))))) {
123 device_set_desc(dev, m->desc);
124 return(-10); /* allow room to be overridden */
/*
 * Attach: enable PCI bus-mastering, map the register window, hook the
 * (shareable) interrupt, build the DMA tag hierarchy, select the
 * hardware-interface register layout from mly_identifiers, create the
 * s/g and memory-mailbox mappings, then hand off to the bus-independent
 * mly_attach().
 * NOTE(review): this excerpt elides many original lines (declarations,
 * error-path gotos, case break statements and closing braces); the
 * comments below describe only what is visible here.
 */
131 mly_pci_attach(device_t dev)
133 struct mly_softc *sc;
/* Start from a fully-zeroed softc so all pointers/flags are invalid. */
142 sc = device_get_softc(dev);
143 bzero(sc, sizeof(*sc));
147 if (device_get_unit(sc->mly_dev) == 0)
151 /* assume failure is 'not configured' */
155 * Verify that the adapter is correctly set up in PCI space.
/* Force bus-mastering on, then read back to confirm the write stuck. */
157 command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
158 command |= PCIM_CMD_BUSMASTEREN;
159 pci_write_config(dev, PCIR_COMMAND, command, 2);
160 command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
161 if (!(command & PCIM_CMD_BUSMASTEREN)) {
162 mly_printf(sc, "can't enable busmaster feature\n");
/* Registers are memory-mapped; we need memory-space decoding enabled. */
165 if ((command & PCIM_CMD_MEMEN) == 0) {
166 mly_printf(sc, "memory window not available\n");
171 * Allocate the PCI register window.
173 sc->mly_regs_rid = PCIR_MAPS; /* first base address register */
174 if ((sc->mly_regs_resource = bus_alloc_resource(sc->mly_dev, SYS_RES_MEMORY, &sc->mly_regs_rid,
175 0, ~0, 1, RF_ACTIVE)) == NULL) {
176 mly_printf(sc, "can't allocate register window\n");
/* Cache the bus tag/handle used by the register access macros. */
179 sc->mly_btag = rman_get_bustag(sc->mly_regs_resource);
180 sc->mly_bhandle = rman_get_bushandle(sc->mly_regs_resource);
183 * Allocate and connect our interrupt.
186 if ((sc->mly_irq = bus_alloc_resource(sc->mly_dev, SYS_RES_IRQ, &sc->mly_irq_rid,
187 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
188 mly_printf(sc, "can't allocate interrupt\n");
191 error = bus_setup_intr(sc->mly_dev, sc->mly_irq, INTR_TYPE_CAM,
192 mly_pci_intr, sc, &sc->mly_intr, NULL);
194 mly_printf(sc, "can't set up interrupt\n");
198 /* assume failure is 'out of memory' */
202 * Allocate the parent bus DMA tag appropriate for our PCI interface.
204 * Note that all of these controllers are 64-bit capable.
/*
 * NOTE(review): lowaddr below restricts the parent tag to 32-bit
 * addresses despite the "64-bit capable" note above — confirm intended.
 */
206 if (bus_dma_tag_create(NULL, /* parent */
207 1, 0, /* alignment, boundary */
208 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
209 BUS_SPACE_MAXADDR, /* highaddr */
210 NULL, NULL, /* filter, filterarg */
211 MAXBSIZE, MLY_MAXSGENTRIES, /* maxsize, nsegments */
212 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
213 BUS_DMA_ALLOCNOW, /* flags */
214 &sc->mly_parent_dmat)) {
215 mly_printf(sc, "can't allocate parent DMA tag\n");
220 * Create DMA tag for mapping buffers into controller-addressable space.
222 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
223 1, 0, /* alignment, boundary */
224 BUS_SPACE_MAXADDR, /* lowaddr */
225 BUS_SPACE_MAXADDR, /* highaddr */
226 NULL, NULL, /* filter, filterarg */
227 MAXBSIZE, MLY_MAXSGENTRIES, /* maxsize, nsegments */
228 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
230 &sc->mly_buffer_dmat)) {
231 mly_printf(sc, "can't allocate buffer DMA tag\n");
236 * Initialise the DMA tag for command packets.
/* One contiguous region holds the packets for all MLY_MAXCOMMANDS slots. */
238 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
239 1, 0, /* alignment, boundary */
240 BUS_SPACE_MAXADDR, /* lowaddr */
241 BUS_SPACE_MAXADDR, /* highaddr */
242 NULL, NULL, /* filter, filterarg */
243 sizeof(union mly_command_packet) * MLY_MAXCOMMANDS, 1, /* maxsize, nsegments */
244 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
246 &sc->mly_packet_dmat)) {
247 mly_printf(sc, "can't allocate command packet DMA tag\n");
252 * Detect the hardware interface version
/*
 * Re-scan the ID table (subsystem IDs ignored here) to pick the mailbox,
 * doorbell and interrupt register offsets for this interface flavour.
 */
254 for (i = 0; mly_identifiers[i].vendor != 0; i++) {
255 if ((mly_identifiers[i].vendor == pci_get_vendor(dev)) &&
256 (mly_identifiers[i].device == pci_get_device(dev))) {
257 sc->mly_hwif = mly_identifiers[i].hwif;
258 switch(sc->mly_hwif) {
259 case MLY_HWIF_I960RX:
260 debug(2, "set hardware up for i960RX");
261 sc->mly_doorbell_true = 0x00;
262 sc->mly_command_mailbox = MLY_I960RX_COMMAND_MAILBOX;
263 sc->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
264 sc->mly_idbr = MLY_I960RX_IDBR;
265 sc->mly_odbr = MLY_I960RX_ODBR;
266 sc->mly_error_status = MLY_I960RX_ERROR_STATUS;
267 sc->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
268 sc->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
270 case MLY_HWIF_STRONGARM:
271 debug(2, "set hardware up for StrongARM");
272 sc->mly_doorbell_true = 0xff; /* doorbell 'true' is 0 */
273 sc->mly_command_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
274 sc->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
275 sc->mly_idbr = MLY_STRONGARM_IDBR;
276 sc->mly_odbr = MLY_STRONGARM_ODBR;
277 sc->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
278 sc->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
279 sc->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
287 * Create the scatter/gather mappings.
289 if ((error = mly_sg_map(sc)))
293 * Allocate and map the memory mailbox
295 if ((error = mly_mmbox_map(sc)))
299 * Do bus-independent initialisation.
301 if ((error = mly_attach(sc)))
311 /********************************************************************************
312 * Disconnect from the controller completely, in preparation for unload.
/*
 * NOTE(review): declarations, the consequents of both ifs, the return
 * path and the closing brace are elided from this excerpt — presumably
 * detach is refused while the control device is open, and any
 * mly_pci_shutdown() failure is propagated; confirm against full source.
 */
315 mly_pci_detach(device_t dev)
317 struct mly_softc *sc = device_get_softc(dev);
322 if (sc->mly_state & MLY_STATE_OPEN)
325 if ((error = mly_pci_shutdown(dev)))
333 /********************************************************************************
334 * Bring the controller down to a dormant state and detach all child devices.
336 * This function is called before detach or system shutdown.
338 * Note that we can assume that the camq on the controller is empty, as we won't
339 * allow shutdown if any device is open.
/* NOTE(review): the function body is almost entirely elided from this excerpt. */
342 mly_pci_shutdown(device_t dev)
344 struct mly_softc *sc = device_get_softc(dev);
352 /********************************************************************************
353 * Bring the controller to a quiescent state, ready for system suspend.
355 * We can't assume that the controller is not active at this point, so we need
356 * to mask interrupts.
/* NOTE(review): the function body is almost entirely elided from this excerpt. */
359 mly_pci_suspend(device_t dev)
361 struct mly_softc *sc = device_get_softc(dev);
370 /********************************************************************************
371 * Bring the controller back to a state ready for operation.
374 mly_pci_resume(device_t dev)
376 struct mly_softc *sc = device_get_softc(dev);
/* Clear the suspend flag and re-enable controller interrupts. */
379 sc->mly_state &= ~MLY_STATE_SUSPEND;
380 MLY_UNMASK_INTERRUPTS(sc);
384 /*******************************************************************************
385 * Take an interrupt, or be poked by other code to look for interrupt-worthy
/*
 * Interrupt handler; arg is the softc registered via bus_setup_intr().
 * NOTE(review): the actual completion/dispatch call is elided here.
 */
389 mly_pci_intr(void *arg)
391 struct mly_softc *sc = (struct mly_softc *)arg;
395 /* collect finished commands, queue anything waiting */
399 /********************************************************************************
400 ********************************************************************************
401 Bus-dependent Resource Management
402 ********************************************************************************
403 ********************************************************************************/
405 /********************************************************************************
406 * Allocate memory for the scatter/gather tables
/*
 * One DMA region holds the s/g lists for every command slot
 * (MLY_MAXCOMMANDS * MLY_MAXSGENTRIES entries); it is allocated once and
 * loaded permanently, with the bus address recorded by mly_sg_map_helper().
 * NOTE(review): declarations, error returns, some tag-create arguments and
 * the closing brace are elided from this excerpt.
 */
409 mly_sg_map(struct mly_softc *sc)
416 * Create a single tag describing a region large enough to hold all of
417 * the s/g lists we will need.
419 segsize = sizeof(struct mly_sg_entry) * MLY_MAXCOMMANDS * MLY_MAXSGENTRIES;
420 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
421 1, 0, /* alignment, boundary */
422 BUS_SPACE_MAXADDR, /* lowaddr */
423 BUS_SPACE_MAXADDR, /* highaddr */
424 NULL, NULL, /* filter, filterarg */
425 segsize, 1, /* maxsize, nsegments */
426 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
429 mly_printf(sc, "can't allocate scatter/gather DMA tag\n");
434 * Allocate enough s/g maps for all commands and permanently map them into
435 * controller-visible space.
437 * XXX this assumes we can get enough space for all the s/g maps in one
440 if (bus_dmamem_alloc(sc->mly_sg_dmat, (void **)&sc->mly_sg_table, BUS_DMA_NOWAIT, &sc->mly_sg_dmamap)) {
441 mly_printf(sc, "can't allocate s/g table\n");
444 bus_dmamap_load(sc->mly_sg_dmat, sc->mly_sg_dmamap, sc->mly_sg_table, segsize, mly_sg_map_helper, sc, 0);
448 /********************************************************************************
449 * Save the physical address of the base of the s/g table.
/*
 * busdma load callback for mly_sg_map(); the table was loaded as a single
 * segment, so the first segment's address is the base of the whole table.
 */
452 mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
454 struct mly_softc *sc = (struct mly_softc *)arg;
458 /* save base of s/g table's address in bus space */
459 sc->mly_sg_busaddr = segs->ds_addr;
462 /********************************************************************************
463 * Allocate memory for the memory-mailbox interface
/*
 * Allocate, load and zero a single contiguous struct mly_mmbox; the bus
 * address is recorded by mly_mmbox_map_helper().
 * NOTE(review): error returns, some tag-create arguments and the closing
 * brace are elided from this excerpt.
 */
466 mly_mmbox_map(struct mly_softc *sc)
470 * Create a DMA tag for a single contiguous region large enough for the
471 * memory mailbox structure.
473 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
474 1, 0, /* alignment, boundary */
475 BUS_SPACE_MAXADDR, /* lowaddr */
476 BUS_SPACE_MAXADDR, /* highaddr */
477 NULL, NULL, /* filter, filterarg */
478 sizeof(struct mly_mmbox), 1, /* maxsize, nsegments */
479 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
481 &sc->mly_mmbox_dmat)) {
482 mly_printf(sc, "can't allocate memory mailbox DMA tag\n");
487 * Allocate the buffer
489 if (bus_dmamem_alloc(sc->mly_mmbox_dmat, (void **)&sc->mly_mmbox, BUS_DMA_NOWAIT, &sc->mly_mmbox_dmamap)) {
490 mly_printf(sc, "can't allocate memory mailbox\n");
/* Load the mailbox permanently, then zero it before the controller sees it. */
493 bus_dmamap_load(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap, sc->mly_mmbox, sizeof(struct mly_mmbox),
494 mly_mmbox_map_helper, sc, 0);
495 bzero(sc->mly_mmbox, sizeof(*sc->mly_mmbox));
500 /********************************************************************************
501 * Save the physical address of the memory mailbox
/*
 * busdma load callback for mly_mmbox_map(); the mailbox was loaded as a
 * single segment, so segs->ds_addr is its bus address.
 */
504 mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
506 struct mly_softc *sc = (struct mly_softc *)arg;
510 sc->mly_mmbox_busaddr = segs->ds_addr;
513 /********************************************************************************
514 * Free all of the resources associated with (sc)
516 * Should not be called if the controller is active.
519 mly_free(struct mly_softc *sc)
521 struct mly_command *mc;
525 /* detach from CAM */
528 /* throw away command buffer DMA maps */
529 while (mly_alloc_command(sc, &mc) == 0)
530 bus_dmamap_destroy(sc->mly_buffer_dmat, mc->mc_datamap);
532 /* release the packet storage */
533 if (sc->mly_packet != NULL) {
534 bus_dmamap_unload(sc->mly_packet_dmat, sc->mly_packetmap);
535 bus_dmamem_free(sc->mly_packet_dmat, sc->mly_packet, sc->mly_packetmap);
538 /* throw away the controllerinfo structure */
539 if (sc->mly_controllerinfo != NULL)
540 free(sc->mly_controllerinfo, M_DEVBUF);
542 /* throw away the controllerparam structure */
543 if (sc->mly_controllerparam != NULL)
544 free(sc->mly_controllerparam, M_DEVBUF);
546 /* destroy data-transfer DMA tag */
547 if (sc->mly_buffer_dmat)
548 bus_dma_tag_destroy(sc->mly_buffer_dmat);
550 /* free and destroy DMA memory and tag for s/g lists */
551 if (sc->mly_sg_table) {
552 bus_dmamap_unload(sc->mly_sg_dmat, sc->mly_sg_dmamap);
553 bus_dmamem_free(sc->mly_sg_dmat, sc->mly_sg_table, sc->mly_sg_dmamap);
556 bus_dma_tag_destroy(sc->mly_sg_dmat);
558 /* free and destroy DMA memory and tag for memory mailbox */
560 bus_dmamap_unload(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap);
561 bus_dmamem_free(sc->mly_mmbox_dmat, sc->mly_mmbox, sc->mly_mmbox_dmamap);
563 if (sc->mly_mmbox_dmat)
564 bus_dma_tag_destroy(sc->mly_mmbox_dmat);
566 /* disconnect the interrupt handler */
568 bus_teardown_intr(sc->mly_dev, sc->mly_irq, sc->mly_intr);
569 if (sc->mly_irq != NULL)
570 bus_release_resource(sc->mly_dev, SYS_RES_IRQ, sc->mly_irq_rid, sc->mly_irq);
572 /* destroy the parent DMA tag */
573 if (sc->mly_parent_dmat)
574 bus_dma_tag_destroy(sc->mly_parent_dmat);
576 /* release the register window mapping */
577 if (sc->mly_regs_resource != NULL)
578 bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource);