2 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * $FreeBSD: src/sys/dev/ata/ata-pci.c,v 1.121 2007/02/23 12:18:33 piso Exp $
31 #include <sys/param.h>
33 #include <sys/bus_resource.h>
34 #include <sys/module.h>
37 #include <sys/systm.h>
38 #include <sys/machintr.h>
40 #include <bus/pci/pcireg.h>
41 #include <bus/pci/pcivar.h>
/* Strip the low flag bits of a PCI I/O BAR to get the port base address. */
48 #define IOMASK 0xfffffffc
/* Probe priority returned when this driver claims a device — presumably a
 * negative "success" value so more specific drivers can win; TODO confirm. */
49 #define ATA_PROBE_OK -10
/*
 * Blacklist of PCI vendor/device/subvendor/subdevice tuples that advertise
 * an IDE storage class but must NOT be claimed by the generic ATA driver.
 * Consulted by ata_pci_probe() before falling back to ata_generic_ident().
 * NOTE(review): the struct field declarations and the table's 0xffff
 * terminator entry are elided from this listing.
 */
51 static const struct none_atapci {
56 } none_atapci_table[] = {
57 /* Appears on Intel PRO/1000 PM */
58 { ATA_INTEL_ID, 0x108d, ATA_INTEL_ID, 0x0000 },
63 * generic PCI ATA device probe
/*
 * Probe method for the atapci driver.  Rejects disabled units and
 * non-storage devices, hands SATA-class devices to the AHCI ident code,
 * then walks per-vendor ident routines keyed on the PCI vendor ID.
 * Unrecognized plain-IDE devices fall back to generic DMA support unless
 * they match an entry in none_atapci_table.
 */
66 ata_pci_probe(device_t dev)
/* Honor hints-based disabling of individual atapci units. */
68 if (resource_disabled("atapci", device_get_unit(dev)))
71 if (pci_get_class(dev) != PCIC_STORAGE)
74 /* if this is an AHCI chipset grab it */
75 if (pci_get_subclass(dev) == PCIS_STORAGE_SATA) {
76 if (!ata_ahci_ident(dev))
80 /* run through the vendor specific drivers */
81 switch (pci_get_vendor(dev)) {
/* NOTE(review): several case labels are elided from this listing; each
 * arm invokes that vendor's ident routine and accepts the device when
 * the routine returns 0. */
83 if (!ata_acard_ident(dev))
86 case ATA_ACER_LABS_ID:
87 if (!ata_ali_ident(dev))
91 if (!ata_adaptec_ident(dev))
95 if (!ata_amd_ident(dev))
99 if (!ata_ati_ident(dev))
103 if (!ata_cyrix_ident(dev))
107 if (!ata_cypress_ident(dev))
110 case ATA_HIGHPOINT_ID:
111 if (!ata_highpoint_ident(dev))
115 if (!ata_intel_ident(dev))
119 if (!ata_ite_ident(dev))
123 if (!ata_jmicron_ident(dev))
127 if (!ata_marvell_ident(dev))
130 case ATA_NATIONAL_ID:
131 if (!ata_national_ident(dev))
135 if (!ata_netcell_ident(dev))
139 if (!ata_nvidia_ident(dev))
143 if (!ata_promise_ident(dev))
146 case ATA_SERVERWORKS_ID:
147 if (!ata_serverworks_ident(dev))
150 case ATA_SILICON_IMAGE_ID:
151 if (!ata_sii_ident(dev))
155 if (!ata_sis_ident(dev))
159 if (!ata_via_ident(dev))
163 if (!ata_cenatek_ident(dev))
167 if (!ata_micron_ident(dev))
172 /* unknown chipset, try generic AHCI or DMA if it seems possible */
173 if (pci_get_subclass(dev) == PCIS_STORAGE_IDE) {
174 uint16_t vendor, device, subvendor, subdevice;
175 const struct none_atapci *e;
177 vendor = pci_get_vendor(dev);
178 device = pci_get_device(dev);
179 subvendor = pci_get_subvendor(dev);
180 subdevice = pci_get_subdevice(dev);
/* Skip devices explicitly blacklisted in none_atapci_table. */
181 for (e = none_atapci_table; e->vendor != 0xffff; ++e) {
182 if (e->vendor == vendor && e->device == device &&
183 e->subvendor == subvendor && e->subdevice == subdevice)
187 if (!ata_generic_ident(dev))
/*
 * Attach method: record legacy vs native mode, try to enable PCI bus
 * mastering, allocate the busmaster I/O range when mastering sticks,
 * run the chipset init hook and add one "ata" child per channel.
 */
194 ata_pci_attach(device_t dev)
196 struct ata_pci_controller *ctlr = device_get_softc(dev);
200 /* do chipset specific setups only needed once */
201 ctlr->legacy = ata_legacy(dev);
/* A non-zero I/O address in BAR(2) indicates a busmaster DMA engine. */
202 if (ctlr->legacy || pci_read_config(dev, PCIR_BAR(2), 4) & IOMASK)
206 ctlr->allocate = ata_pci_allocate;
209 /* if needed try to enable busmastering */
210 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
211 if (!(cmd & PCIM_CMD_BUSMASTEREN)) {
212 pci_write_config(dev, PCIR_COMMAND, cmd | PCIM_CMD_BUSMASTEREN, 2);
/* Re-read to see whether the hardware actually accepted the bit. */
213 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
216 /* if busmastering mode "stuck" use it */
217 if ((cmd & PCIM_CMD_BUSMASTEREN) == PCIM_CMD_BUSMASTEREN) {
218 ctlr->r_type1 = SYS_RES_IOPORT;
219 ctlr->r_rid1 = ATA_BMADDR_RID;
220 ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1,
222 /* Only set a dma init function if the device actually supports it. */
223 ctlr->dmainit = ata_pci_dmainit;
/* Chipset-specific initialization; failure aborts the attach. */
226 if (ctlr->chipinit(dev))
229 /* attach all channels on this controller */
230 for (unit = 0; unit < ctlr->channels; unit++) {
/* Legacy channels 0/1 must keep their fixed unit numbers. */
232 if ((unit == 0 || unit == 1) && ctlr->legacy) {
233 device_add_child(dev, "ata", unit);
236 /* XXX TGEN devclass_find_free_unit() implementation */
/* Otherwise scan the ata devclass for the first unused unit number. */
238 while (freeunit < devclass_get_maxunit(ata_devclass) &&
239 devclass_get_device(ata_devclass, freeunit) != NULL)
242 device_add_child(dev, "ata", freeunit);
244 bus_generic_attach(dev);
/*
 * Detach method: delete all child channel devices, tear down the shared
 * interrupt, and release the IRQ and I/O resources acquired at attach.
 */
249 ata_pci_detach(device_t dev)
251 struct ata_pci_controller *ctlr = device_get_softc(dev);
255 /* detach & delete all children */
256 if (!device_get_children(dev, &children, &nchildren)) {
257 for (i = 0; i < nchildren; i++)
258 device_delete_child(dev, children[i]);
/* device_get_children() allocates the list; caller must free it. */
259 kfree(children, M_TEMP);
263 bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle);
264 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ctlr->r_irq);
268 bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2);
272 bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1);
/*
 * bus_alloc_resource method for child channels.  For legacy controllers
 * the well-known ISA-compat port ranges and IRQs (14/15) are used; in
 * native mode the per-channel PCI BARs are allocated from the parent,
 * and the controller's shared IRQ resource is handed back for IRQ
 * requests.
 */
280 ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
281 u_long start, u_long end, u_long count, u_int flags, int cpuid)
283 struct ata_pci_controller *controller = device_get_softc(dev);
284 int unit = ((struct ata_channel *)device_get_softc(child))->unit;
285 struct resource *res = NULL;
288 if (type == SYS_RES_IOPORT) {
/* NOTE(review): the switch on *rid and the ATA_IOADDR_RID case label
 * are elided from this listing. */
291 if (controller->legacy) {
292 start = (unit ? ATA_SECONDARY : ATA_PRIMARY);
294 end = start + count - 1;
/* Native mode: command block lives in BAR(0)/BAR(2) per channel. */
296 myrid = PCIR_BAR(0) + (unit << 3);
297 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
298 SYS_RES_IOPORT, &myrid,
299 start, end, count, flags, cpuid);
302 case ATA_CTLADDR_RID:
303 if (controller->legacy) {
304 start = (unit ? ATA_SECONDARY : ATA_PRIMARY) + ATA_CTLOFFSET;
305 count = ATA_CTLIOSIZE;
306 end = start + count - 1;
/* Native mode: control block lives in BAR(1)/BAR(3) per channel. */
308 myrid = PCIR_BAR(1) + (unit << 3);
309 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
310 SYS_RES_IOPORT, &myrid,
311 start, end, count, flags, cpuid);
315 if (type == SYS_RES_IRQ && *rid == ATA_IRQ_RID) {
316 if (controller->legacy) {
/* Legacy fixed interrupts: primary = IRQ 14, secondary = IRQ 15. */
317 int irq = (unit == 0 ? 14 : 15);
319 cpuid = machintr_legacy_intr_cpuid(irq);
320 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
321 SYS_RES_IRQ, rid, irq, irq, 1, flags,
/* Native mode shares the controller's single IRQ resource. */
325 res = controller->r_irq;
/*
 * bus_release_resource counterpart of ata_pci_alloc_resource: route
 * I/O-port releases back to the parent with the per-channel BAR rid,
 * and only release legacy IRQs (the native-mode IRQ is shared and owned
 * by the controller).
 */
331 ata_pci_release_resource(device_t dev, device_t child, int type, int rid,
334 struct ata_pci_controller *controller = device_get_softc(dev);
335 int unit = ((struct ata_channel *)device_get_softc(child))->unit;
337 if (type == SYS_RES_IOPORT) {
/* NOTE(review): the switch on rid and ATA_IOADDR_RID label are elided. */
340 return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev,
342 PCIR_BAR(0) + (unit << 3), r);
345 case ATA_CTLADDR_RID:
346 return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev,
348 PCIR_BAR(1) + (unit << 3), r);
354 if (type == SYS_RES_IRQ) {
/* Only the well-known ATA IRQ rid is handled here. */
355 if (rid != ATA_IRQ_RID)
358 if (controller->legacy) {
359 return BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
360 SYS_RES_IRQ, rid, r);
/*
 * bus_setup_intr method: legacy controllers pass the request straight
 * to the parent bus; native controllers instead record the handler in
 * the controller's per-channel interrupt table, to be dispatched from
 * the shared interrupt (see ata_generic_intr).
 */
369 ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq,
370 int flags, driver_intr_t *function, void *argument,
373 struct ata_pci_controller *controller = device_get_softc(dev);
375 if (controller->legacy) {
376 return BUS_SETUP_INTR(device_get_parent(dev), child, irq,
377 flags, function, argument, cookiep, NULL, NULL);
380 struct ata_pci_controller *controller = device_get_softc(dev);
381 int unit = ((struct ata_channel *)device_get_softc(child))->unit;
383 controller->interrupt[unit].function = function;
384 controller->interrupt[unit].argument = argument;
/* The controller softc itself serves as the teardown cookie. */
385 *cookiep = controller;
/*
 * bus_teardown_intr method: mirror of ata_pci_setup_intr — forward to
 * the parent for legacy controllers, otherwise clear the per-channel
 * handler slot in the controller's interrupt table.
 */
391 ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
394 struct ata_pci_controller *controller = device_get_softc(dev);
396 if (controller->legacy) {
397 return BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq, cookie);
400 struct ata_pci_controller *controller = device_get_softc(dev);
401 int unit = ((struct ata_channel *)device_get_softc(child))->unit;
403 controller->interrupt[unit].function = NULL;
404 controller->interrupt[unit].argument = NULL;
/*
 * Per-channel resource setup: allocate the command and control I/O
 * ranges, wire up the channel's register map (r_io), and map the
 * busmaster registers out of the controller's shared BM I/O range.
 */
410 ata_pci_allocate(device_t dev)
412 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
413 struct ata_channel *ch = device_get_softc(dev);
414 struct resource *io = NULL, *ctlio = NULL;
417 rid = ATA_IOADDR_RID;
418 if (!(io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE)))
421 rid = ATA_CTLADDR_RID;
422 if (!(ctlio = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,RF_ACTIVE))){
/* Undo the first allocation on failure of the second. */
423 bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io);
/* Task-file registers sit at consecutive offsets in the command block. */
427 for (i = ATA_DATA; i <= ATA_COMMAND; i ++) {
428 ch->r_io[i].res = io;
429 ch->r_io[i].offset = i;
431 ch->r_io[ATA_CONTROL].res = ctlio;
/* Legacy control block starts at offset 0; native BARs at offset 2. */
432 ch->r_io[ATA_CONTROL].offset = ctlr->legacy ? 0 : 2;
433 ch->r_io[ATA_IDX_ADDR].res = io;
434 ata_default_registers(dev);
/* Busmaster registers: per-channel slice of the controller's BM range. */
436 for (i = ATA_BMCMD_PORT; i <= ATA_BMDTP_PORT; i++) {
437 ch->r_io[i].res = ctlr->r_res1;
438 ch->r_io[i].offset = (i - ATA_BMCMD_PORT) + (ch->unit*ATA_BMIOSIZE);
/*
 * Interrupt status check for a channel: consult the busmaster status
 * register when a DMA transfer is (or may be) active, acknowledging the
 * interrupt bit there; otherwise fall back to polling the ALTSTAT
 * register's BUSY bit.
 */
447 ata_pci_status(device_t dev)
449 struct ata_pci_controller *controller =
450 device_get_softc(device_get_parent(dev));
451 struct ata_channel *ch = device_get_softc(dev);
453 if ((dumping || !controller->legacy) &&
454 ch->dma && ((ch->flags & ATA_ALWAYS_DMASTAT) ||
455 (ch->dma->flags & ATA_DMA_ACTIVE))) {
456 int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;
459 * Strictly speaking the DMA engine should already be stopped
460 * once we receive the interrupt.
461 * However at least ICH controllers seem to have the habit
462 * of not clearing the active bit even though the interrupt
464 * To make sure we wait a little bit (to make sure that other
465 * buggy systems actually have a chance of finishing their
466 * DMA transaction) and then ignore the active bit.
468 if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) ==
469 (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) {
/* Re-read status after the (elided) delay before deciding. */
471 bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;
473 if ((bmstat & ATA_BMSTAT_INTERRUPT) == 0)
/* Ack the interrupt without clearing a pending error indication. */
475 ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR);
478 if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) {
480 if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY)
/*
 * Install the PCI-specific hardware hooks on a channel; currently this
 * overrides the status routine with ata_pci_status.
 */
487 ata_pci_hw(device_t dev)
489 struct ata_channel *ch = device_get_softc(dev);
492 ch->hw.status = ata_pci_status;
/*
 * Start a busmaster DMA transfer: clear stale status bits, load the
 * scatter/gather table address, set the transfer direction, then issue
 * the start command as a separate write.
 */
496 ata_pci_dmastart(device_t dev)
498 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
/* Write-1-to-clear any leftover interrupt/error status. */
501 ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) |
502 (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR)));
503 ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, ch->dma->sg_bus);
504 ch->dma->flags |= ATA_DMA_ACTIVE;
505 val = ATA_IDX_INB(ch, ATA_BMCMD_PORT);
506 if (ch->dma->flags & ATA_DMA_READ)
507 val |= ATA_BMCMD_WRITE_READ;
509 val &= ~ATA_BMCMD_WRITE_READ;
510 ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, val);
513 * Issue the start command separately from configuration setup,
514 * in case the hardware latches portions of the configuration.
516 ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, val | ATA_BMCMD_START_STOP);
/*
 * Stop a busmaster DMA transfer: clear the start bit, capture the final
 * status (returned to the caller), and acknowledge interrupt/error bits.
 */
522 ata_pci_dmastop(device_t dev)
524 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
527 ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
528 ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
529 ch->dma->flags &= ~ATA_DMA_ACTIVE;
/* Snapshot status before it is cleared by the write-1-to-clear below. */
530 error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;
531 ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
/*
 * Abort/reset the DMA engine on a channel: stop the engine, clear
 * status, and unload any mapped DMA buffers.  Unlike dmastop this takes
 * the channel device directly and discards the final status.
 */
536 ata_pci_dmareset(device_t dev)
538 struct ata_channel *ch = device_get_softc(dev);
540 ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
541 ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
542 ch->dma->flags &= ~ATA_DMA_ACTIVE;
543 ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
544 ch->dma->unload(dev);
/*
 * Initialize the generic busmaster DMA method vector on a channel
 * (start/stop/reset); installed from ata_pci_attach when the hardware
 * supports busmastering.
 */
548 ata_pci_dmainit(device_t dev)
550 struct ata_channel *ch = device_get_softc(dev);
554 ch->dma->start = ata_pci_dmastart;
555 ch->dma->stop = ata_pci_dmastop;
556 ch->dma->reset = ata_pci_dmareset;
561 * misc support functions
/*
 * Decide whether a controller operates in legacy (ISA-compat) mode:
 * either the PROGIF says master-device and not both channels are in
 * native mode, or all relevant BARs read back as zero (no native
 * resources programmed).
 */
564 ata_legacy(device_t dev)
566 return (((pci_read_config(dev, PCIR_PROGIF, 1)&PCIP_STORAGE_IDE_MASTERDEV)&&
567 ((pci_read_config(dev, PCIR_PROGIF, 1) &
568 (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC)) !=
569 (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC))) ||
570 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
571 !pci_read_config(dev, PCIR_BAR(1), 4) &&
572 !pci_read_config(dev, PCIR_BAR(2), 4) &&
573 !pci_read_config(dev, PCIR_BAR(3), 4) &&
574 !pci_read_config(dev, PCIR_BAR(5), 4)));
/*
 * Shared interrupt dispatcher for native-mode controllers: fan the
 * interrupt out to every channel that registered a handler via
 * ata_pci_setup_intr.
 */
578 ata_generic_intr(void *data)
580 struct ata_pci_controller *ctlr = data;
581 struct ata_channel *ch;
584 for (unit = 0; unit < ctlr->channels; unit++) {
585 if ((ch = ctlr->interrupt[unit].argument))
586 ctlr->interrupt[unit].function(ch);
/*
 * Allocate the controller's shared IRQ resource and hook up intr_func
 * as its handler; prints a diagnostic and cleans up on either failure.
 */
591 ata_setup_interrupt(device_t dev, void *intr_func)
593 struct ata_pci_controller *ctlr = device_get_softc(dev);
594 int rid = ATA_IRQ_RID;
597 if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
598 RF_SHAREABLE | RF_ACTIVE))) {
599 device_printf(dev, "unable to map interrupt\n");
602 if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS,
603 intr_func, ctlr, &ctlr->handle, NULL))) {
604 device_printf(dev, "unable to setup interrupt\n");
/* Release the IRQ we just allocated so the failure leaves no leak. */
605 bus_release_resource(dev, SYS_RES_IRQ, rid, ctlr->r_irq);
/*
 * Undo ata_setup_interrupt: detach the handler and release the
 * controller's IRQ resource.
 */
614 ata_teardown_interrupt(device_t dev)
616 struct ata_pci_controller *ctlr = device_get_softc(dev);
620 bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle);
621 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ctlr->r_irq);
/*
 * Build and install a human-readable device description of the form
 * "<vendor> <chip text> <max DMA mode> controller".
 */
628 ata_set_desc(device_t dev)
630 struct ata_pci_controller *ctlr = device_get_softc(dev);
633 ksprintf(buffer, "%s %s %s controller",
634 ata_pcivendor2str(dev), ctlr->chip->text,
635 ata_mode2str(ctlr->chip->max_dma));
636 device_set_desc_copy(dev, buffer);
/*
 * Scan a zero-terminated chip-ID table for an entry matching this
 * device's PCI devid with at least the listed revision.
 * NOTE(review): the matching-return and table-advance lines are elided
 * from this listing.
 */
639 const struct ata_chip_id *
640 ata_match_chip(device_t dev, const struct ata_chip_id *index)
642 while (index->chipid != 0) {
643 if (pci_get_devid(dev) == index->chipid &&
644 pci_get_revid(dev) >= index->chiprev)
/*
 * Like ata_match_chip, but search the siblings of 'dev' on the parent
 * bus for a device matching the table, constrained by PCI slot: a
 * non-negative 'slot' must match exactly, a negative 'slot' bounds the
 * slot number from above (<= -slot).
 */
651 const struct ata_chip_id *
652 ata_find_chip(device_t dev, const struct ata_chip_id *index, int slot)
657 if (device_get_children(device_get_parent(dev), &children, &nchildren))
660 while (index->chipid != 0) {
661 for (i = 0; i < nchildren; i++) {
662 if (((slot >= 0 && pci_get_slot(children[i]) == slot) ||
663 (slot < 0 && pci_get_slot(children[i]) <= -slot)) &&
664 pci_get_devid(children[i]) == index->chipid &&
665 pci_get_revid(children[i]) >= index->chiprev) {
/* Free the child list on both the found and not-found paths. */
666 kfree(children, M_TEMP);
672 kfree(children, M_TEMP);
/*
 * Cable check: UDMA modes above UDMA2 require an 80-pin cable; when the
 * device does not report one, warn via ata_print_cable.
 * NOTE(review): the mode-downgrade and return lines are elided from
 * this listing.
 */
677 ata_check_80pin(device_t dev, int mode)
679 struct ata_device *atadev = device_get_softc(dev);
681 if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
682 ata_print_cable(dev, "device");
689 ata_pcivendor2str(device_t dev)
691 switch (pci_get_vendor(dev)) {
692 case ATA_ACARD_ID: return "Acard";
693 case ATA_ACER_LABS_ID: return "AcerLabs";
694 case ATA_AMD_ID: return "AMD";
695 case ATA_ADAPTEC_ID: return "Adaptec";
696 case ATA_ATI_ID: return "ATI";
697 case ATA_CYRIX_ID: return "Cyrix";
698 case ATA_CYPRESS_ID: return "Cypress";
699 case ATA_HIGHPOINT_ID: return "HighPoint";
700 case ATA_INTEL_ID: return "Intel";
701 case ATA_ITE_ID: return "ITE";
702 case ATA_JMICRON_ID: return "JMicron";
703 case ATA_MARVELL_ID: return "Marvell";
704 case ATA_NATIONAL_ID: return "National";
705 case ATA_NETCELL_ID: return "Netcell";
706 case ATA_NVIDIA_ID: return "nVidia";
707 case ATA_PROMISE_ID: return "Promise";
708 case ATA_SERVERWORKS_ID: return "ServerWorks";
709 case ATA_SILICON_IMAGE_ID: return "SiI";
710 case ATA_SIS_ID: return "SiS";
711 case ATA_VIA_ID: return "VIA";
712 case ATA_CENATEK_ID: return "Cenatek";
713 case ATA_MICRON_ID: return "Micron";
714 default: return "Generic";
719 ata_mode2idx(int mode)
721 if ((mode & ATA_DMA_MASK) == ATA_UDMA0)
722 return (mode & ATA_MODE_MASK) + 8;
723 if ((mode & ATA_DMA_MASK) == ATA_WDMA0)
724 return (mode & ATA_MODE_MASK) + 5;
725 return (mode & ATA_MODE_MASK) - ATA_PIO0;
/* Newbus method table for the atapci controller driver. */
729 static device_method_t ata_pci_methods[] = {
730 /* device interface */
731 DEVMETHOD(device_probe, ata_pci_probe),
732 DEVMETHOD(device_attach, ata_pci_attach),
733 DEVMETHOD(device_detach, ata_pci_detach),
734 DEVMETHOD(device_shutdown, bus_generic_shutdown),
735 DEVMETHOD(device_suspend, bus_generic_suspend),
736 DEVMETHOD(device_resume, bus_generic_resume),
/* bus interface: channels allocate resources/interrupts through us */
739 DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource),
740 DEVMETHOD(bus_release_resource, ata_pci_release_resource),
741 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
742 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
743 DEVMETHOD(bus_setup_intr, ata_pci_setup_intr),
744 DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr),
/* Driver/devclass glue registering atapci as a child driver of pci. */
749 devclass_t atapci_devclass;
751 static driver_t ata_pci_driver = {
754 sizeof(struct ata_pci_controller),
757 DRIVER_MODULE(atapci, pci, ata_pci_driver, atapci_devclass, NULL, NULL);
758 MODULE_VERSION(atapci, 1);
759 MODULE_DEPEND(atapci, ata, 1, 1, 1);
/*
 * Probe method for a PCI ATA channel: determine the channel's unit
 * number by its position among the parent's children, set a
 * description, and defer to the generic ata_probe.
 */
762 ata_pcichannel_probe(device_t dev)
764 struct ata_channel *ch = device_get_softc(dev);
769 /* take care of green memory */
770 bzero(ch, sizeof(struct ata_channel));
772 /* find channel number on this controller */
773 device_get_children(device_get_parent(dev), &children, &count);
774 for (i = 0; i < count; i++) {
775 if (children[i] == dev)
778 kfree(children, M_TEMP);
780 ksprintf(buffer, "ATA channel %d", ch->unit);
781 device_set_desc_copy(dev, buffer);
783 return ata_probe(dev);
/*
 * Attach method for a channel: run the controller's per-channel
 * resource allocation hook, then perform the generic ATA attach.
 */
787 ata_pcichannel_attach(device_t dev)
789 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
790 struct ata_channel *ch = device_get_softc(dev);
798 if ((error = ctlr->allocate(dev))) {
804 return ata_attach(dev);
/*
 * Detach method for a channel: generic ATA detach first; failure aborts.
 */
808 ata_pcichannel_detach(device_t dev)
810 struct ata_channel *ch = device_get_softc(dev);
813 if ((error = ata_detach(dev)))
819 /* XXX SOS free resources for io and ctlio ?? */
/*
 * ata_locking method: delegate serialization to the controller's
 * chipset-specific locking hook when one is installed.
 */
825 ata_pcichannel_locking(device_t dev, int mode)
827 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
828 struct ata_channel *ch = device_get_softc(dev);
831 return ctlr->locking(dev, mode);
/*
 * ata_reset method: unload any active DMA mapping, then reset the
 * channel hardware via the generic reset routine.
 * NOTE(review): a chipset-specific reset hook branch appears elided
 * between the DMA unload and the generic reset.
 */
837 ata_pcichannel_reset(device_t dev)
839 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
840 struct ata_channel *ch = device_get_softc(dev);
842 /* if DMA engine present reset it */
846 ch->dma->unload(dev);
849 /* reset the controller HW */
853 ata_generic_reset(dev);
/*
 * ata_setmode method: drop the device to PIO first, then ask the
 * chipset setmode hook for the requested transfer mode.
 */
857 ata_pcichannel_setmode(device_t parent, device_t dev)
859 struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev));
860 struct ata_device *atadev = device_get_softc(dev);
861 int mode = atadev->mode;
863 ctlr->setmode(dev, ATA_PIO_MAX);
865 ctlr->setmode(dev, mode);
/* Newbus method table and driver glue for PCI ATA channel devices. */
868 static device_method_t ata_pcichannel_methods[] = {
869 /* device interface */
870 DEVMETHOD(device_probe, ata_pcichannel_probe),
871 DEVMETHOD(device_attach, ata_pcichannel_attach),
872 DEVMETHOD(device_detach, ata_pcichannel_detach),
873 DEVMETHOD(device_shutdown, bus_generic_shutdown),
874 DEVMETHOD(device_suspend, ata_suspend),
875 DEVMETHOD(device_resume, ata_resume),
/* ata interface: mode setting, locking and reset hooks */
878 DEVMETHOD(ata_setmode, ata_pcichannel_setmode),
879 DEVMETHOD(ata_locking, ata_pcichannel_locking),
880 DEVMETHOD(ata_reset, ata_pcichannel_reset),
885 driver_t ata_pcichannel_driver = {
887 ata_pcichannel_methods,
888 sizeof(struct ata_channel),
/* Register channels as "ata" children of atapci in ata_devclass. */
891 DRIVER_MODULE(ata, atapci, ata_pcichannel_driver, ata_devclass, NULL, NULL);