2 * Copyright (c) 1998,1999,2000,2001,2002 Søren Schmidt <sos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * $FreeBSD: src/sys/dev/ata/ata-disk.c,v 1.60.2.24 2003/01/30 07:19:59 sos Exp $
29 * $DragonFly: src/sys/dev/disk/ata/ata-disk.c,v 1.7 2003/08/07 21:16:51 dillon Exp $
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
42 #include <sys/devicestat.h>
44 #include <sys/sysctl.h>
45 #include <sys/syslog.h>
48 #include <machine/md_var.h>
49 #include <machine/bus.h>
50 #include <machine/clock.h>
58 /* device structures */
59 static d_open_t adopen;
60 static d_close_t adclose;
61 static d_strategy_t adstrategy;
62 static d_dump_t addump;
/* character-device switch for "ad" disks (listing truncated: only the
 * write and strategy entries are visible here) */
64 static struct cdevsw ad_cdevsw = {
74 /* write */ physwrite,
78 /* strategy */ adstrategy,
/* internal helper prototypes */
84 static void ad_invalidatequeue(struct ad_softc *, struct ad_request *);
85 static int ad_tagsupported(struct ad_softc *);
86 static void ad_timeout(struct ad_request *);
87 static void ad_free(struct ad_request *);
88 static int ad_version(u_int16_t);
/* maximum number of retries before a request is failed with EIO */
91 #define AD_MAX_RETRIES 3
/* bitmap of allocated "ad" unit numbers, plus loader tunables */
94 static u_int32_t adp_lun_map = 0;
95 static int ata_dma = 1; /* allow DMA transfers */
96 static int ata_wc = 1; /* allow write caching */
97 static int ata_tags = 0; /* allow tagged queueing */
98 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
99 TUNABLE_INT("hw.ata.wc", &ata_wc);
100 TUNABLE_INT("hw.ata.tags", &ata_tags);
101 static MALLOC_DEFINE(M_AD, "AD driver", "ATA disk driver");
/* read-only sysctls exposing the tunables under hw.ata */
104 SYSCTL_DECL(_hw_ata);
105 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RD, &ata_dma, 0,
106 "ATA disk DMA mode control");
107 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RD, &ata_wc, 0,
108 "ATA disk write caching");
109 SYSCTL_INT(_hw_ata, OID_AUTO, tags, CTLFLAG_RD, &ata_tags, 0,
110 "ATA disk tagged queuing support");
/*
 * ad_attach: attach an ATA disk found during probe.
 * Allocates the per-disk softc, derives geometry from the IDENTIFY data,
 * negotiates multi-sector, read/write cache, DMA and tagged-queueing
 * settings, registers a devstat entry, creates the disk device node and
 * builds the disklabel.
 * NOTE(review): this listing is incomplete -- several lines (returns,
 * closing braces, else-branches) are not visible here.
 */
113 ad_attach(struct ata_device *atadev)
115 struct ad_softc *adp;
/* zeroed softc; bail out on memory shortage */
118 if (!(adp = malloc(sizeof(struct ad_softc), M_AD, M_NOWAIT | M_ZERO))) {
119 ata_prtdev(atadev, "failed to allocate driver storage\n");
122 adp->device = atadev;
/* unit numbering: two schemes appear here -- channel-based and bitmap
 * based (presumably selected at compile time; selector not visible) */
124 adp->lun = (device_get_unit(atadev->channel->dev)<<1)+ATA_DEV(atadev->unit);
126 adp->lun = ata_get_lun(&adp_lun_map);
128 ata_set_name(atadev, "ad", adp->lun);
/* CHS geometry straight from the IDENTIFY page */
129 adp->heads = atadev->param->heads;
130 adp->sectors = atadev->param->sectors;
131 adp->total_secs = atadev->param->cylinders * adp->heads * adp->sectors;
132 bufq_init(&adp->queue);
134 /* does this device need oldstyle CHS addressing */
135 if (!ad_version(atadev->param->version_major) ||
136 !(atadev->param->atavalid & ATA_FLAG_54_58) || !atadev->param->lba_size)
137 adp->flags |= AD_F_CHS_USED;
139 /* use the 28bit LBA size if valid (16383 cylinders is the CHS cap) */
140 if (atadev->param->cylinders == 16383 &&
141 adp->total_secs < atadev->param->lba_size)
142 adp->total_secs = atadev->param->lba_size;
144 /* use the 48bit LBA size if valid (beyond the 28-bit 268435455 limit) */
145 if (atadev->param->support.address48 &&
146 atadev->param->lba_size48 > 268435455)
147 adp->total_secs = atadev->param->lba_size48;
/* hold the channel while issuing setup commands to the drive */
149 ATA_SLEEPLOCK_CH(atadev->channel, ATA_CONTROL);
150 /* use multiple sectors/interrupt if device supports it */
151 adp->transfersize = DEV_BSIZE;
152 if (ad_version(atadev->param->version_major)) {
153 int secsperint = max(1, min(atadev->param->sectors_intr, 16));
155 if (!ata_command(atadev, ATA_C_SET_MULTI, 0, secsperint,
156 0, ATA_WAIT_INTR) && !ata_wait(atadev, 0))
157 adp->transfersize *= secsperint;
160 /* enable read caching if not default on device */
161 if (ata_command(atadev, ATA_C_SETFEATURES,
162 0, 0, ATA_C_F_ENAB_RCACHE, ATA_WAIT_INTR))
163 ata_prtdev(atadev, "enabling readahead cache failed\n");
165 /* enable write caching if allowed and not default on device */
166 if (ata_wc || (ata_tags && ad_tagsupported(adp))) {
167 if (ata_command(atadev, ATA_C_SETFEATURES,
168 0, 0, ATA_C_F_ENAB_WCACHE, ATA_WAIT_INTR))
169 ata_prtdev(atadev, "enabling write cache failed\n");
/* otherwise (else-branch not visible) write caching is turned off */
172 if (ata_command(atadev, ATA_C_SETFEATURES,
173 0, 0, ATA_C_F_DIS_WCACHE, ATA_WAIT_INTR))
174 ata_prtdev(atadev, "disabling write cache failed\n");
177 /* use DMA if allowed and if drive/controller supports it */
179 ata_dmainit(atadev->channel, atadev->unit, ata_pmode(atadev->param),
180 ata_wmode(atadev->param), ata_umode(atadev->param));
/* PIO-only fallback: -1 disables WDMA/UDMA mode selection */
182 ata_dmainit(atadev->channel, atadev->unit,
183 ata_pmode(atadev->param), -1, -1);
185 /* use tagged queueing if allowed and supported */
186 if (ata_tags && ad_tagsupported(adp)) {
187 adp->num_tags = atadev->param->queuelen;
188 adp->flags |= AD_F_TAG_ENABLED;
189 adp->device->channel->flags |= ATA_QUEUED;
/* disable RELEASE/SERVICE interrupts; ad_service() polls instead */
190 if (ata_command(atadev, ATA_C_SETFEATURES,
191 0, 0, ATA_C_F_DIS_RELIRQ, ATA_WAIT_INTR))
192 ata_prtdev(atadev, "disabling release interrupt failed\n");
193 if (ata_command(atadev, ATA_C_SETFEATURES,
194 0, 0, ATA_C_F_DIS_SRVIRQ, ATA_WAIT_INTR))
195 ata_prtdev(atadev, "disabling service interrupt failed\n");
198 ATA_UNLOCK_CH(atadev->channel);
/* register with devstat and create the disk device node */
200 devstat_add_entry(&adp->stats, "ad", adp->lun, DEV_BSIZE,
201 DEVSTAT_NO_ORDERED_TAGS,
202 DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
203 DEVSTAT_PRIORITY_DISK);
205 dev = disk_create(adp->lun, &adp->disk, 0, &ad_cdevsw);
/* cap single I/Os at 256 sectors (the 28-bit command count limit,
 * see max_count in ad_transfer) */
207 dev->si_iosize_max = 256 * DEV_BSIZE;
210 /* construct the disklabel */
211 bzero(&adp->disk.d_label, sizeof(struct disklabel));
212 adp->disk.d_label.d_secsize = DEV_BSIZE;
213 adp->disk.d_label.d_nsectors = adp->sectors;
214 adp->disk.d_label.d_ntracks = adp->heads;
215 adp->disk.d_label.d_ncylinders = adp->total_secs/(adp->heads*adp->sectors);
216 adp->disk.d_label.d_secpercyl = adp->sectors * adp->heads;
217 adp->disk.d_label.d_secperunit = adp->total_secs;
219 atadev->driver = adp;
222 /* if this disk belongs to an ATA RAID don't print the probe */
223 if (ata_raiddisk_attach(adp))
224 adp->flags |= AD_F_RAID_SUBDISK;
226 if (atadev->driver) {
228 ata_enclosure_print(atadev);
/*
 * ad_detach: remove a disk from the configuration.
 * Fails all requests queued for this disk with ENXIO, tears down the
 * disk/devstat registration, flushes the drive cache and releases the
 * device name and lun.  (listing incomplete -- some lines not visible)
 */
234 ad_detach(struct ata_device *atadev, int flush) /* get rid of flush XXX SOS */
236 struct ad_softc *adp = atadev->driver;
237 struct ad_request *request;
240 atadev->flags |= ATA_D_DETACHING;
241 ata_prtdev(atadev, "removed from configuration\n");
242 ad_invalidatequeue(adp, NULL);
/* fail every request of ours already on the controller queue */
243 TAILQ_FOREACH(request, &atadev->channel->ata_queue, chain) {
244 if (request->softc != adp)
246 TAILQ_REMOVE(&atadev->channel->ata_queue, request, chain);
247 request->bp->b_error = ENXIO;
248 request->bp->b_flags |= B_ERROR;
249 biodone(request->bp);
/* ...and every buf still waiting on our own drive queue */
252 while ((bp = bufq_first(&adp->queue))) {
253 bufq_remove(&adp->queue, bp);
255 bp->b_flags |= B_ERROR;
258 disk_invalidate(&adp->disk);
259 devstat_remove_entry(&adp->stats);
260 disk_destroy(&adp->disk);
/* push any dirty cached data to the platters before the device goes */
262 if (ata_command(atadev, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
263 ata_prtdev(atadev, "flushing cache on detach failed\n");
265 if (adp->flags & AD_F_RAID_SUBDISK)
266 ata_raiddisk_detach(adp);
267 ata_free_name(atadev);
268 ata_free_lun(&adp_lun_map, adp->lun);
269 atadev->driver = NULL;
/*
 * adopen: d_open entry point.
 * NOTE(review): body mostly not visible in this listing; RAID subdisks
 * are apparently treated specially here -- confirm against full source.
 */
275 adopen(dev_t dev, int flags, int fmt, struct thread *td)
277 struct ad_softc *adp = dev->si_drv1;
279 if (adp->flags & AD_F_RAID_SUBDISK)
/*
 * adclose: d_close entry point -- flush the drive's write cache under
 * the channel lock so no dirty data is left cached on last close.
 */
285 adclose(dev_t dev, int flags, int fmt, struct thread *td)
287 struct ad_softc *adp = dev->si_drv1;
289 ATA_SLEEPLOCK_CH(adp->device->channel, ATA_CONTROL);
290 if (ata_command(adp->device, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
291 ata_prtdev(adp->device, "flushing cache on close failed\n");
292 ATA_UNLOCK_CH(adp->device->channel);
/*
 * adstrategy: d_strategy entry point -- queue a buf on the drive's
 * sorted queue and kick the channel scheduler.  I/O to a detaching
 * device is rejected with B_ERROR.  (listing incomplete)
 */
297 adstrategy(struct buf *bp)
299 struct ad_softc *adp = bp->b_dev->si_drv1;
302 if (adp->device->flags & ATA_D_DETACHING) {
304 bp->b_flags |= B_ERROR;
/* disksort keeps the per-drive queue in elevator order */
309 bufqdisksort(&adp->queue, bp);
311 ata_start(adp->device->channel);
/*
 * addump (d_dump): write a crash dump to disk using polled PIO.
 * NOTE(review): the function header line itself is not visible in this
 * truncated listing; these are its locals and body.
 */
317 struct ad_softc *adp = dev->si_drv1;
318 struct ad_request request;
319 u_int count, blkno, secsize;
320 vm_offset_t addr = 0;
322 int dumppages = MAXDUMPPGS;
326 if ((error = disk_dumpcheck(dev, &count, &blkno, &secsize)))
332 /* force PIO mode for dumps */
333 adp->device->mode = ATA_PIO;
334 ata_reinit(adp->device->channel);
336 blkcnt = howmany(PAGE_SIZE, secsize);
/* clamp pages-per-iteration to what remains to be dumped */
342 if ((count / blkcnt) < dumppages)
343 dumppages = count / blkcnt;
/* temporarily map the physical pages about to be written; addresses
 * without backing memory are substituted with page 0 so the transfer
 * length stays constant */
345 for (i = 0; i < dumppages; ++i) {
346 vm_offset_t a = addr + (i * PAGE_SIZE);
347 if (is_physical_memory(a))
348 va = pmap_kenter_temporary(trunc_page(a), i);
350 va = pmap_kenter_temporary(trunc_page(0), i);
353 bzero(&request, sizeof(struct ad_request));
355 request.blockaddr = blkno;
356 request.bytecount = PAGE_SIZE * dumppages;
/* polled loop: each ad_transfer() call moves one chunk */
359 while (request.bytecount > 0) {
360 ad_transfer(&request);
361 if (request.flags & ADR_F_ERROR)
363 request.donecount += request.currentsize;
364 request.bytecount -= request.currentsize;
368 if (dumpstatus(addr, (off_t)count * DEV_BSIZE) < 0)
371 blkno += blkcnt * dumppages;
372 count -= blkcnt * dumppages;
373 addr += PAGE_SIZE * dumppages;
376 if (ata_wait(adp->device, ATA_S_READY | ATA_S_DSC) < 0)
377 ata_prtdev(adp->device, "timeout waiting for final ready\n");
/*
 * ad_start: take the next buf off the drive queue and convert it into
 * an ad_request on the controller queue.  With tagged queueing enabled,
 * a free tag slot must be found first.  (listing incomplete)
 */
382 ad_start(struct ata_device *atadev)
384 struct ad_softc *adp = atadev->driver;
385 struct buf *bp = bufq_first(&adp->queue);
386 struct ad_request *request;
392 /* if tagged queueing enabled get next free tag */
393 if (adp->flags & AD_F_TAG_ENABLED) {
394 while (tag <= adp->num_tags && adp->tags[tag])
396 if (tag > adp->num_tags )
400 if (!(request = malloc(sizeof(struct ad_request), M_AD, M_NOWAIT|M_ZERO))) {
401 ata_prtdev(atadev, "out of memory in start\n");
/* fill in the request from the buf */
406 request->softc = adp;
408 request->blockaddr = bp->b_pblkno;
409 request->bytecount = bp->b_bcount;
410 request->data = bp->b_data;
412 if (bp->b_flags & B_READ)
413 request->flags |= ADR_F_READ;
/* pre-allocate the DMA table; degrade to PIO if that fails */
414 if (adp->device->mode >= ATA_DMA) {
415 if (!(request->dmatab = ata_dmaalloc(atadev->channel, atadev->unit)))
416 adp->device->mode = ATA_PIO;
419 /* insert in tag array */
420 adp->tags[tag] = request;
422 /* remove from drive queue */
423 bufq_remove(&adp->queue, bp);
425 /* link onto controller queue */
426 TAILQ_INSERT_TAIL(&atadev->channel->ata_queue, request, chain);
/*
 * ad_transfer: program the drive for (the next chunk of) a request.
 * Handles CHS vs LBA addressing, tagged and untagged DMA, and multi /
 * single sector PIO; returns an ATA_OP_* code to the channel state
 * machine.  (listing incomplete -- some branches and the
 * transfer_failed label line are not visible here)
 */
430 ad_transfer(struct ad_request *request)
432 struct ad_softc *adp;
434 u_int32_t count, max_count;
436 int flags = ATA_IMMEDIATE;
438 /* get request params */
439 adp = request->softc;
441 /* calculate transfer details */
442 lba = request->blockaddr + (request->donecount / DEV_BSIZE);
444 if (request->donecount == 0) {
446 /* start timeout for this transfer (10 seconds) */
448 request->timeout_handle.callout = NULL;
450 request->timeout_handle =
451 timeout((timeout_t*)ad_timeout, request, 10 * hz);
453 /* setup transfer parameters */
454 count = howmany(request->bytecount, DEV_BSIZE);
/* 48-bit commands take a 16-bit sector count, 28-bit ones 8 bits */
455 max_count = adp->device->param->support.address48 ? 65536 : 256;
456 if (count > max_count) {
457 ata_prtdev(adp->device,
458 "count %d size transfers not supported\n", count);
/* pack CHS into the lba variable in task-file register layout:
 * sector | cylinder<<8 | head<<24 */
462 if (adp->flags & AD_F_CHS_USED) {
463 int sector = (lba % adp->sectors) + 1;
464 int cylinder = lba / (adp->sectors * adp->heads);
465 int head = (lba % (adp->sectors * adp->heads)) / adp->sectors;
467 lba = (sector&0xff) | ((cylinder&0xffff)<<8) | ((head&0xf)<<24);
468 adp->device->flags |= ATA_D_USE_CHS;
471 /* setup first transfer length */
472 request->currentsize = min(request->bytecount, adp->transfersize);
474 devstat_start_transaction(&adp->stats);
476 /* does this drive & transfer work with DMA ? */
477 request->flags &= ~ADR_F_DMA_USED;
478 if (adp->device->mode >= ATA_DMA &&
479 !ata_dmasetup(adp->device->channel, adp->device->unit,
480 request->dmatab, request->data, request->bytecount)) {
481 request->flags |= ADR_F_DMA_USED;
/* DMA moves the entire remaining request in one go */
482 request->currentsize = request->bytecount;
484 /* do we have tags enabled ? */
485 if (adp->flags & AD_F_TAG_ENABLED) {
486 cmd = (request->flags & ADR_F_READ) ?
487 ATA_C_READ_DMA_QUEUED : ATA_C_WRITE_DMA_QUEUED;
/* queued commands carry the tag in the count register (tag << 3) */
489 if (ata_command(adp->device, cmd, lba,
490 request->tag << 3, count, flags)) {
491 ata_prtdev(adp->device, "error executing command");
492 goto transfer_failed;
494 if (ata_wait(adp->device, ATA_S_READY)) {
495 ata_prtdev(adp->device, "timeout waiting for READY\n");
496 goto transfer_failed;
500 /* if ATA bus RELEASE check for SERVICE */
501 if (adp->flags & AD_F_TAG_ENABLED &&
502 ATA_INB(adp->device->channel->r_io, ATA_IREASON) &
504 return ad_service(adp, 1);
/* plain (untagged) DMA command */
507 cmd = (request->flags & ADR_F_READ) ?
508 ATA_C_READ_DMA : ATA_C_WRITE_DMA;
510 if (ata_command(adp->device, cmd, lba, count, 0, flags)) {
511 ata_prtdev(adp->device, "error executing command");
512 goto transfer_failed;
516 * wait for data transfer phase
518 * well this should be here according to the specs, but older
519 * Promise controllers don't like it, they lock up!
521 if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
522 ata_prtdev(adp->device, "timeout waiting for data phase\n");
523 goto transfer_failed;
528 /* start transfer, return and wait for interrupt */
529 ata_dmastart(adp->device->channel, adp->device->unit,
530 request->dmatab, request->flags & ADR_F_READ);
531 return ATA_OP_CONTINUES;
534 /* does this drive support multi sector transfers ? */
535 if (request->currentsize > DEV_BSIZE)
536 cmd = request->flags&ADR_F_READ ? ATA_C_READ_MUL : ATA_C_WRITE_MUL;
538 /* just plain old single sector transfer */
540 cmd = request->flags&ADR_F_READ ? ATA_C_READ : ATA_C_WRITE;
542 if (ata_command(adp->device, cmd, lba, count, 0, flags)){
543 ata_prtdev(adp->device, "error executing command");
544 goto transfer_failed;
548 /* calculate this transfer length */
549 request->currentsize = min(request->bytecount, adp->transfersize);
551 /* if this is a PIO read operation, return and wait for interrupt */
552 if (request->flags & ADR_F_READ)
553 return ATA_OP_CONTINUES;
555 /* ready to write PIO data ? */
556 if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
557 ata_prtdev(adp->device, "timeout waiting for DRQ");
558 goto transfer_failed;
561 /* output the data (16-bit PIO for crippled channels) */
562 if (adp->device->channel->flags & ATA_USE_16BIT)
563 ATA_OUTSW(adp->device->channel->r_io, ATA_DATA,
564 (void *)((uintptr_t)request->data + request->donecount),
565 request->currentsize / sizeof(int16_t));
/* 32-bit PIO otherwise */
567 ATA_OUTSL(adp->device->channel->r_io, ATA_DATA,
568 (void *)((uintptr_t)request->data + request->donecount),
569 request->currentsize / sizeof(int32_t));
570 return ATA_OP_CONTINUES;
/* failure path (transfer_failed label line not visible): disarm the
 * watchdog, invalidate queued tags, then retry or fail with EIO */
573 untimeout((timeout_t *)ad_timeout, request, request->timeout_handle);
574 ad_invalidatequeue(adp, request);
575 printf(" - resetting\n");
577 /* if retries still permit, reinject this request */
578 if (request->retries++ < AD_MAX_RETRIES)
579 TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
581 /* retries all used up, return error */
582 request->bp->b_error = EIO;
583 request->bp->b_flags |= B_ERROR;
584 request->bp->b_resid = request->bytecount;
585 devstat_end_transaction_buf(&adp->stats, request->bp);
586 biodone(request->bp);
/* reset the channel and let the state machine carry on */
589 ata_reinit(adp->device->channel);
590 return ATA_OP_CONTINUES;
/*
 * ad_interrupt: completion handler for an in-flight request.
 * Checks DMA and drive status, reports corrected soft errors, retries
 * UDMA CRC errors, falls back from DMA to PIO on persistent failures,
 * reads in PIO data, and either programs the next chunk or finishes
 * the buf.  (listing incomplete)
 */
594 ad_interrupt(struct ad_request *request)
596 struct ad_softc *adp = request->softc;
599 /* finish DMA transfer */
600 if (request->flags & ADR_F_DMA_USED)
601 dma_stat = ata_dmadone(adp->device->channel);
603 /* do we have a corrected soft error ? */
604 if (adp->device->channel->status & ATA_S_CORR)
605 diskerr(request->bp, "soft error (ECC corrected)", LOG_PRINTF,
606 request->blockaddr + (request->donecount / DEV_BSIZE),
609 /* did any real errors happen ? */
610 if ((adp->device->channel->status & ATA_S_ERROR) ||
611 (request->flags & ADR_F_DMA_USED && dma_stat & ATA_BMSTAT_ERROR)) {
612 adp->device->channel->error =
613 ATA_INB(adp->device->channel->r_io, ATA_ERROR);
614 diskerr(request->bp, (adp->device->channel->error & ATA_E_ICRC) ?
615 "UDMA ICRC error" : "hard error", LOG_PRINTF,
616 request->blockaddr + (request->donecount / DEV_BSIZE),
619 /* if this is a UDMA CRC error, reinject request */
620 if (request->flags & ADR_F_DMA_USED &&
621 adp->device->channel->error & ATA_E_ICRC) {
622 untimeout((timeout_t *)ad_timeout, request,request->timeout_handle);
623 ad_invalidatequeue(adp, request);
625 if (request->retries++ < AD_MAX_RETRIES)
626 printf(" retrying\n");
/* retries exhausted: drop to PIO (-1 disables WDMA/UDMA) */
628 ata_dmainit(adp->device->channel, adp->device->unit,
629 ata_pmode(adp->device->param), -1, -1);
630 printf(" falling back to PIO mode\n");
632 TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
633 return ATA_OP_FINISHED;
636 /* if using DMA, try once again in PIO mode */
637 if (request->flags & ADR_F_DMA_USED) {
638 untimeout((timeout_t *)ad_timeout, request,request->timeout_handle);
639 ad_invalidatequeue(adp, request);
640 ata_dmainit(adp->device->channel, adp->device->unit,
641 ata_pmode(adp->device->param), -1, -1);
642 request->flags |= ADR_F_FORCE_PIO;
643 printf(" trying PIO mode\n");
644 TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
645 return ATA_OP_FINISHED;
/* unrecoverable: mark the request as failed */
648 request->flags |= ADR_F_ERROR;
649 printf(" status=%02x error=%02x\n",
650 adp->device->channel->status, adp->device->channel->error);
653 /* if we arrived here with forced PIO mode, DMA doesn't work right */
654 if (request->flags & ADR_F_FORCE_PIO && !(request->flags & ADR_F_ERROR))
655 ata_prtdev(adp->device, "DMA problem fallback to PIO mode\n");
657 /* if this was a PIO read operation, get the data */
658 if (!(request->flags & ADR_F_DMA_USED) &&
659 (request->flags & (ADR_F_READ | ADR_F_ERROR)) == ADR_F_READ) {
661 /* ready to receive data? */
662 if ((adp->device->channel->status & (ATA_S_READY|ATA_S_DSC|ATA_S_DRQ))
663 != (ATA_S_READY|ATA_S_DSC|ATA_S_DRQ))
664 ata_prtdev(adp->device, "read interrupt arrived early");
666 if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) != 0) {
667 ata_prtdev(adp->device, "read error detected (too) late");
668 request->flags |= ADR_F_ERROR;
671 /* data ready, read in (16-bit PIO for crippled channels) */
672 if (adp->device->channel->flags & ATA_USE_16BIT)
673 ATA_INSW(adp->device->channel->r_io, ATA_DATA,
674 (void*)((uintptr_t)request->data + request->donecount),
675 request->currentsize / sizeof(int16_t));
/* 32-bit PIO otherwise */
677 ATA_INSL(adp->device->channel->r_io, ATA_DATA,
678 (void*)((uintptr_t)request->data + request->donecount),
679 request->currentsize / sizeof(int32_t));
683 /* finish up transfer */
684 if (request->flags & ADR_F_ERROR) {
685 request->bp->b_error = EIO;
686 request->bp->b_flags |= B_ERROR;
/* more to do ? then program the next chunk */
689 request->bytecount -= request->currentsize;
690 request->donecount += request->currentsize;
691 if (request->bytecount > 0) {
692 ad_transfer(request);
693 return ATA_OP_CONTINUES;
697 /* disarm timeout for this transfer */
698 untimeout((timeout_t *)ad_timeout, request, request->timeout_handle);
700 request->bp->b_resid = request->bytecount;
702 devstat_end_transaction_buf(&adp->stats, request->bp);
703 biodone(request->bp);
707 /* check for SERVICE (tagged operations only) */
708 return ad_service(adp, 1);
/*
 * ad_service: service a tagged-queueing SERVICE request from a drive.
 * If 'change' is set and the other device on the channel also runs
 * tagged queueing with outstanding commands, it may be selected
 * instead.  When the selected drive asserts SERVICE, the SERVICE
 * command is issued, the request is looked up by tag and its DMA
 * transfer is restarted.  Returns ATA_OP_CONTINUES while a transfer is
 * in flight, ATA_OP_FINISHED otherwise.  (listing incomplete)
 */
712 ad_service(struct ad_softc *adp, int change)
714 /* do we have to check the other device on this channel ? */
715 if (adp->device->channel->flags & ATA_QUEUED && change) {
716 int device = adp->device->unit;
/* choose the opposite unit if it is a tagged-queueing ATA disk */
718 if (adp->device->unit == ATA_MASTER) {
719 if ((adp->device->channel->devices & ATA_ATA_SLAVE) &&
720 (adp->device->channel->device[SLAVE].driver) &&
721 ((struct ad_softc *) (adp->device->channel->
722 device[SLAVE].driver))->flags & AD_F_TAG_ENABLED)
726 if ((adp->device->channel->devices & ATA_ATA_MASTER) &&
727 (adp->device->channel->device[MASTER].driver) &&
728 ((struct ad_softc *) (adp->device->channel->
729 device[MASTER].driver))->flags & AD_F_TAG_ENABLED)
/* switch selection to the other drive if it has work outstanding */
732 if (device != adp->device->unit &&
734 (adp->device->channel->
735 device[ATA_DEV(device)].driver))->outstanding > 0) {
736 ATA_OUTB(adp->device->channel->r_io, ATA_DRIVE, ATA_D_IBM | device);
737 adp = adp->device->channel->device[ATA_DEV(device)].driver;
/* sample device status through the alternate status register */
741 adp->device->channel->status =
742 ATA_INB(adp->device->channel->r_altio, ATA_ALTSTAT);
744 /* do we have a SERVICE request from the drive ? */
745 if (adp->flags & AD_F_TAG_ENABLED &&
746 adp->outstanding > 0 &&
747 adp->device->channel->status & ATA_S_SERVICE) {
748 struct ad_request *request;
751 /* check for error */
752 if (adp->device->channel->status & ATA_S_ERROR) {
753 ata_prtdev(adp->device, "Oops! controller says s=0x%02x e=0x%02x\n",
754 adp->device->channel->status,
755 adp->device->channel->error);
756 ad_invalidatequeue(adp, NULL);
757 return ATA_OP_FINISHED;
760 /* issue SERVICE cmd */
761 if (ata_command(adp->device, ATA_C_SERVICE, 0, 0, 0, ATA_IMMEDIATE)) {
762 ata_prtdev(adp->device, "problem executing SERVICE cmd\n");
763 ad_invalidatequeue(adp, NULL);
764 return ATA_OP_FINISHED;
767 /* setup the transfer environment when ready */
768 if (ata_wait(adp->device, ATA_S_READY)) {
769 ata_prtdev(adp->device, "SERVICE timeout tag=%d s=%02x e=%02x\n",
770 ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3,
771 adp->device->channel->status,
772 adp->device->channel->error);
773 ad_invalidatequeue(adp, NULL);
774 return ATA_OP_FINISHED;
/* the tag being serviced is reported in the count register (>> 3) */
776 tag = ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3;
777 if (!(request = adp->tags[tag])) {
778 ata_prtdev(adp->device, "no request for tag=%d\n", tag);
779 ad_invalidatequeue(adp, NULL);
780 return ATA_OP_FINISHED;
782 ATA_FORCELOCK_CH(adp->device->channel, ATA_ACTIVE_ATA);
783 adp->device->channel->running = request;
786 /* start DMA transfer when ready */
787 if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
788 ata_prtdev(adp->device, "timeout starting DMA s=%02x e=%02x\n",
789 adp->device->channel->status,
790 adp->device->channel->error);
791 ad_invalidatequeue(adp, NULL);
792 return ATA_OP_FINISHED;
794 ata_dmastart(adp->device->channel, adp->device->unit,
795 request->dmatab, request->flags & ADR_F_READ);
796 return ATA_OP_CONTINUES;
798 return ATA_OP_FINISHED;
/*
 * ad_free: release a request's DMA table and clear its tag slot.
 * (listing incomplete -- freeing of the request itself not visible)
 */
802 ad_free(struct ad_request *request)
807 free(request->dmatab, M_DEVBUF);
808 request->softc->tags[request->tag] = NULL;
/*
 * ad_invalidatequeue: with tagged queueing active, pull every tagged
 * request except 'request' (which may be NULL) off the tag array and
 * reinject it at the head of the controller queue, then tell the drive
 * to discard its internal command queue.
 */
814 ad_invalidatequeue(struct ad_softc *adp, struct ad_request *request)
816 /* if tags used invalidate all other tagged transfers */
817 if (adp->flags & AD_F_TAG_ENABLED) {
818 struct ad_request *tmpreq;
821 ata_prtdev(adp->device, "invalidating queued requests\n");
822 for (tag = 0; tag <= adp->num_tags; tag++) {
823 tmpreq = adp->tags[tag];
824 adp->tags[tag] = NULL;
/* skip empty slots and the caller's own request */
825 if (tmpreq == request || tmpreq == NULL)
827 untimeout((timeout_t *)ad_timeout, tmpreq, tmpreq->timeout_handle);
828 TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, tmpreq, chain);
/* NOP with the flush-queue feature discards the drive's queue */
830 if (ata_command(adp->device, ATA_C_NOP,
831 0, 0, ATA_C_F_FLUSHQUEUE, ATA_WAIT_READY))
832 ata_prtdev(adp->device, "flush queue failed\n");
833 adp->outstanding = 0;
/*
 * ad_tagsupported: decide whether tagged queueing may be used with
 * this drive.  Requires DMA mode and IDENTIFY queueing support, and the
 * drive model must be on the known-good list (IBM DPTA/DTLA or newer
 * "IC..AT/AV" names); certain controllers are excluded outright.
 * (listing incomplete -- return statements not visible)
 */
838 ad_tagsupported(struct ad_softc *adp)
840 const char *good[] = {"IBM-DPTA", "IBM-DTLA", NULL};
/* blacklist by controller chip */
843 switch (adp->device->channel->chiptype) {
844 case 0x4d33105a: /* Promises before TX2 doesn't work with tagged queuing */
851 /* check that drive does DMA, has tags enabled, and is one we know works */
852 if (adp->device->mode >= ATA_DMA && adp->device->param->support.queued &&
853 adp->device->param->enabled.queued) {
854 while (good[i] != NULL) {
855 if (!strncmp(adp->device->param->model, good[i], strlen(good[i])))
860 * check IBM's new obscure way of naming drives
861 * we want "IC" (IBM CORP) and "AT" or "AV" (ATA interface)
862 * but don't care about the other info (size, capacity etc)
864 if (!strncmp(adp->device->param->model, "IC", 2) &&
865 (!strncmp(adp->device->param->model + 8, "AT", 2) ||
866 !strncmp(adp->device->param->model + 8, "AV", 2)))
/*
 * ad_timeout: watchdog handler for a request (armed for 10s in
 * ad_transfer).  Retries the request, degrading from DMA to PIO on the
 * final retry, and fails the buf with EIO once retries are exhausted.
 * (listing incomplete)
 */
873 ad_timeout(struct ad_request *request)
875 struct ad_softc *adp = request->softc;
877 adp->device->channel->running = NULL;
878 ata_prtdev(adp->device, "%s command timeout tag=%d serv=%d - resetting\n",
879 (request->flags & ADR_F_READ) ? "READ" : "WRITE",
880 request->tag, request->serv);
882 if (request->flags & ADR_F_DMA_USED) {
883 ata_dmadone(adp->device->channel);
884 ad_invalidatequeue(adp, request);
/* on the last DMA retry, give up on DMA and restart the retry count */
885 if (request->retries == AD_MAX_RETRIES) {
886 ata_dmainit(adp->device->channel, adp->device->unit,
887 ata_pmode(adp->device->param), -1, -1);
888 ata_prtdev(adp->device, "trying fallback to PIO mode\n");
889 request->retries = 0;
893 /* if retries still permit, reinject this request */
894 if (request->retries++ < AD_MAX_RETRIES) {
895 TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
898 /* retries all used up, return error */
899 request->bp->b_error = EIO;
900 request->bp->b_flags |= B_ERROR;
901 devstat_end_transaction_buf(&adp->stats, request->bp);
902 biodone(request->bp);
/* reset the channel to recover the hardware */
905 ata_reinit(adp->device->channel);
/*
 * ad_reinit: reprogram drive settings after a channel reset --
 * invalidate queued tags, re-arm multi-sector mode and restore the
 * DMA (or PIO-only) transfer mode.
 */
909 ad_reinit(struct ata_device *atadev)
911 struct ad_softc *adp = atadev->driver;
913 /* reinit disk parameters */
914 ad_invalidatequeue(atadev->driver, NULL);
915 ata_command(atadev, ATA_C_SET_MULTI, 0,
916 adp->transfersize / DEV_BSIZE, 0, ATA_WAIT_READY);
917 if (adp->device->mode >= ATA_DMA)
918 ata_dmainit(atadev->channel, atadev->unit,
919 ata_pmode(adp->device->param),
920 ata_wmode(adp->device->param),
921 ata_umode(adp->device->param));
/* PIO-only case: -1 disables WDMA/UDMA mode selection */
923 ata_dmainit(atadev->channel, atadev->unit,
924 ata_pmode(adp->device->param), -1, -1);
/*
 * ad_print: announce the drive on the console at attach time.  Two
 * output formats are visible here -- a verbose multi-line report and a
 * compact single line (presumably selected by a bootverbose test that
 * is not visible in this listing).
 */
928 ad_print(struct ad_softc *adp)
/* verbose form: model/revision, geometry and mode details */
931 ata_prtdev(adp->device, "<%.40s/%.8s> ATA-%d disk at ata%d-%s\n",
932 adp->device->param->model, adp->device->param->revision,
933 ad_version(adp->device->param->version_major),
934 device_get_unit(adp->device->channel->dev),
935 (adp->device->unit == ATA_MASTER) ? "master" : "slave");
937 ata_prtdev(adp->device,
938 "%lluMB (%llu sectors), %llu C, %u H, %u S, %u B\n",
939 (unsigned long long)(adp->total_secs /
940 ((1024L*1024L)/DEV_BSIZE)),
941 (unsigned long long) adp->total_secs,
942 (unsigned long long) (adp->total_secs /
943 (adp->heads * adp->sectors)),
944 adp->heads, adp->sectors, DEV_BSIZE);
946 ata_prtdev(adp->device, "%d secs/int, %d depth queue, %s%s\n",
947 adp->transfersize / DEV_BSIZE, adp->num_tags + 1,
948 (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
949 ata_mode2str(adp->device->mode));
951 ata_prtdev(adp->device, "piomode=%d dmamode=%d udmamode=%d cblid=%d\n",
952 ata_pmode(adp->device->param), ata_wmode(adp->device->param),
953 ata_umode(adp->device->param),
954 adp->device->param->hwres_cblid);
/* compact single-line form */
958 ata_prtdev(adp->device,"%lluMB <%.40s> [%lld/%d/%d] at ata%d-%s %s%s\n",
959 (unsigned long long)(adp->total_secs /
960 ((1024L * 1024L) / DEV_BSIZE)),
961 adp->device->param->model,
962 (unsigned long long)(adp->total_secs /
963 (adp->heads*adp->sectors)),
964 adp->heads, adp->sectors,
965 device_get_unit(adp->device->channel->dev),
966 (adp->device->unit == ATA_MASTER) ? "master" : "slave",
967 (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
968 ata_mode2str(adp->device->mode));
/*
 * ad_version: map the IDENTIFY version_major bit field to an ATA spec
 * level by scanning for the highest set bit; 0xffff means the field is
 * not reported.  (definition continues past the end of this listing)
 */
972 ad_version(u_int16_t version)
976 if (version == 0xffff)
978 for (bit = 15; bit >= 0; bit--)
979 if (version & (1<<bit))