2 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * $FreeBSD: src/sys/dev/ata/ata-queue.c,v 1.67 2007/01/27 21:15:58 remko Exp $
27 * $DragonFly: src/sys/dev/disk/nata/ata-queue.c,v 1.11 2008/09/23 17:43:41 dillon Exp $
32 #include <sys/param.h>
34 #include <sys/callout.h>
36 #include <sys/queue.h>
37 #include <sys/spinlock2.h>
39 #include <sys/systm.h>
40 #include <sys/taskqueue.h>
/* Forward declarations for file-local helpers. */
46 static void ata_completed(void *, int);
47 static void ata_sort_queue(struct ata_channel *ch, struct ata_request *request);
48 static void atawritereorder(struct ata_channel *ch);
49 static char *ata_skey2str(u_int8_t);
52 ata_queue_init(struct ata_channel *ch)
54 TAILQ_INIT(&ch->ata_queue);
56 ch->transition = NULL;
/*
 * ata_queue_request():  submit a request to the channel it belongs to.
 * Control requests issued while the channel is in ATA_STALL_QUEUE state
 * bypass the queue and are handed to the hardware directly; everything
 * else is inserted on the locked channel queue (head, sorted, or tail,
 * depending on the request flags).  Callers that supply no completion
 * callback sleep here until the request completes.
 * NOTE(review): this extract is missing several original lines (blank
 * lines, closing braces, early returns); comments describe only the
 * visible code.
 */
60 ata_queue_request(struct ata_request *request)
62 struct ata_channel *ch;
64 /* treat request as virgin (this might be an ATA_R_REQUEUE) */
65 request->result = request->status = request->error = 0;
67 /* check that the device is still valid */
68 if (!(request->parent = device_get_parent(request->dev))) {
69 request->result = ENXIO;
70 if (request->callback)
71 (request->callback)(request);
74 ch = device_get_softc(request->parent);
75 callout_init_mp(&request->callout); /* serialization done via state_mtx */
/* sleeping (callback-less) callers synchronize on the per-request "done" lock */
76 if (!request->callback && !(request->flags & ATA_R_REQUEUE))
77 spin_init(&request->done);
79 /* in ATA_STALL_QUEUE state we call HW directly */
80 if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) {
81 spin_lock_wr(&ch->state_mtx);
82 ch->running = request;
83 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
85 if (!request->callback)
86 spin_uninit(&request->done);
87 spin_unlock_wr(&ch->state_mtx);
90 /* interlock against interrupt */
91 request->flags |= ATA_R_HWCMDQUEUED;
92 spin_unlock_wr(&ch->state_mtx);
94 /* otherwise put request on the locked queue at the specified location */
96 spin_lock_wr(&ch->queue_mtx);
97 if (request->flags & ATA_R_AT_HEAD) {
98 TAILQ_INSERT_HEAD(&ch->ata_queue, request, chain);
99 } else if (request->flags & ATA_R_ORDERED) {
100 ata_sort_queue(ch, request);
/* plain tail insert: ordering forced, so reset the read/write transition point */
102 TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
103 ch->transition = NULL;
105 spin_unlock_wr(&ch->queue_mtx);
106 ATA_DEBUG_RQ(request, "queued");
110 /* if this is a requeued request callback/sleep we're done */
111 if (request->flags & ATA_R_REQUEUE)
114 /* if this is not a callback wait until request is completed */
115 if (!request->callback) {
116 ATA_DEBUG_RQ(request, "wait for completion");
118 /* interlock against wakeup */
119 spin_lock_wr(&request->done);
120 /* check if the request was completed already */
121 if (!(request->flags & ATA_R_COMPLETED))
122 ssleep(request, &request->done, 0, "ATA request completion "
123 "wait", request->timeout * hz * 4);
124 spin_unlock_wr(&request->done);
125 /* check if the request was completed while sleeping */
126 if (!(request->flags & ATA_R_COMPLETED)) {
/* watchdog path: taskqueue never completed it; flag and complete directly */
128 device_printf(request->dev, "WARNING - %s taskqueue timeout - "
129 "completing request directly\n",
130 ata_cmd2str(request));
131 request->flags |= ATA_R_DANGER1;
132 ata_completed(request, 0);
135 spin_uninit(&request->done);
140 ata_controlcmd(device_t dev, u_int8_t command, u_int16_t feature,
141 u_int64_t lba, u_int16_t count)
143 struct ata_request *request = ata_alloc_request();
148 request->u.ata.command = command;
149 request->u.ata.lba = lba;
150 request->u.ata.count = count;
151 request->u.ata.feature = feature;
152 request->flags = ATA_R_CONTROL;
153 request->timeout = 1;
154 request->retries = 0;
155 ata_queue_request(request);
156 error = request->result;
157 ata_free_request(request);
163 ata_atapicmd(device_t dev, u_int8_t *ccb, caddr_t data,
164 int count, int flags, int timeout)
166 struct ata_request *request = ata_alloc_request();
167 struct ata_device *atadev = device_get_softc(dev);
172 if ((atadev->param.config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12)
173 bcopy(ccb, request->u.atapi.ccb, 12);
175 bcopy(ccb, request->u.atapi.ccb, 16);
176 request->data = data;
177 request->bytecount = count;
178 request->transfersize = min(request->bytecount, 65534);
179 request->flags = flags | ATA_R_ATAPI;
180 request->timeout = timeout;
181 request->retries = 0;
182 ata_queue_request(request);
183 error = request->result;
184 ata_free_request(request);
/*
 * ata_start():  try to start the first queued request on a channel.
 * Takes the channel lock via ATA_LOCKING, checks composite write
 * dependencies, and if the channel is ATA_IDLE with no dependencies,
 * removes the request from the queue and hands it to the hardware.
 * NOTE(review): extract is missing some original lines (braces/returns);
 * lock release order below is as visible, not necessarily complete.
 */
190 ata_start(device_t dev)
192 struct ata_channel *ch = device_get_softc(dev);
193 struct ata_request *request;
194 struct ata_composite *cptr;
195 int dependencies = 0;
197 /* if we have a request on the queue try to get it running */
198 spin_lock_wr(&ch->queue_mtx);
199 if ((request = TAILQ_FIRST(&ch->ata_queue))) {
201 /* we need the locking function to get the lock for this channel */
202 if (ATA_LOCKING(dev, ATA_LF_LOCK) == ch->unit) {
204 /* check for composite dependencies */
205 if ((cptr = request->composite)) {
206 spin_lock_wr(&cptr->lock);
/* a write may not start until all reads it depends on are done */
207 if ((request->flags & ATA_R_WRITE) &&
208 (cptr->wr_depend & cptr->rd_done) != cptr->wr_depend) {
211 spin_unlock_wr(&cptr->lock);
214 /* check we are in the right state and have no dependencies */
215 spin_lock_wr(&ch->state_mtx);
216 if (ch->state == ATA_IDLE && !dependencies) {
217 ATA_DEBUG_RQ(request, "starting");
/* keep the sort transition pointer valid when removing its target */
219 if (ch->transition == request)
220 ch->transition = TAILQ_NEXT(request, chain);
221 TAILQ_REMOVE(&ch->ata_queue, request, chain);
222 ch->running = request;
223 ch->state = ATA_ACTIVE;
225 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
227 ch->state = ATA_IDLE;
228 spin_unlock_wr(&ch->state_mtx);
229 spin_unlock_wr(&ch->queue_mtx);
230 ATA_LOCKING(dev, ATA_LF_UNLOCK);
235 /* interlock against interrupt */
236 request->flags |= ATA_R_HWCMDQUEUED;
239 spin_unlock_wr(&ch->state_mtx);
240 spin_unlock_wr(&ch->queue_mtx);
241 while (!ata_interrupt(ch))
246 spin_unlock_wr(&ch->state_mtx);
249 spin_unlock_wr(&ch->queue_mtx);
/*
 * ata_finish():  route a finished request to its completion handler.
 * Either calls ata_completed() directly (stalled queue / ATA_R_DIRECT)
 * or defers it to the SWI taskqueue.
 * NOTE(review): the head of the direct-call condition (original line 261)
 * is missing from this extract; only its tail is visible at "262".
 */
253 ata_finish(struct ata_request *request)
255 struct ata_channel *ch = device_get_softc(request->parent);
258 * if in ATA_STALL_QUEUE state or request has ATA_R_DIRECT flags set
259 * we need to call ata_complete() directly here (no taskqueue involvement)
262 (ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT)) {
263 ATA_DEBUG_RQ(request, "finish directly");
264 ata_completed(request, 0);
267 /* put request on the proper taskqueue for completion */
268 /* XXX FreeBSD has some sort of bio_taskqueue code here */
269 TASK_INIT(&request->task, 0, ata_completed, request);
270 ATA_DEBUG_RQ(request, "finish taskqueue_swi");
271 taskqueue_enqueue(taskqueue_swi, &request->task);
/*
 * ata_completed():  completion handler for a request, run either from the
 * SWI taskqueue or directly via ata_finish().  Handles, in order:
 * zombie-request detection, timeout retry/reinjection, soft-ECC and UDMA
 * CRC warnings, per-protocol (ATA vs ATAPI) error decoding including
 * auto-issuing REQUEST SENSE and sense-key interpretation, composite
 * request bookkeeping, and finally callback/wakeup of the initiator.
 * 'dummy' is the unused taskqueue pending-count argument.
 * NOTE(review): this extract is missing many original lines (braces,
 * returns, case labels); comments describe only the visible code.
 */
276 ata_completed(void *context, int dummy)
278 struct ata_request *request = (struct ata_request *)context;
279 struct ata_channel *ch = device_get_softc(request->parent);
280 struct ata_device *atadev = device_get_softc(request->dev);
281 struct ata_composite *composite;
/* second visit to a request the sleeper already completed: just free it */
283 if (request->flags & ATA_R_DANGER2) {
284 device_printf(request->dev,
285 "WARNING - %s freeing taskqueue zombie request\n",
286 ata_cmd2str(request));
287 request->flags &= ~(ATA_R_DANGER1 | ATA_R_DANGER2);
288 ata_free_request(request);
/* first visit after a taskqueue timeout: arm the zombie marker */
291 if (request->flags & ATA_R_DANGER1)
292 request->flags |= ATA_R_DANGER2;
294 ATA_DEBUG_RQ(request, "completed entered");
296 /* if we had a timeout, reinit channel and deal with the falldown */
297 if (request->flags & ATA_R_TIMEOUT) {
299 * if the channel is still present and
300 * reinit succeeds and
301 * the device doesn't get detached and
302 * there are retries left we reinject this request
304 if (ch && !ata_reinit(ch->dev) && !request->result &&
305 (request->retries-- > 0)) {
306 if (!(request->flags & ATA_R_QUIET)) {
307 device_printf(request->dev,
308 "TIMEOUT - %s retrying (%d retr%s left)",
309 ata_cmd2str(request), request->retries,
310 request->retries == 1 ? "y" : "ies");
311 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
312 kprintf(" LBA=%ju", request->u.ata.lba);
/* requeue at the head with the timeout state cleared */
315 request->flags &= ~(ATA_R_TIMEOUT | ATA_R_DEBUG);
316 request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE);
317 ATA_DEBUG_RQ(request, "completed reinject");
318 ata_queue_request(request);
322 /* ran out of good intentions so finish with error */
323 if (!request->result) {
324 if (!(request->flags & ATA_R_QUIET)) {
326 device_printf(request->dev, "FAILURE - %s timed out",
327 ata_cmd2str(request));
328 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
329 kprintf(" LBA=%ju", request->u.ata.lba);
333 request->result = EIO;
/* non-timeout completion of a plain ATA (non-ATAPI) request */
336 else if (!(request->flags & ATA_R_ATAPI) ){
337 /* if this is a soft ECC error warn about it */
338 /* XXX SOS we could do WARF here */
339 if ((request->status & (ATA_S_CORR | ATA_S_ERROR)) == ATA_S_CORR) {
340 device_printf(request->dev,
341 "WARNING - %s soft error (ECC corrected)",
342 ata_cmd2str(request));
343 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
344 kprintf(" LBA=%ju", request->u.ata.lba);
348 /* if this is a UDMA CRC error we reinject if there are retries left */
349 if (request->flags & ATA_R_DMA && request->error & ATA_E_ICRC) {
350 if (request->retries-- > 0) {
351 device_printf(request->dev,
352 "WARNING - %s UDMA ICRC error (retrying request)",
353 ata_cmd2str(request));
354 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
355 kprintf(" LBA=%ju", request->u.ata.lba);
357 request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE);
358 ata_queue_request(request);
/* per-protocol result decoding (0 == plain ATA, else ATAPI) */
364 switch (request->flags & ATA_R_ATAPI) {
/* plain ATA: decode status/error registers into a printable failure */
368 if (!request->result && request->status & ATA_S_ERROR) {
369 if (!(request->flags & ATA_R_QUIET)) {
370 device_printf(request->dev,
371 "FAILURE - %s status=%b error=%b",
372 ata_cmd2str(request),
373 request->status, "\20\10BUSY\7READY\6DMA_READY"
374 "\5DSC\4DRQ\3CORRECTABLE\2INDEX\1ERROR",
375 request->error, "\20\10ICRC\7UNCORRECTABLE"
376 "\6MEDIA_CHANGED\5NID_NOT_FOUND"
377 "\4MEDIA_CHANGE_REQEST"
378 "\3ABORTED\2NO_MEDIA\1ILLEGAL_LENGTH");
379 if ((request->flags & ATA_R_DMA) &&
380 (request->dmastat & ATA_BMSTAT_ERROR))
381 kprintf(" dma=0x%02x", request->dmastat);
382 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
383 kprintf(" LBA=%ju", request->u.ata.lba);
386 request->result = EIO;
392 /* skip if result already set */
396 /* if we have a sensekey -> request sense from device */
397 if ((request->error & ATA_E_ATAPI_SENSE_MASK) &&
398 (request->u.atapi.ccb[0] != ATAPI_REQUEST_SENSE)) {
399 static u_int8_t ccb[16] = { ATAPI_REQUEST_SENSE, 0, 0, 0,
400 sizeof(struct atapi_sense),
401 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
/* repurpose this request as a REQUEST SENSE and requeue it at the head */
403 request->u.atapi.saved_cmd = request->u.atapi.ccb[0];
404 bcopy(ccb, request->u.atapi.ccb, 16);
405 request->data = (caddr_t)&request->u.atapi.sense;
406 request->bytecount = sizeof(struct atapi_sense);
407 request->donecount = 0;
408 request->transfersize = sizeof(struct atapi_sense);
409 request->timeout = ATA_DEFAULT_TIMEOUT;
410 request->flags &= (ATA_R_ATAPI | ATA_R_QUIET);
411 request->flags |= (ATA_R_READ | ATA_R_AT_HEAD | ATA_R_REQUEUE);
412 ATA_DEBUG_RQ(request, "autoissue request sense");
413 ata_queue_request(request);
/* sense data is in: map the sense key to a result code / warning */
417 switch (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK) {
418 case ATA_SENSE_RECOVERED_ERROR:
419 device_printf(request->dev, "WARNING - %s recovered error\n",
420 ata_cmd2str(request));
423 case ATA_SENSE_NO_SENSE:
427 case ATA_SENSE_NOT_READY:
428 request->result = EBUSY;
431 case ATA_SENSE_UNIT_ATTENTION:
432 atadev->flags |= ATA_D_MEDIA_CHANGED;
433 request->result = EIO;
437 request->result = EIO;
438 if (request->flags & ATA_R_QUIET)
441 device_printf(request->dev,
442 "FAILURE - %s %s asc=0x%02x ascq=0x%02x ",
443 ata_cmd2str(request), ata_skey2str(
444 (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK)),
445 request->u.atapi.sense.asc,
446 request->u.atapi.sense.ascq);
447 if (request->u.atapi.sense.specific & ATA_SENSE_SPEC_VALID)
448 kprintf("sks=0x%02x 0x%02x 0x%02x\n",
449 request->u.atapi.sense.specific & ATA_SENSE_SPEC_MASK,
450 request->u.atapi.sense.specific1,
451 request->u.atapi.sense.specific2);
/* NOTE(review): the ternary below is truncated in this extract (line 458 missing) */
456 if ((request->u.atapi.sense.key & ATA_SENSE_KEY_MASK ?
457 request->u.atapi.sense.key & ATA_SENSE_KEY_MASK :
459 request->result = EIO;
462 ATA_DEBUG_RQ(request, "completed callback/wakeup");
464 /* if we are part of a composite operation we need to maintain progress */
465 if ((composite = request->composite)) {
468 spin_lock_wr(&composite->lock);
470 /* update what's done */
471 if (request->flags & ATA_R_READ)
472 composite->rd_done |= (1 << request->this);
473 if (request->flags & ATA_R_WRITE)
474 composite->wr_done |= (1 << request->this);
476 /* find ready to go dependencies */
477 if (composite->wr_depend &&
478 (composite->rd_done & composite->wr_depend)==composite->wr_depend &&
479 (composite->wr_needed & (~composite->wr_done))) {
480 index = composite->wr_needed & ~composite->wr_done;
483 spin_unlock_wr(&composite->lock);
485 /* if we have any ready candidates kick them off */
489 for (bit = 0; bit < MAX_COMPOSITES; bit++) {
490 if (index & (1 << bit))
491 ata_start(device_get_parent(composite->request[bit]->dev));
496 /* get results back to the initiator for this request */
497 if (request->callback)
498 (request->callback)(request);
/* wake any sleeper in ata_queue_request(); COMPLETED is set under the lock */
500 spin_lock_wr(&request->done);
501 request->flags |= ATA_R_COMPLETED;
502 spin_unlock_wr(&request->done);
506 /* only call ata_start if channel is present */
/*
 * ata_timeout():  callout handler fired when a running request exceeds
 * its timeout.  Flags the request ATA_R_TIMEOUT under state_mtx so the
 * normal completion path (ata_finish/ata_completed) handles the fallout.
 */
512 ata_timeout(struct ata_request *request)
514 struct ata_channel *ch = device_get_softc(request->parent);
516 /* acquire state_mtx, softclock_handler() doesn't do this for us */
517 spin_lock_wr(&ch->state_mtx);
519 /*request->flags |= ATA_R_DEBUG;*/
520 ATA_DEBUG_RQ(request, "timeout");
523 * if we have an ATA_ACTIVE request running, we flag the request
524 * ATA_R_TIMEOUT so ata_finish will handle it correctly
525 * also NULL out the running request so we won't lose
526 * the race with an eventual interrupt arriving late
528 if (ch->state == ATA_ACTIVE) {
529 request->flags |= ATA_R_TIMEOUT;
530 spin_unlock_wr(&ch->state_mtx);
531 ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
535 spin_unlock_wr(&ch->state_mtx);
/*
 * ata_fail_requests():  fail the running request and all queued requests
 * on the channel, restricted to device 'dev' when it is non-NULL.
 * Requests are collected on a local list under both channel locks, then
 * completed with ENXIO after the locks are dropped.
 */
540 ata_fail_requests(device_t dev)
542 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
543 struct ata_request *request, *tmp;
544 TAILQ_HEAD(, ata_request) fail_requests;
545 TAILQ_INIT(&fail_requests);
547 /* grab all channel locks to avoid races */
548 spin_lock_wr(&ch->queue_mtx);
549 spin_lock_wr(&ch->state_mtx);
551 /* do we have any running request to care about ? */
552 if ((request = ch->running) && (!dev || request->dev == dev)) {
553 callout_stop(&request->callout);
555 request->result = ENXIO;
556 TAILQ_INSERT_TAIL(&fail_requests, request, chain);
559 /* fail all requests queued on this channel for device dev if !NULL */
560 TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) {
561 if (!dev || request->dev == dev) {
/* keep the sort transition pointer valid while unlinking */
562 if (ch->transition == request)
563 ch->transition = TAILQ_NEXT(request, chain);
564 TAILQ_REMOVE(&ch->ata_queue, request, chain);
565 request->result = ENXIO;
566 TAILQ_INSERT_TAIL(&fail_requests, request, chain);
570 spin_unlock_wr(&ch->state_mtx);
571 spin_unlock_wr(&ch->queue_mtx);
573 /* finish up all requests collected above */
574 TAILQ_FOREACH_MUTABLE(request, &fail_requests, chain, tmp) {
575 TAILQ_REMOVE(&fail_requests, request, chain);
/*
 * ata_get_lba():  extract the logical block address of a request.
 * For ATAPI requests the LBA is pulled out of the CCB bytes (big-endian
 * fields at ccb[2..5] or ccb[2..4] depending on the command form); for
 * plain ATA requests it is u.ata.lba.
 * NOTE(review): several case labels are missing from this extract
 * (original lines 585, 587, 590-591).
 */
581 ata_get_lba(struct ata_request *request)
583 if (request->flags & ATA_R_ATAPI) {
584 switch (request->u.atapi.ccb[0]) {
586 case ATAPI_WRITE_BIG:
588 return (request->u.atapi.ccb[5]) | (request->u.atapi.ccb[4]<<8) |
589 (request->u.atapi.ccb[3]<<16)|(request->u.atapi.ccb[2]<<24);
592 return (request->u.atapi.ccb[4]) | (request->u.atapi.ccb[3]<<8) |
593 (request->u.atapi.ccb[2]<<16);
599 return request->u.ata.lba;
603 * This implements exactly bioqdisksort() in the DragonFly kernel.
604 * The short description is: Because megabytes and megabytes worth of
605 * writes can be queued there needs to be a read-prioritization mechanism
606 * or reads get completely starved out.
/*
 * Reads are inserted ahead of the first queued write (ch->transition);
 * every bioq_reorder_minor_interval such insertions, atawritereorder()
 * advances the transition point so writes still make progress.  Writes
 * are appended and become the transition point if none exists.
 */
609 ata_sort_queue(struct ata_channel *ch, struct ata_request *request)
611 if ((request->flags & ATA_R_WRITE) == 0) {
612 if (ch->transition) {
614 * Insert before the first write
616 TAILQ_INSERT_BEFORE(ch->transition, request, chain);
617 if (++ch->reorder >= bioq_reorder_minor_interval) {
623 * No writes queued (or ordering was forced),
626 TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
630 * Writes are always appended. If no writes were previously
631 * queued or an ordered tail insertion occurred the transition
632 * field will be NULL.
634 TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
635 if (ch->transition == NULL)
636 ch->transition = request;
/* composite requests must not be reordered across; reset the point */
638 if (request->composite) {
639 ch->transition = NULL;
645 * Move the transition point to prevent reads from completely
646 * starving our writes. This brings a number of writes into
647 * the fold every N reads.
/*
 * Walks contiguous (same-LBA-run) writes starting at ch->transition and
 * advances the transition pointer past them, bounded by
 * bioq_reorder_minor_bytes worth of data ('left').
 * NOTE(review): loop-interior lines are missing from this extract
 * (e.g. the declaration of 'n' and the 'left' accounting).
 */
650 atawritereorder(struct ata_channel *ch)
652 struct ata_request *req;
653 u_int64_t next_offset;
654 size_t left = (size_t)bioq_reorder_minor_bytes;
657 next_offset = ata_get_lba(ch->transition);
658 while ((req = ch->transition) != NULL &&
659 next_offset == ata_get_lba(req)) {
660 n = req->u.ata.count;
661 next_offset = ata_get_lba(req);
662 ch->transition = TAILQ_NEXT(req, chain);
/*
 * ata_cmd2str():  return a printable name for the request's command,
 * decoding ATAPI CCB opcodes or ATA command bytes (with a sub-decode of
 * SETFEATURES subcommands).  Unknown commands are formatted into a
 * static buffer — the returned pointer may reference that static
 * buffer, so the result is not reentrant and must be consumed promptly.
 * For ATAPI requests that were repurposed into REQUEST SENSE, the
 * originally saved command byte is decoded instead.
 */
670 ata_cmd2str(struct ata_request *request)
672 static char buffer[20];
674 if (request->flags & ATA_R_ATAPI) {
675 switch (request->u.atapi.sense.key ?
676 request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) {
677 case 0x00: return ("TEST_UNIT_READY");
678 case 0x01: return ("REZERO");
679 case 0x03: return ("REQUEST_SENSE");
680 case 0x04: return ("FORMAT");
681 case 0x08: return ("READ");
682 case 0x0a: return ("WRITE");
683 case 0x10: return ("WEOF");
684 case 0x11: return ("SPACE");
685 case 0x12: return ("INQUIRY");
686 case 0x15: return ("MODE_SELECT");
687 case 0x19: return ("ERASE");
688 case 0x1a: return ("MODE_SENSE");
689 case 0x1b: return ("START_STOP");
690 case 0x1e: return ("PREVENT_ALLOW");
691 case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
692 case 0x25: return ("READ_CAPACITY");
693 case 0x28: return ("READ_BIG");
694 case 0x2a: return ("WRITE_BIG");
695 case 0x2b: return ("LOCATE");
696 case 0x34: return ("READ_POSITION");
697 case 0x35: return ("SYNCHRONIZE_CACHE");
698 case 0x3b: return ("WRITE_BUFFER");
699 case 0x3c: return ("READ_BUFFER");
700 case 0x42: return ("READ_SUBCHANNEL");
701 case 0x43: return ("READ_TOC");
702 case 0x45: return ("PLAY_10");
703 case 0x47: return ("PLAY_MSF");
704 case 0x48: return ("PLAY_TRACK");
705 case 0x4b: return ("PAUSE");
706 case 0x51: return ("READ_DISK_INFO");
707 case 0x52: return ("READ_TRACK_INFO");
708 case 0x53: return ("RESERVE_TRACK");
709 case 0x54: return ("SEND_OPC_INFO");
710 case 0x55: return ("MODE_SELECT_BIG");
711 case 0x58: return ("REPAIR_TRACK");
712 case 0x59: return ("READ_MASTER_CUE");
713 case 0x5a: return ("MODE_SENSE_BIG");
714 case 0x5b: return ("CLOSE_TRACK/SESSION");
715 case 0x5c: return ("READ_BUFFER_CAPACITY");
716 case 0x5d: return ("SEND_CUE_SHEET");
717 case 0x96: return ("READ_CAPACITY_16");
718 case 0xa1: return ("BLANK_CMD");
719 case 0xa3: return ("SEND_KEY");
720 case 0xa4: return ("REPORT_KEY");
721 case 0xa5: return ("PLAY_12");
722 case 0xa6: return ("LOAD_UNLOAD");
723 case 0xad: return ("READ_DVD_STRUCTURE");
724 case 0xb4: return ("PLAY_CD");
725 case 0xbb: return ("SET_SPEED");
726 case 0xbd: return ("MECH_STATUS");
727 case 0xbe: return ("READ_CD");
728 case 0xff: return ("POLL_DSC");
/* plain ATA command bytes */
732 switch (request->u.ata.command) {
733 case 0x00: return ("NOP");
734 case 0x08: return ("DEVICE_RESET");
735 case 0x20: return ("READ");
736 case 0x24: return ("READ48");
737 case 0x25: return ("READ_DMA48");
738 case 0x26: return ("READ_DMA_QUEUED48");
739 case 0x29: return ("READ_MUL48");
740 case 0x30: return ("WRITE");
741 case 0x34: return ("WRITE48");
742 case 0x35: return ("WRITE_DMA48");
743 case 0x36: return ("WRITE_DMA_QUEUED48");
744 case 0x39: return ("WRITE_MUL48");
745 case 0x70: return ("SEEK");
746 case 0xa0: return ("PACKET_CMD");
747 case 0xa1: return ("ATAPI_IDENTIFY");
748 case 0xa2: return ("SERVICE");
749 case 0xb0: return ("SMART");
750 case 0xc0: return ("CFA ERASE");
751 case 0xc4: return ("READ_MUL");
752 case 0xc5: return ("WRITE_MUL");
753 case 0xc6: return ("SET_MULTI");
754 case 0xc7: return ("READ_DMA_QUEUED");
755 case 0xc8: return ("READ_DMA");
756 case 0xca: return ("WRITE_DMA");
757 case 0xcc: return ("WRITE_DMA_QUEUED");
758 case 0xe6: return ("SLEEP");
759 case 0xe7: return ("FLUSHCACHE");
760 case 0xea: return ("FLUSHCACHE48");
761 case 0xec: return ("ATA_IDENTIFY");
/* SETFEATURES: decode the subcommand in the feature register
 * (NOTE(review): the enclosing "case 0xef:" line is missing from this extract) */
763 switch (request->u.ata.feature) {
764 case 0x03: return ("SETFEATURES SET TRANSFER MODE");
765 case 0x02: return ("SETFEATURES ENABLE WCACHE");
766 case 0x82: return ("SETFEATURES DISABLE WCACHE");
767 case 0xaa: return ("SETFEATURES ENABLE RCACHE");
768 case 0x55: return ("SETFEATURES DISABLE RCACHE");
770 ksprintf(buffer, "SETFEATURES 0x%02x", request->u.ata.feature);
774 ksprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
/*
 * ata_skey2str():  translate an ATAPI/SCSI sense key value (0x00-0x0f)
 * into a human-readable string for error reporting.
 * Returns a pointer to a static string literal; never NULL.
 */
static char *
ata_skey2str(u_int8_t skey)
{
    switch (skey) {
    case 0x00: return ("NO SENSE");
    case 0x01: return ("RECOVERED ERROR");
    case 0x02: return ("NOT READY");
    case 0x03: return ("MEDIUM ERROR");
    case 0x04: return ("HARDWARE ERROR");
    case 0x05: return ("ILLEGAL REQUEST");
    case 0x06: return ("UNIT ATTENTION");
    case 0x07: return ("DATA PROTECT");
    case 0x08: return ("BLANK CHECK");
    case 0x09: return ("VENDOR SPECIFIC");
    case 0x0a: return ("COPY ABORTED");
    case 0x0b: return ("ABORTED COMMAND");
    case 0x0c: return ("EQUAL");
    case 0x0d: return ("VOLUME OVERFLOW");
    case 0x0e: return ("MISCOMPARE");
    case 0x0f: return ("RESERVED");
    default: return("UNKNOWN");
    }
}