TRIM support
[dragonfly.git] / sys / bus / cam / scsi / scsi_da.c
984263bc
MD
1/*
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * Copyright (c) 1997 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
62ade751 28 * $FreeBSD: src/sys/cam/scsi/scsi_da.c,v 1.42.2.46 2003/10/21 22:18:19 thomas Exp $
984263bc
MD
29 */
30
984263bc 31#include <sys/param.h>
fabb8ceb 32#include <sys/bootmaj.h>
984263bc
MD
33
34#ifdef _KERNEL
684a93c4 35
984263bc
MD
36#include <sys/systm.h>
37#include <sys/kernel.h>
38#include <sys/buf.h>
39#include <sys/sysctl.h>
62ade751 40#include <sys/taskqueue.h>
1c8b7a9a 41#include <sys/lock.h>
984263bc 42#include <sys/conf.h>
1c8b7a9a 43#include <sys/devicestat.h>
984263bc 44#include <sys/disk.h>
55a78310 45#include <sys/dtype.h>
984263bc
MD
46#include <sys/eventhandler.h>
47#include <sys/malloc.h>
48#include <sys/cons.h>
3020e3be 49#include <sys/proc.h>
e0fb398b 50#include <sys/ioctl_compat.h>
684a93c4 51
3020e3be 52#include <sys/buf2.h>
4e01b467 53#include <sys/thread2.h>
684a93c4
MD
54#include <sys/mplock2.h>
55
1c8b7a9a 56#endif /* _KERNEL */
984263bc 57
05220613 58#ifdef _KERNEL
984263bc 59#include <vm/pmap.h>
05220613 60#endif
984263bc
MD
61
62#ifndef _KERNEL
63#include <stdio.h>
64#include <string.h>
65#endif /* _KERNEL */
66
55230951 67#include <sys/camlib.h>
1f2de5d4
MD
68#include "../cam.h"
69#include "../cam_ccb.h"
70#include "../cam_extend.h"
71#include "../cam_periph.h"
72#include "../cam_xpt_periph.h"
1c8b7a9a 73#include "../cam_sim.h"
984263bc 74
1f2de5d4 75#include "scsi_message.h"
984263bc
MD
76
77#ifndef _KERNEL
1f2de5d4 78#include "scsi_da.h"
984263bc
MD
79#endif /* !_KERNEL */
80
81#ifdef _KERNEL
82typedef enum {
83 DA_STATE_PROBE,
bdd58e03 84 DA_STATE_PROBE2,
984263bc
MD
85 DA_STATE_NORMAL
86} da_state;
87
88typedef enum {
89 DA_FLAG_PACK_INVALID = 0x001,
90 DA_FLAG_NEW_PACK = 0x002,
91 DA_FLAG_PACK_LOCKED = 0x004,
92 DA_FLAG_PACK_REMOVABLE = 0x008,
93 DA_FLAG_TAGGED_QUEUING = 0x010,
94 DA_FLAG_NEED_OTAG = 0x020,
95 DA_FLAG_WENT_IDLE = 0x040,
96 DA_FLAG_RETRY_UA = 0x080,
62ade751 97 DA_FLAG_OPEN = 0x100,
af0aa0ac
MD
98 DA_FLAG_SCTX_INIT = 0x200,
99 DA_FLAG_RD_LIMIT = 0x400,
e0fb398b
T
100 DA_FLAG_WR_LIMIT = 0x800,
101 DA_FLAG_CAN_TRIM = 0x1000
984263bc
MD
102} da_flags;
103
104typedef enum {
105 DA_Q_NONE = 0x00,
106 DA_Q_NO_SYNC_CACHE = 0x01,
62ade751
MD
107 DA_Q_NO_6_BYTE = 0x02,
108 DA_Q_NO_PREVENT = 0x04
984263bc
MD
109} da_quirks;
110
111typedef enum {
112 DA_CCB_PROBE = 0x01,
bdd58e03
MD
113 DA_CCB_PROBE2 = 0x02,
114 DA_CCB_BUFFER_IO = 0x03,
115 DA_CCB_WAITING = 0x04,
116 DA_CCB_DUMP = 0x05,
e0fb398b 117 DA_CCB_TRIM = 0x06,
984263bc
MD
118 DA_CCB_TYPE_MASK = 0x0F,
119 DA_CCB_RETRY_UA = 0x10
120} da_ccb_state;
121
122/* Offsets into our private area for storing information */
123#define ccb_state ppriv_field0
81b5c339 124#define ccb_bio ppriv_ptr1
984263bc
MD
125
126struct disk_params {
127 u_int8_t heads;
bdd58e03 128 u_int32_t cylinders;
984263bc
MD
129 u_int8_t secs_per_track;
130 u_int32_t secsize; /* Number of bytes/sector */
bdd58e03 131 u_int64_t sectors; /* total number sectors */
984263bc
MD
132};
133
e0fb398b
T
134#define TRIM_MAX_BLOCKS 8
135#define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * 64)
136struct trim_request {
137 uint8_t data[TRIM_MAX_RANGES * 8];
138 struct bio *bios[TRIM_MAX_RANGES];
139};
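/*
 * Sizing note (derived from the range encoding in dastart()): each range
 * entry is 8 bytes, a 48-bit starting LBA plus a 16-bit sector count, and
 * 64 entries fit in one 512-byte TRIM data block.  TRIM_MAX_RANGES is
 * therefore 8 * 64 = 512 ranges, filling TRIM_MAX_BLOCKS (8) payload
 * blocks, and data[] is 512 * 8 = 4096 bytes.
 */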
140
984263bc 141struct da_softc {
af0aa0ac
MD
142 struct bio_queue_head bio_queue_rd;
143 struct bio_queue_head bio_queue_wr;
e0fb398b 144 struct bio_queue_head bio_queue_trim;
984263bc
MD
145 struct devstat device_stats;
146 SLIST_ENTRY(da_softc) links;
147 LIST_HEAD(, ccb_hdr) pending_ccbs;
148 da_state state;
149 da_flags flags;
150 da_quirks quirks;
151 int minimum_cmd_size;
152 int ordered_tag_count;
af0aa0ac
MD
153 int outstanding_cmds_rd;
154 int outstanding_cmds_wr;
e0fb398b
T
155 int trim_max_ranges;
156 int trim_running;
157 int trim_enabled;
984263bc
MD
158 struct disk_params params;
159 struct disk disk;
160 union ccb saved_ccb;
62ade751
MD
161 struct task sysctl_task;
162 struct sysctl_ctx_list sysctl_ctx;
163 struct sysctl_oid *sysctl_tree;
1c8b7a9a 164 struct callout sendordered_c;
e0fb398b 165 struct trim_request trim_req;
984263bc
MD
166};
167
168struct da_quirk_entry {
169 struct scsi_inquiry_pattern inq_pat;
170 da_quirks quirks;
171};
172
173static const char quantum[] = "QUANTUM";
174static const char microp[] = "MICROP";
175
176static struct da_quirk_entry da_quirk_table[] =
177{
62ade751 178 /* SPI, FC devices */
984263bc
MD
179 {
180 /*
181 * Fujitsu M2513A MO drives.
182 * Tested devices: M2513A2 firmware versions 1200 & 1300.
183 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
184 * Reported by: W.Scholten <whs@xs4all.nl>
185 */
186 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
187 /*quirks*/ DA_Q_NO_SYNC_CACHE
188 },
189 {
190 /* See above. */
191 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
192 /*quirks*/ DA_Q_NO_SYNC_CACHE
193 },
194 {
195 /*
196 * This particular Fujitsu drive doesn't like the
197 * synchronize cache command.
198 * Reported by: Tom Jackson <toj@gorilla.net>
199 */
200 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
201 /*quirks*/ DA_Q_NO_SYNC_CACHE
984263bc
MD
202 },
203 {
204 /*
205 * This drive doesn't like the synchronize cache command
206 * either. Reported by: Matthew Jacob <mjacob@feral.com>
207 * in NetBSD PR kern/6027, August 24, 1998.
208 */
209 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
210 /*quirks*/ DA_Q_NO_SYNC_CACHE
211 },
212 {
213 /*
214 * This drive doesn't like the synchronize cache command
215 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
216 * (PR 8882).
217 */
218 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
219 /*quirks*/ DA_Q_NO_SYNC_CACHE
220 },
221 {
222 /*
223 * Doesn't like the synchronize cache command.
224 * Reported by: Blaz Zupan <blaz@gold.amis.net>
225 */
226 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
227 /*quirks*/ DA_Q_NO_SYNC_CACHE
228 },
229 {
230 /*
231 * Doesn't like the synchronize cache command.
d92d7552 232 * Reported by: Blaz Zupan <blaz@gold.amis.net>
984263bc
MD
233 */
234 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
235 /*quirks*/ DA_Q_NO_SYNC_CACHE
236 },
237 {
238 /*
239 * Doesn't like the synchronize cache command.
240 */
241 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
242 /*quirks*/ DA_Q_NO_SYNC_CACHE
243 },
244 {
245 /*
d92d7552
PA
246 * Doesn't like the synchronize cache command.
247 * Reported by: walter@pelissero.de
248 */
249 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
250 /*quirks*/ DA_Q_NO_SYNC_CACHE
251 },
252 {
253 /*
984263bc
MD
254 * Doesn't work correctly with 6 byte reads/writes.
255 * Returns illegal request, and points to byte 9 of the
256 * 6-byte CDB.
257 * Reported by: Adam McDougall <bsdx@spawnet.com>
258 */
259 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
260 /*quirks*/ DA_Q_NO_6_BYTE
261 },
262 {
62ade751 263 /* See above. */
984263bc
MD
264 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
265 /*quirks*/ DA_Q_NO_6_BYTE
266 },
984263bc
MD
267 {
268 /*
d92d7552
PA
269 * Doesn't like the synchronize cache command.
270 * Reported by: walter@pelissero.de
984263bc 271 */
d92d7552 272 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
62ade751 273 /*quirks*/ DA_Q_NO_SYNC_CACHE
984263bc
MD
274 },
275 {
d92d7552
PA
276 /*
277 * The CISS RAID controllers do not support SYNC_CACHE
278 */
279 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
62ade751
MD
280 /*quirks*/ DA_Q_NO_SYNC_CACHE
281 },
a4a9ba75
SW
282 {
283 /*
284 * The same goes for the mly(4) controllers
285 */
286 {T_DIRECT, SIP_MEDIA_FIXED, "MLY*", "*", "MYLX"},
287 /*quirks*/ DA_Q_NO_SYNC_CACHE
288 },
a9453758
MD
289 /*
290 * USB mass storage devices supported by umass(4)
291 *
292 * NOTE: USB attachments automatically set DA_Q_NO_SYNC_CACHE so
293 * it does not have to be specified here.
294 */
d92d7552
PA
295 {
296 /*
297 * Creative Nomad MUVO mp3 player (USB)
298 * PR: kern/53094
299 */
300 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
a9453758 301 /*quirks*/ DA_Q_NO_PREVENT
d92d7552 302 },
654cdffd
JS
303 {
304 /*
d92d7552
PA
305 * Sigmatel USB Flash MP3 Player
306 * PR: kern/57046
984263bc 307 */
d92d7552 308 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
a9453758 309 /*quirks*/ DA_Q_NO_PREVENT
984263bc
MD
310 },
311 {
312 /*
d92d7552
PA
313 * SEAGRAND NP-900 MP3 Player
314 * PR: kern/64563
984263bc 315 */
d92d7552 316 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
a9453758 317 /*quirks*/ DA_Q_NO_PREVENT
984263bc
MD
318 },
319 {
320 /*
d92d7552
PA
321 * Creative MUVO Slim mp3 player (USB)
322 * PR: usb/86131
984263bc 323 */
d92d7552 324 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
a9453758 325 "*"}, /*quirks*/ DA_Q_NO_PREVENT
d92d7552
PA
326 },
327 {
328 /*
329 * Philips USB Key Audio KEY013
330 * PR: usb/68412
331 */
332 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
a9453758 333 /*quirks*/ DA_Q_NO_PREVENT
285d490c 334 },
984263bc
MD
335};
336
337static d_open_t daopen;
338static d_close_t daclose;
339static d_strategy_t dastrategy;
984263bc 340static d_dump_t dadump;
e0fb398b 341static d_ioctl_t daioctl;
984263bc
MD
342static periph_init_t dainit;
343static void daasync(void *callback_arg, u_int32_t code,
344 struct cam_path *path, void *arg);
62ade751 345static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
984263bc
MD
346static periph_ctor_t daregister;
347static periph_dtor_t dacleanup;
348static periph_start_t dastart;
349static periph_oninv_t daoninvalidate;
350static void dadone(struct cam_periph *periph,
351 union ccb *done_ccb);
352static int daerror(union ccb *ccb, u_int32_t cam_flags,
353 u_int32_t sense_flags);
354static void daprevent(struct cam_periph *periph, int action);
bdd58e03 355static int dagetcapacity(struct cam_periph *periph);
f7b26992 356static int dacheckmedia(struct cam_periph *periph);
bdd58e03
MD
357static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
358 uint64_t maxsector);
af0aa0ac 359static void daflushbioq(struct bio_queue_head *bioq, int error);
984263bc
MD
360static timeout_t dasendorderedtag;
361static void dashutdown(void *arg, int howto);
362
363#ifndef DA_DEFAULT_TIMEOUT
364#define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
365#endif
366
367#ifndef DA_DEFAULT_RETRY
368#define DA_DEFAULT_RETRY 4
369#endif
370
066e560b
PA
371#ifndef DA_DEFAULT_SEND_ORDERED
372#define DA_DEFAULT_SEND_ORDERED 1
373#endif
374
984263bc
MD
375static int da_retry_count = DA_DEFAULT_RETRY;
376static int da_default_timeout = DA_DEFAULT_TIMEOUT;
066e560b 377static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
3690a379 378static struct callout dasendorderedtag_ch;
984263bc 379
984263bc
MD
380SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
381 "CAM Direct Access Disk driver");
382SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
383 &da_retry_count, 0, "Normal I/O retry count");
62ade751 384TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
984263bc
MD
385SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
386 &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
62ade751 387TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
066e560b
PA
388SYSCTL_INT(_kern_cam_da, OID_AUTO, da_send_ordered, CTLFLAG_RW,
389 &da_send_ordered, 0, "Send Ordered Tags");
390TUNABLE_INT("kern.cam.da.da_send_ordered", &da_send_ordered);
984263bc
MD
391
392/*
393 * DA_ORDEREDTAG_INTERVAL determines how often, relative
394 * to the default timeout, we check to see whether an ordered
395 * tagged transaction is appropriate to prevent simple tag
396 * starvation. Since we'd like to ensure that there is at least
397 * 1/2 of the timeout length left for a starved transaction to
398 * complete after we've sent an ordered tag, we must poll at least
399 * four times in every timeout period. This takes care of the worst
400 * case where a starved transaction starts during an interval that
401 * meets the "don't send an ordered tag" test, so it takes us two
402 * intervals to determine that a tag must be sent.
403 */
404#ifndef DA_ORDEREDTAG_INTERVAL
405#define DA_ORDEREDTAG_INTERVAL 4
406#endif
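/*
 * Worked example: with DA_DEFAULT_TIMEOUT = 60 seconds and
 * DA_ORDEREDTAG_INTERVAL = 4, the per-unit sendordered callout armed in
 * daregister() fires every (60 * hz) / 4 ticks, i.e. every 15 seconds.
 */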
407
408static struct periph_driver dadriver =
409{
410 dainit, "da",
411 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
412};
413
2ad14cb5 414PERIPHDRIVER_DECLARE(da, dadriver);
984263bc 415
fef8985e 416static struct dev_ops da_ops = {
9f889dc4 417 { "da", DA_CDEV_MAJOR, D_DISK | D_MPSAFE },
fef8985e
MD
418 .d_open = daopen,
419 .d_close = daclose,
420 .d_read = physread,
421 .d_write = physwrite,
fef8985e 422 .d_strategy = dastrategy,
e0fb398b
T
423 .d_dump = dadump,
424 .d_ioctl = daioctl
984263bc
MD
425};
426
984263bc
MD
427static struct extend_array *daperiphs;
428
1c8b7a9a
PA
429MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
430
984263bc 431static int
e0fb398b
T
432daioctl(struct dev_ioctl_args *ap)
433{
434 int unit;
435 int error = 0;
436 struct buf *bp;
437 struct cam_periph *periph;
438 int byte_count;
439 struct da_softc * softc;
440
441 off_t *del_num = (off_t*)ap->a_data;
442 off_t bytes_left;
443 off_t bytes_start;
444
445 cdev_t dev = ap->a_head.a_dev;
446
447
448 unit = dkunit(dev);
449 periph = cam_extend_get(daperiphs, unit);
450 if (periph == NULL)
451 return(ENXIO);
452 softc = (struct da_softc *)periph->softc;
453
454 switch (ap->a_cmd) {
455 case IOCTLTRIM:
456 {
457
458 bytes_left = del_num[1];
459 bytes_start = del_num[0];
460
461 /* TRIM occurs on 512-byte sectors. */
462 KKASSERT((bytes_left % 512) == 0);
463 KKASSERT((bytes_start % 512) == 0);
464
465
466 /* Break the TRIM up into int-sized commands because b_bcount is an int */
467 while (bytes_left) {
468
469 /*
470 * Rather than squeezing more blocks into b_bcount
471 * and having to break up the TRIM request in dastart(),
472 * we ensure we can always TRIM this many bytes with one
473 * TRIM command (this happens if the device only
474 * supports one TRIM block).
475 *
476 * With a minimum TRIM block size of 1, a single TRIM
477 * command can free 4194240 blocks (64 * 65535): each LBA
478 * range can address 65535 blocks and there are 64 such
479 * ranges in a 512-byte block.  And 4194240 blocks * 512
480 * bytes = 0x7FFF8000 bytes.
481 */
482 byte_count = MIN(bytes_left,0x7FFF8000);
483 bp = getnewbuf(0,0,0,1);
484
485 bp->b_cmd = BUF_CMD_FREEBLKS;
486 bp->b_bio1.bio_offset = bytes_start;
487 bp->b_bcount = byte_count;
488 bp->b_bio1.bio_flags |= BIO_SYNC;
489 bp->b_bio1.bio_done = biodone_sync;
490
491 dev_dstrategy(ap->a_head.a_dev, &bp->b_bio1);
492
493 if (biowait(&bp->b_bio1, "TRIM")) {
494 kprintf("Error:%d\n", bp->b_error);
495 return(bp->b_error ? bp->b_error : EIO);
496 }
497 brelse(bp);
498 bytes_left -= byte_count;
499 bytes_start += byte_count;
500 }
501 break;
502 }
503 default:
504 return(EINVAL);
505 }
506
507 return(error);
508}
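/*
 * A minimal userland sketch of driving the TRIM ioctl above (assumes the
 * IOCTLTRIM request code from <sys/ioctl_compat.h> and a hypothetical
 * /dev/da0 node; both the starting offset and the length are byte counts
 * and must be multiples of 512):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/ioctl_compat.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *
 *	off_t args[2];
 *	int fd = open("/dev/da0", O_RDWR);
 *
 *	if (fd < 0)
 *		err(1, "open");
 *	args[0] = 0;			// starting byte offset
 *	args[1] = 1024 * 512;		// number of bytes to TRIM
 *	if (ioctl(fd, IOCTLTRIM, args) == -1)
 *		err(1, "IOCTLTRIM");
 */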
509
510static int
fef8985e 511daopen(struct dev_open_args *ap)
984263bc 512{
b13267a5 513 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
514 struct cam_periph *periph;
515 struct da_softc *softc;
a688b15c 516 struct disk_info info;
984263bc 517 int unit;
984263bc 518 int error;
984263bc
MD
519
520 unit = dkunit(dev);
984263bc 521 periph = cam_extend_get(daperiphs, unit);
b05e84c9 522 if (periph == NULL) {
984263bc 523 return (ENXIO);
b05e84c9 524 }
984263bc 525
1c8b7a9a
PA
526 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
527 return(ENXIO);
528 }
529
530 cam_periph_lock(periph);
531 if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
532 cam_periph_unlock(periph);
533 cam_periph_release(periph);
534 return (error);
535 }
536
537 unit = periph->unit_number;
984263bc
MD
538 softc = (struct da_softc *)periph->softc;
539
540 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
9ece9268
PA
541 ("daopen: dev=%s (unit %d)\n", devtoname(dev),
542 unit));
984263bc 543
984263bc
MD
544 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
545 /* Invalidate our pack information. */
546 disk_invalidate(&softc->disk);
547 softc->flags &= ~DA_FLAG_PACK_INVALID;
548 }
984263bc 549
f7b26992
MD
550 error = dacheckmedia(periph);
551 softc->flags |= DA_FLAG_OPEN;
984263bc
MD
552
553 if (error == 0) {
554 struct ccb_getdev cgd;
555
a688b15c
MD
556 /* Build disk information structure */
557 bzero(&info, sizeof(info));
558 info.d_type = DTYPE_SCSI;
984263bc
MD
559
560 /*
561 * Grab the inquiry data to get the vendor and product names.
562 * Put them in the typename and packname for the label.
563 */
564 xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
565 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
566 xpt_action((union ccb *)&cgd);
567
984263bc
MD
568 /*
569 * Check to see whether or not the blocksize is set yet.
570 * If it isn't, set it and then clear the blocksize
571 * unavailable flag for the device statistics.
572 */
573 if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
574 softc->device_stats.block_size = softc->params.secsize;
575 softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
576 }
577 }
578
b05e84c9 579 if (error == 0) {
62ade751
MD
580 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
581 (softc->quirks & DA_Q_NO_PREVENT) == 0)
b05e84c9
PA
582 daprevent(periph, PR_PREVENT);
583 } else {
984263bc
MD
584 softc->flags &= ~DA_FLAG_OPEN;
585 cam_periph_release(periph);
586 }
2d19cdd3 587 cam_periph_unhold(periph, 1);
984263bc
MD
588 return (error);
589}
590
591static int
fef8985e 592daclose(struct dev_close_args *ap)
984263bc 593{
b13267a5 594 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
595 struct cam_periph *periph;
596 struct da_softc *softc;
597 int unit;
598 int error;
599
600 unit = dkunit(dev);
601 periph = cam_extend_get(daperiphs, unit);
602 if (periph == NULL)
603 return (ENXIO);
604
1c8b7a9a
PA
605 cam_periph_lock(periph);
606 if ((error = cam_periph_hold(periph, 0)) != 0) {
607 cam_periph_unlock(periph);
608 cam_periph_release(periph);
609 return (error);
984263bc
MD
610 }
611
1c8b7a9a
PA
612 softc = (struct da_softc *)periph->softc;
613
984263bc
MD
614 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
615 union ccb *ccb;
616
617 ccb = cam_periph_getccb(periph, /*priority*/1);
618
619 scsi_synchronize_cache(&ccb->csio,
620 /*retries*/1,
621 /*cbfcnp*/dadone,
622 MSG_SIMPLE_Q_TAG,
623 /*begin_lba*/0,/* Cover the whole disk */
624 /*lb_count*/0,
625 SSD_FULL_SIZE,
626 5 * 60 * 1000);
627
628 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
629 /*sense_flags*/SF_RETRY_UA,
630 &softc->device_stats);
631
632 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
633 if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
634 CAM_SCSI_STATUS_ERROR) {
635 int asc, ascq;
636 int sense_key, error_code;
637
638 scsi_extract_sense(&ccb->csio.sense_data,
639 &error_code,
640 &sense_key,
641 &asc, &ascq);
642 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
643 scsi_sense_print(&ccb->csio);
644 } else {
1c8b7a9a
PA
645 xpt_print(periph->path, "Synchronize cache "
646 "failed, status == 0x%x, scsi status == "
647 "0x%x\n", ccb->csio.ccb_h.status,
648 ccb->csio.scsi_status);
984263bc
MD
649 }
650 }
651
652 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
653 cam_release_devq(ccb->ccb_h.path,
654 /*relsim_flags*/0,
655 /*reduction*/0,
656 /*timeout*/0,
657 /*getcount_only*/0);
658
659 xpt_release_ccb(ccb);
660
661 }
662
663 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
62ade751
MD
664 if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
665 daprevent(periph, PR_ALLOW);
984263bc
MD
666 /*
667 * If we've got removable media, mark the blocksize as
668 * unavailable, since it could change when new media is
669 * inserted.
670 */
671 softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
672 }
673
fca0fce6
MD
674 /*
675 * Don't compound any ref counting software bugs with more.
676 */
677 if (softc->flags & DA_FLAG_OPEN) {
678 softc->flags &= ~DA_FLAG_OPEN;
679 cam_periph_release(periph);
680 } else {
1c8b7a9a
PA
681 xpt_print(periph->path,
682 "daclose() called on an already closed device!\n");
fca0fce6 683 }
2d19cdd3 684 cam_periph_unhold(periph, 1);
984263bc
MD
685 return (0);
686}
687
688/*
689 * Actually translate the requested transfer into one the physical driver
690 * can understand. The transfer is described by a buf and will include
691 * only one physical transfer.
692 */
fef8985e
MD
693static int
694dastrategy(struct dev_strategy_args *ap)
984263bc 695{
b13267a5 696 cdev_t dev = ap->a_head.a_dev;
fef8985e 697 struct bio *bio = ap->a_bio;
81b5c339 698 struct buf *bp = bio->bio_buf;
984263bc
MD
699 struct cam_periph *periph;
700 struct da_softc *softc;
701 u_int unit;
702 u_int part;
984263bc 703
81b5c339
MD
704 unit = dkunit(dev);
705 part = dkpart(dev);
984263bc
MD
706 periph = cam_extend_get(daperiphs, unit);
707 if (periph == NULL) {
708 bp->b_error = ENXIO;
709 goto bad;
710 }
711 softc = (struct da_softc *)periph->softc;
1c8b7a9a
PA
712
713 cam_periph_lock(periph);
714
984263bc
MD
715#if 0
716 /*
717 * check it's not too big a transfer for our adapter
718 */
81b5c339 719 scsi_minphys(bp, &sd_switch);
984263bc
MD
720#endif
721
722 /*
723 * Mask interrupts so that the pack cannot be invalidated until
724 * after we are in the queue. Otherwise, we might not properly
725 * clean up one of the buffers.
726 */
984263bc
MD
727
728 /*
729 * If the device has been made invalid, error out
730 */
731 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1c8b7a9a 732 cam_periph_unlock(periph);
984263bc
MD
733 bp->b_error = ENXIO;
734 goto bad;
735 }
736
737 /*
738 * Place it in the queue of disk activities for this disk
739 */
af0aa0ac
MD
740 if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)
741 bioqdisksort(&softc->bio_queue_wr, bio);
e0fb398b
T
742 else if (bp->b_cmd == BUF_CMD_FREEBLKS)
743 bioqdisksort(&softc->bio_queue_trim, bio);
af0aa0ac
MD
744 else
745 bioqdisksort(&softc->bio_queue_rd, bio);
984263bc
MD
746
747 /*
748 * Schedule ourselves for performing the work.
749 */
750 xpt_schedule(periph, /* XXX priority */1);
1c8b7a9a 751 cam_periph_unlock(periph);
984263bc 752
fef8985e 753 return(0);
984263bc
MD
754bad:
755 bp->b_flags |= B_ERROR;
756
757 /*
758 * Correctly set the buf to indicate a completed xfer
759 */
760 bp->b_resid = bp->b_bcount;
81b5c339 761 biodone(bio);
fef8985e 762 return(0);
984263bc
MD
763}
764
984263bc 765static int
fef8985e 766dadump(struct dev_dump_args *ap)
984263bc 767{
b13267a5 768 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
769 struct cam_periph *periph;
770 struct da_softc *softc;
771 u_int unit;
b24cd69c 772 u_int32_t secsize;
984263bc 773 struct ccb_scsiio csio;
984263bc
MD
774
775 unit = dkunit(dev);
984263bc 776 periph = cam_extend_get(daperiphs, unit);
b24cd69c 777 if (periph == NULL)
984263bc 778 return (ENXIO);
b24cd69c 779
984263bc 780 softc = (struct da_softc *)periph->softc;
1c8b7a9a 781 cam_periph_lock(periph);
b24cd69c
AH
782 secsize = softc->params.secsize; /* XXX: or ap->a_secsize? */
783
1c8b7a9a
PA
784 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
785 cam_periph_unlock(periph);
984263bc 786 return (ENXIO);
1c8b7a9a 787 }
984263bc 788
b24cd69c
AH
789 /*
790 * Because length == 0 means we are supposed to flush the cache, we only
791 * try to write something if length > 0.
792 */
793 if (ap->a_length > 0) {
984263bc 794 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
0b0362e1 795 csio.ccb_h.flags |= CAM_POLLED;
984263bc
MD
796 csio.ccb_h.ccb_state = DA_CCB_DUMP;
797 scsi_read_write(&csio,
798 /*retries*/1,
799 dadone,
800 MSG_ORDERED_Q_TAG,
801 /*read*/FALSE,
802 /*byte2*/0,
803 /*minimum_cmd_size*/ softc->minimum_cmd_size,
b24cd69c
AH
804 ap->a_offset / secsize,
805 ap->a_length / secsize,
806 /*data_ptr*/(u_int8_t *) ap->a_virtual,
807 /*dxfer_len*/ap->a_length,
984263bc
MD
808 /*sense_len*/SSD_FULL_SIZE,
809 DA_DEFAULT_TIMEOUT * 1000);
810 xpt_polled_action((union ccb *)&csio);
811
812 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
85f8e2ea 813 kprintf("Aborting dump due to I/O error.\n");
984263bc
MD
814 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
815 CAM_SCSI_STATUS_ERROR)
816 scsi_sense_print(&csio);
817 else
85f8e2ea 818 kprintf("status == 0x%x, scsi status == 0x%x\n",
984263bc
MD
819 csio.ccb_h.status, csio.scsi_status);
820 return(EIO);
821 }
b24cd69c
AH
822 cam_periph_unlock(periph);
823 return 0;
984263bc
MD
824 }
825
826 /*
827 * Sync the disk cache contents to the physical media.
828 */
829 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
830
831 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
832 csio.ccb_h.ccb_state = DA_CCB_DUMP;
833 scsi_synchronize_cache(&csio,
834 /*retries*/1,
835 /*cbfcnp*/dadone,
836 MSG_SIMPLE_Q_TAG,
837 /*begin_lba*/0,/* Cover the whole disk */
838 /*lb_count*/0,
839 SSD_FULL_SIZE,
840 5 * 60 * 1000);
841 xpt_polled_action((union ccb *)&csio);
842
843 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
844 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
845 CAM_SCSI_STATUS_ERROR) {
846 int asc, ascq;
847 int sense_key, error_code;
848
849 scsi_extract_sense(&csio.sense_data,
850 &error_code,
851 &sense_key,
852 &asc, &ascq);
853 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
854 scsi_sense_print(&csio);
855 } else {
1c8b7a9a
PA
856 xpt_print(periph->path, "Synchronize cache "
857 "failed, status == 0x%x, scsi status == "
858 "0x%x\n", csio.ccb_h.status,
859 csio.scsi_status);
984263bc
MD
860 }
861 }
862 }
1c8b7a9a 863 cam_periph_unlock(periph);
984263bc
MD
864 return (0);
865}
866
867static void
868dainit(void)
869{
870 cam_status status;
984263bc
MD
871
872 /*
873 * Create our extend array for storing the devices we attach to.
874 */
875 daperiphs = cam_extend_new();
984263bc 876 if (daperiphs == NULL) {
85f8e2ea 877 kprintf("da: Failed to alloc extend array!\n");
984263bc
MD
878 return;
879 }
3690a379
JS
880
881 callout_init(&dasendorderedtag_ch);
882
984263bc
MD
883 /*
884 * Install a global async callback. This callback will
885 * receive async callbacks like "new device found".
886 */
1c8b7a9a 887 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
984263bc
MD
888
889 if (status != CAM_REQ_CMP) {
85f8e2ea 890 kprintf("da: Failed to attach master async callback "
984263bc 891 "due to status 0x%x!\n", status);
066e560b 892 } else if (da_send_ordered) {
984263bc 893
984263bc
MD
894 /* Register our shutdown event handler */
895 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
896 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
85f8e2ea 897 kprintf("dainit: shutdown event registration failed!\n");
984263bc
MD
898 }
899}
900
901static void
902daoninvalidate(struct cam_periph *periph)
903{
984263bc 904 struct da_softc *softc;
984263bc
MD
905
906 softc = (struct da_softc *)periph->softc;
907
908 /*
909 * De-register any async callbacks.
910 */
1c8b7a9a 911 xpt_register_async(0, daasync, periph, periph->path);
984263bc
MD
912
913 softc->flags |= DA_FLAG_PACK_INVALID;
914
915 /*
984263bc
MD
916 * Return all queued I/O with ENXIO.
917 * XXX Handle any transactions queued to the card
918 * with XPT_ABORT_CCB.
919 */
e0fb398b 920 daflushbioq(&softc->bio_queue_trim, ENXIO);
af0aa0ac
MD
921 daflushbioq(&softc->bio_queue_wr, ENXIO);
922 daflushbioq(&softc->bio_queue_rd, ENXIO);
923 xpt_print(periph->path, "lost device\n");
924}
925
926static void
927daflushbioq(struct bio_queue_head *bioq, int error)
928{
929 struct bio *q_bio;
930 struct buf *q_bp;
931
932 while ((q_bio = bioq_first(bioq)) != NULL){
933 bioq_remove(bioq, q_bio);
81b5c339 934 q_bp = q_bio->bio_buf;
984263bc 935 q_bp->b_resid = q_bp->b_bcount;
af0aa0ac 936 q_bp->b_error = error;
984263bc 937 q_bp->b_flags |= B_ERROR;
81b5c339 938 biodone(q_bio);
984263bc 939 }
984263bc
MD
940}
941
942static void
943dacleanup(struct cam_periph *periph)
944{
945 struct da_softc *softc;
946
947 softc = (struct da_softc *)periph->softc;
948
949 devstat_remove_entry(&softc->device_stats);
950 cam_extend_release(daperiphs, periph->unit_number);
1c8b7a9a 951 xpt_print(periph->path, "removing device entry\n");
62ade751
MD
952 /*
953 * If we can't free the sysctl tree, oh well...
954 */
955 if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
956 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
1c8b7a9a 957 xpt_print(periph->path, "can't remove sysctl context\n");
62ade751 958 }
2d19cdd3 959 periph->softc = NULL;
e4c9c0c8 960 if (softc->disk.d_rawdev) {
1c8b7a9a 961 cam_periph_unlock(periph);
335dda38 962 disk_destroy(&softc->disk);
1c8b7a9a 963 cam_periph_lock(periph);
984263bc 964 }
1c8b7a9a
PA
965
966 callout_stop(&softc->sendordered_c);
efda3bd0 967 kfree(softc, M_DEVBUF);
984263bc
MD
968}
969
970static void
971daasync(void *callback_arg, u_int32_t code,
972 struct cam_path *path, void *arg)
973{
974 struct cam_periph *periph;
975
976 periph = (struct cam_periph *)callback_arg;
87993e5a 977
984263bc
MD
978 switch (code) {
979 case AC_FOUND_DEVICE:
980 {
981 struct ccb_getdev *cgd;
1c8b7a9a 982 struct cam_sim *sim;
984263bc
MD
983 cam_status status;
984
985 cgd = (struct ccb_getdev *)arg;
e9936c96
PA
986 if (cgd == NULL)
987 break;
984263bc
MD
988
989 if (SID_TYPE(&cgd->inq_data) != T_DIRECT
990 && SID_TYPE(&cgd->inq_data) != T_RBC
991 && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
992 break;
993
994 /*
87993e5a
MD
995 * Don't complain if a valid peripheral is already attached.
996 */
997 periph = cam_periph_find(cgd->ccb_h.path, "da");
998 if (periph && (periph->flags & CAM_PERIPH_INVALID) == 0)
999 break;
1000
1001 /*
984263bc
MD
1002 * Allocate a peripheral instance for
1003 * this device and start the probe
1004 * process.
1005 */
1c8b7a9a 1006 sim = xpt_path_sim(cgd->ccb_h.path);
984263bc
MD
1007 status = cam_periph_alloc(daregister, daoninvalidate,
1008 dacleanup, dastart,
1009 "da", CAM_PERIPH_BIO,
1010 cgd->ccb_h.path, daasync,
1011 AC_FOUND_DEVICE, cgd);
1012
87993e5a 1013 if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
85f8e2ea 1014 kprintf("daasync: Unable to attach to new device "
984263bc 1015 "due to status 0x%x\n", status);
87993e5a 1016 }
984263bc
MD
1017 break;
1018 }
1019 case AC_SENT_BDR:
1020 case AC_BUS_RESET:
1021 {
1022 struct da_softc *softc;
1023 struct ccb_hdr *ccbh;
984263bc
MD
1024
1025 softc = (struct da_softc *)periph->softc;
984263bc
MD
1026 /*
1027 * Don't fail on the expected unit attention
1028 * that will occur.
1029 */
1030 softc->flags |= DA_FLAG_RETRY_UA;
cbe8f7dc 1031 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
984263bc 1032 ccbh->ccb_state |= DA_CCB_RETRY_UA;
984263bc
MD
1033 /* FALLTHROUGH*/
1034 }
1035 default:
1036 cam_periph_async(periph, code, path, arg);
1037 break;
1038 }
1039}
1040
62ade751
MD
1041static void
1042dasysctlinit(void *context, int pending)
1043{
1044 struct cam_periph *periph;
1045 struct da_softc *softc;
1046 char tmpstr[80], tmpstr2[80];
1047
227ce828 1048 get_mplock();
62ade751 1049 periph = (struct cam_periph *)context;
227ce828
AH
1050 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1051 rel_mplock();
1c8b7a9a 1052 return;
227ce828 1053 }
62ade751 1054
1c8b7a9a 1055 softc = (struct da_softc *)periph->softc;
f8c7a42d
MD
1056 ksnprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
1057 ksnprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
62ade751
MD
1058
1059 sysctl_ctx_init(&softc->sysctl_ctx);
1060 softc->flags |= DA_FLAG_SCTX_INIT;
1061 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1062 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1063 CTLFLAG_RD, 0, tmpstr);
1064 if (softc->sysctl_tree == NULL) {
85f8e2ea 1065 kprintf("dasysctlinit: unable to allocate sysctl tree\n");
1c8b7a9a 1066 cam_periph_release(periph);
227ce828 1067 rel_mplock();
62ade751
MD
1068 return;
1069 }
1070
1071 /*
1072 * Now register the sysctl handler, so the user can change the value
1073 * on the fly.
1074 */
1075 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1076 OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1077 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1078 "Minimum CDB size");
1c8b7a9a 1079
e0fb398b
T
1080 /* Only create the option if the device supports TRIM */
1081 if (softc->disk.d_info.d_trimflag) {
1082 SYSCTL_ADD_INT(&softc->sysctl_ctx,
1083 SYSCTL_CHILDREN(softc->sysctl_tree),
1084 OID_AUTO,
1085 "trim_enabled",
1086 CTLFLAG_RW,
1087 &softc->trim_enabled,
1088 0,
1089 "Enable TRIM for this device (SSD)");
1090 }
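	/*
	 * When present, the knob shows up as
	 * kern.cam.da.<unit>.trim_enabled and can be flipped at run
	 * time, e.g. "sysctl kern.cam.da.0.trim_enabled=1".
	 */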
1091
1c8b7a9a 1092 cam_periph_release(periph);
227ce828 1093 rel_mplock();
62ade751
MD
1094}
1095
1096static int
1097dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
1098{
1099 int error, value;
1100
1101 value = *(int *)arg1;
1102
1103 error = sysctl_handle_int(oidp, &value, 0, req);
1104
1105 if ((error != 0)
1106 || (req->newptr == NULL))
1107 return (error);
1108
1109 /*
bdd58e03 1110 * Acceptable values here are 6, 10, 12, or 16.
62ade751
MD
1111 */
1112 if (value < 6)
1113 value = 6;
1114 else if ((value > 6)
1115 && (value <= 10))
1116 value = 10;
bdd58e03
MD
1117 else if ((value > 10)
1118 && (value <= 12))
62ade751 1119 value = 12;
bdd58e03
MD
1120 else if (value > 12)
1121 value = 16;
62ade751
MD
1122
1123 *(int *)arg1 = value;
1124
1125 return (0);
1126}
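/*
 * The clamped value can be seeded per unit from the loader via the
 * kern.cam.da.%d.minimum_cmd_size tunable fetched in daregister()
 * (e.g. kern.cam.da.0.minimum_cmd_size="16" in loader.conf) or adjusted
 * at run time through the sysctl registered in dasysctlinit().
 */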
1127
984263bc
MD
1128static cam_status
1129daregister(struct cam_periph *periph, void *arg)
1130{
984263bc 1131 struct da_softc *softc;
62ade751 1132 struct ccb_pathinq cpi;
984263bc 1133 struct ccb_getdev *cgd;
62ade751 1134 char tmpstr[80];
984263bc
MD
1135 caddr_t match;
1136
1137 cgd = (struct ccb_getdev *)arg;
1138 if (periph == NULL) {
85f8e2ea 1139 kprintf("daregister: periph was NULL!!\n");
984263bc
MD
1140 return(CAM_REQ_CMP_ERR);
1141 }
1142
1143 if (cgd == NULL) {
85f8e2ea 1144 kprintf("daregister: no getdev CCB, can't register device\n");
984263bc
MD
1145 return(CAM_REQ_CMP_ERR);
1146 }
1147
efda3bd0 1148 softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
984263bc
MD
1149 LIST_INIT(&softc->pending_ccbs);
1150 softc->state = DA_STATE_PROBE;
e0fb398b 1151 bioq_init(&softc->bio_queue_trim);
af0aa0ac
MD
1152 bioq_init(&softc->bio_queue_rd);
1153 bioq_init(&softc->bio_queue_wr);
984263bc
MD
1154 if (SID_IS_REMOVABLE(&cgd->inq_data))
1155 softc->flags |= DA_FLAG_PACK_REMOVABLE;
1156 if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1157 softc->flags |= DA_FLAG_TAGGED_QUEUING;
1158
e0fb398b
T
1159 /* Used to get TRIM status from AHCI driver */
1160 if (cgd->inq_data.vendor_specific1[0] == 1) {
1161 /*
1162 * Maximum number of LBA ranges the SSD can handle in a
1163 * single TRIM command.  vendor_specific1[1] is the number
1164 * of 512-byte blocks the SSD reports it accepts per TRIM.
1165 */
1166 softc->trim_max_ranges =
1167 min(cgd->inq_data.vendor_specific1[1] * 64, TRIM_MAX_RANGES);
1168 }
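	/*
	 * Example: if the AHCI driver reports that the device accepts
	 * one 512-byte TRIM data block (vendor_specific1[1] == 1),
	 * trim_max_ranges becomes min(1 * 64, 512) = 64, i.e. at most
	 * 64 LBA ranges (each covering up to 65535 sectors) per TRIM.
	 */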
1169
984263bc
MD
1170 periph->softc = softc;
1171
1172 cam_extend_set(daperiphs, periph->unit_number, periph);
1173
1174 /*
1175 * See if this device has any quirks.
1176 */
1177 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1178 (caddr_t)da_quirk_table,
1179 sizeof(da_quirk_table)/sizeof(*da_quirk_table),
1180 sizeof(*da_quirk_table), scsi_inquiry_match);
1181
1182 if (match != NULL)
1183 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1184 else
1185 softc->quirks = DA_Q_NONE;
1186
a9453758
MD
1187 /*
1188 * Unconditionally disable the synchronize cache command for
1189 * usb attachments. It's just impossible to determine if the
1190 * device supports it or not and if it doesn't the port can
1191 * brick.
1192 */
1193 if (strncmp(periph->sim->sim_name, "umass", 4) == 0) {
1194 softc->quirks |= DA_Q_NO_SYNC_CACHE;
1195 }
1196
62ade751
MD
1197 TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
1198
1199 /* Check if the SIM does not want 6 byte commands */
1200 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
1201 cpi.ccb_h.func_code = XPT_PATH_INQ;
1202 xpt_action((union ccb *)&cpi);
1203 if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
1204 softc->quirks |= DA_Q_NO_6_BYTE;
1205
1206 /*
1207 * RBC devices don't have to support READ(6), only READ(10).
1208 */
984263bc
MD
1209 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
1210 softc->minimum_cmd_size = 10;
1211 else
1212 softc->minimum_cmd_size = 6;
1213
1214 /*
62ade751
MD
1215 * Load the user's default, if any.
1216 */
f8c7a42d 1217 ksnprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
62ade751
MD
1218 periph->unit_number);
1219 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
1220
1221 /*
bdd58e03 1222 * 6, 10, 12, and 16 are the currently permissible values.
62ade751
MD
1223 */
1224 if (softc->minimum_cmd_size < 6)
1225 softc->minimum_cmd_size = 6;
1226 else if ((softc->minimum_cmd_size > 6)
1227 && (softc->minimum_cmd_size <= 10))
1228 softc->minimum_cmd_size = 10;
bdd58e03
MD
1229 else if ((softc->minimum_cmd_size > 10)
1230 && (softc->minimum_cmd_size <= 12))
62ade751 1231 softc->minimum_cmd_size = 12;
bdd58e03
MD
1232 else if (softc->minimum_cmd_size > 12)
1233 softc->minimum_cmd_size = 16;
62ade751
MD
1234
1235 /*
984263bc
MD
1236 * The DA driver supports a blocksize, but
1237 * we don't know the blocksize until we do
1238 * a read capacity. So, set a flag to
1239 * indicate that the blocksize is
1240 * unavailable right now. We'll clear the
1241 * flag as soon as we've done a read capacity.
1242 */
1243 devstat_add_entry(&softc->device_stats, "da",
1244 periph->unit_number, 0,
1245 DEVSTAT_BS_UNAVAILABLE,
1246 SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1247 DEVSTAT_PRIORITY_DISK);
1248
1249 /*
1250 * Register this media as a disk
1251 */
1c8b7a9a 1252 CAM_SIM_UNLOCK(periph->sim);
a688b15c 1253 disk_create(periph->unit_number, &softc->disk, &da_ops);
48789b01 1254 softc->disk.d_rawdev->si_iosize_max = MAXPHYS;
1c8b7a9a 1255 CAM_SIM_LOCK(periph->sim);
984263bc
MD
1256
1257 /*
1258 * Add async callbacks for bus reset and
1259 * bus device reset calls. I don't bother
1260 * checking if this fails as, in most cases,
1261 * the system will function just fine without
1262 * them and the only alternative would be to
1263 * not attach the device on failure.
1264 */
1c8b7a9a
PA
1265 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
1266 daasync, periph, periph->path);
1267
984263bc 1268 /*
1c8b7a9a
PA
1269 * Take an exclusive refcount on the periph while dastart is called
1270 * to finish the probe. The reference will be dropped in dadone at
1271 * the end of probe.
984263bc 1272 */
1c8b7a9a 1273 cam_periph_hold(periph, 0);
984263bc
MD
1274 xpt_schedule(periph, /*priority*/5);
1275
1c8b7a9a
PA
1276 /*
1277 * Schedule a periodic event to occasionally send an
1278 * ordered tag to a device.
1279 */
1280 callout_init(&softc->sendordered_c);
1281 callout_reset(&softc->sendordered_c,
1282 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL,
1283 dasendorderedtag, softc);
1284
e0fb398b
T
1285
1286
984263bc
MD
1287 return(CAM_REQ_CMP);
1288}
1289
1290static void
1291dastart(struct cam_periph *periph, union ccb *start_ccb)
1292{
1293 struct da_softc *softc;
1294
1295 softc = (struct da_softc *)periph->softc;
1296
984263bc
MD
1297 switch (softc->state) {
1298 case DA_STATE_NORMAL:
1299 {
1300 /* Pull a buffer from the queue and get going on it */
81b5c339 1301 struct bio *bio;
af0aa0ac
MD
1302 struct bio *bio_rd;
1303 struct bio *bio_wr;
984263bc 1304 struct buf *bp;
a9bf1b8c 1305 u_int8_t tag_code;
af0aa0ac 1306 int limit;
984263bc
MD
1307
1308 /*
1309 * See if there is a buf with work for us to do..
1310 */
af0aa0ac
MD
1311 bio_rd = bioq_first(&softc->bio_queue_rd);
1312 bio_wr = bioq_first(&softc->bio_queue_wr);
1313
984263bc
MD
1314 if (periph->immediate_priority <= periph->pinfo.priority) {
1315 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1316 ("queuing for immediate ccb\n"));
1317 start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1318 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1319 periph_links.sle);
1320 periph->immediate_priority = CAM_PRIORITY_NONE;
984263bc 1321 wakeup(&periph->ccb_list);
af0aa0ac 1322 if (bio_rd || bio_wr) {
a9bf1b8c
MD
1323 /*
1324 * Have more work to do, so ensure we stay
1325 * scheduled
1326 */
1327 xpt_schedule(periph, /* XXX priority */1);
1328 }
1329 break;
1330 }
af0aa0ac 1331
e0fb398b
T
1332 /* Run the trim command if not already running */
1333 if (!softc->trim_running &&
1334 (bio = bioq_first(&softc->bio_queue_trim)) != 0) {
1335 struct trim_request *req = &softc->trim_req;
1336 struct bio *bio1;
1337 int bps = 0, ranges = 0;
1338
1339 softc->trim_running = 1;
1340 bzero(req, sizeof(*req));
1341 bio1 = bio;
1342 while (1) {
1343 uint64_t lba;
1344 int count;
1345
1346 bp = bio1->bio_buf;
1347 count = bp->b_bcount / softc->params.secsize;
1348 lba = bio1->bio_offset/softc->params.secsize;
1349
1350 kprintf("trim lba:%llu boff:%llu count:%d\n",
1351 (unsigned long long) lba,
1352 (unsigned long long) bio1->bio_offset,
1353 count);
1354
1355 bioq_remove(&softc->bio_queue_trim, bio1);
1356 while (count > 0) {
1357 int c = min(count, 0xffff);
1358 int off = ranges * 8;
1359
1360 req->data[off + 0] = lba & 0xff;
1361 req->data[off + 1] = (lba >> 8) & 0xff;
1362 req->data[off + 2] = (lba >> 16) & 0xff;
1363 req->data[off + 3] = (lba >> 24) & 0xff;
1364 req->data[off + 4] = (lba >> 32) & 0xff;
1365 req->data[off + 5] = (lba >> 40) & 0xff;
1366 req->data[off + 6] = c & 0xff;
1367 req->data[off + 7] = (c >> 8) & 0xff;
1368 lba += c;
1369 count -= c;
1370 ranges++;
1371 }
1372
1373 /* Try to merge multiple TRIM requests */
1374 req->bios[bps++] = bio1;
1375 bio1 = bioq_first(&softc->bio_queue_trim);
1376 if (bio1 == NULL ||
1377 bio1->bio_buf->b_bcount / softc->params.secsize >
1378 (softc->trim_max_ranges - ranges) * 0xffff)
1379 break;
1380 }
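			/*
			 * req->data now holds "ranges" 8-byte entries; the
			 * CSIO below rounds the payload up to whole
			 * 512-byte blocks (((ranges + 63) / 64) * 512) and
			 * hands it to the SIM as an XPT_TRIM request.
			 */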
1381
1382
1383 cam_fill_csio(&start_ccb->csio,
1384 1/*retries*/,
1385 dadone,
1386 CAM_DIR_OUT,
1387 MSG_SIMPLE_Q_TAG,
1388 req->data,
1389 ((ranges +63)/64)*512,
1390 SSD_FULL_SIZE,
1391 sizeof(struct scsi_rw_6),
1392 da_default_timeout*2);
1393
1394 start_ccb->ccb_h.ccb_state = DA_CCB_TRIM;
1395 LIST_INSERT_HEAD(&softc->pending_ccbs,
1396 &start_ccb->ccb_h, periph_links.le);
1397 start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1398 start_ccb->ccb_h.ccb_bio = bio;
1399 devstat_start_transaction(&softc->device_stats);
1400 xpt_action(start_ccb);
1401 xpt_schedule(periph, 1);
1402 break;
1403 }
1404
af0aa0ac
MD
1405 /*
1406 * Select a read or write buffer to queue. Limit the number
1407 * of tags dedicated to reading or writing, giving reads
1408 * precedence.
1409 *
1410 * Writes to modern hard drives go into the HD's cache and
1411 * return completion nearly instantly, that is, until the
1412 * cache becomes full.  Once the HD's cache is full, write
1413 * commands begin to stall.  If all available tags are taken
1414 * up by writes which saturate the drive, reads become
1415 * tag-starved.
1416 *
1417 * A similar situation can occur with reads. With many
1418 * parallel readers all tags can be taken up by reads
1419 * and prevent any writes from draining, even if the HD's
1420 * cache is not full.
1421 */
a3c9d3d8 1422 limit = periph->sim->max_tagged_dev_openings * 2 / 3 + 1;
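		/*
		 * Example: for a SIM reporting 32 tagged openings the
		 * limit works out to 32 * 2 / 3 + 1 = 22, so neither
		 * reads nor writes can consume more than 22 tags and at
		 * least 10 always remain available for the other
		 * direction.
		 */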
af0aa0ac
MD
1423#if 0
1424 /* DEBUGGING */
1425 static int savets;
1426 static long savets2;
a3c9d3d8 1427 if (1 || time_second != savets2 || (ticks != savets && (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr))) {
af0aa0ac
MD
1428 kprintf("%d %d (%d)\n",
1429 softc->outstanding_cmds_rd,
1430 softc->outstanding_cmds_wr,
1431 limit);
1432 savets = ticks;
1433 savets2 = time_second;
1434 }
1435#endif
1436 if (bio_rd && softc->outstanding_cmds_rd < limit) {
1437 bio = bio_rd;
1438 bioq_remove(&softc->bio_queue_rd, bio);
1439 } else if (bio_wr && softc->outstanding_cmds_wr < limit) {
1440 bio = bio_wr;
1441 bioq_remove(&softc->bio_queue_wr, bio);
1442 } else {
1443 if (bio_rd)
1444 softc->flags |= DA_FLAG_RD_LIMIT;
1445 if (bio_wr)
1446 softc->flags |= DA_FLAG_WR_LIMIT;
984263bc 1447 xpt_release_ccb(start_ccb);
a9bf1b8c
MD
1448 break;
1449 }
984263bc 1450
a9bf1b8c
MD
1451 /*
1452 * We can queue new work.
1453 */
a9bf1b8c 1454 bp = bio->bio_buf;
54078292 1455
a9bf1b8c 1456 devstat_start_transaction(&softc->device_stats);
54078292 1457
a9bf1b8c
MD
1458 if ((bp->b_flags & B_ORDERED) != 0 ||
1459 (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1460 softc->flags &= ~DA_FLAG_NEED_OTAG;
1461 softc->ordered_tag_count++;
1462 tag_code = MSG_ORDERED_Q_TAG;
1463 } else {
1464 tag_code = MSG_SIMPLE_Q_TAG;
1465 }
984263bc 1466
a9bf1b8c
MD
1467 switch(bp->b_cmd) {
1468 case BUF_CMD_READ:
1469 case BUF_CMD_WRITE:
984263bc 1470 /*
a9bf1b8c 1471 * Block read/write op
984263bc 1472 */
a9bf1b8c 1473 KKASSERT(bio->bio_offset % softc->params.secsize == 0);
984263bc 1474
a9bf1b8c
MD
1475 scsi_read_write(
1476 &start_ccb->csio,
1477 da_retry_count, /* retries */
1478 dadone,
1479 tag_code,
1480 (bp->b_cmd == BUF_CMD_READ),
1481 0, /* byte2 */
1482 softc->minimum_cmd_size,
1483 bio->bio_offset / softc->params.secsize,
1484 bp->b_bcount / softc->params.secsize,
1485 bp->b_data,
1486 bp->b_bcount,
1487 SSD_FULL_SIZE, /* sense_len */
1488 da_default_timeout * 1000
1489 );
1490 break;
1491 case BUF_CMD_FLUSH:
a9453758
MD
1492 /*
1493 * Silently complete a flush request if the device
1494 * cannot handle it.
1495 */
1496 if (softc->quirks & DA_Q_NO_SYNC_CACHE) {
1497 xpt_release_ccb(start_ccb);
1498 start_ccb = NULL;
1499 devstat_end_transaction_buf(
1500 &softc->device_stats, bp);
1501 biodone(bio);
1502 } else {
1503 scsi_synchronize_cache(
1504 &start_ccb->csio,
1505 1, /* retries */
1506 dadone, /* cbfcnp */
1507 MSG_SIMPLE_Q_TAG,
1508 0, /* lba */
1509 0, /* count (whole disk) */
1510 SSD_FULL_SIZE,
1511 da_default_timeout*1000 /* timeout */
1512 );
1513 }
a9bf1b8c 1514 break;
e0fb398b
T
1515 case BUF_CMD_FREEBLKS:
1516 if (softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM){
1517 start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1518 break;
1519 }
a9bf1b8c 1520 default:
a9453758
MD
1521 xpt_release_ccb(start_ccb);
1522 start_ccb = NULL;
a9bf1b8c
MD
1523 panic("dastart: unrecognized bio cmd %d", bp->b_cmd);
1524 break; /* NOT REACHED */
1525 }
984263bc 1526
a9bf1b8c
MD
1527 /*
1528 * Block out any asynchronous callbacks
1529 * while we touch the pending ccb list.
1530 */
a9453758
MD
1531 if (start_ccb) {
1532 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1533 LIST_INSERT_HEAD(&softc->pending_ccbs,
1534 &start_ccb->ccb_h, periph_links.le);
af0aa0ac
MD
1535 if (bp->b_cmd == BUF_CMD_WRITE ||
1536 bp->b_cmd == BUF_CMD_FLUSH) {
1537 ++softc->outstanding_cmds_wr;
1538 } else {
1539 ++softc->outstanding_cmds_rd;
1540 }
a9453758
MD
1541
1542 /* We expect a unit attention from this device */
1543 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1544 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1545 softc->flags &= ~DA_FLAG_RETRY_UA;
1546 }
a9bf1b8c 1547
a9453758
MD
1548 start_ccb->ccb_h.ccb_bio = bio;
1549 xpt_action(start_ccb);
1550 }
984263bc 1551
a9bf1b8c
MD
1552 /*
1553 * Be sure we stay scheduled if we have more work to do.
1554 */
af0aa0ac
MD
1555 if (bioq_first(&softc->bio_queue_rd) ||
1556 bioq_first(&softc->bio_queue_wr)) {
a9bf1b8c 1557 xpt_schedule(periph, 1);
af0aa0ac 1558 }
984263bc
MD
1559 break;
1560 }
1561 case DA_STATE_PROBE:
1562 {
1563 struct ccb_scsiio *csio;
1564 struct scsi_read_capacity_data *rcap;
1565
1c8b7a9a 1566 rcap = kmalloc(sizeof(*rcap), M_SCSIDA, M_INTWAIT | M_ZERO);
984263bc
MD
1567 csio = &start_ccb->csio;
1568 scsi_read_capacity(csio,
1569 /*retries*/4,
1570 dadone,
1571 MSG_SIMPLE_Q_TAG,
1572 rcap,
1573 SSD_FULL_SIZE,
1574 /*timeout*/5000);
81b5c339 1575 start_ccb->ccb_h.ccb_bio = NULL;
984263bc
MD
1576 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1577 xpt_action(start_ccb);
1578 break;
1579 }
bdd58e03
MD
1580 case DA_STATE_PROBE2:
1581 {
1582 struct ccb_scsiio *csio;
0b0362e1 1583 struct scsi_read_capacity_data_16 *rcaplong;
bdd58e03 1584
0b0362e1
MD
1585 rcaplong = kmalloc(sizeof(*rcaplong), M_SCSIDA,
1586 M_INTWAIT | M_ZERO);
bdd58e03
MD
1587 if (rcaplong == NULL) {
1588 kprintf("dastart: Couldn't allocate read_capacity\n");
1589 /* da_free_periph??? */
1590 break;
1591 }
1592 csio = &start_ccb->csio;
1593 scsi_read_capacity_16(csio,
1594 /*retries*/ 4,
1595 /*cbfcnp*/ dadone,
1596 /*tag_action*/ MSG_SIMPLE_Q_TAG,
1597 /*lba*/ 0,
1598 /*reladr*/ 0,
1599 /*pmi*/ 0,
1600 rcaplong,
1601 /*sense_len*/ SSD_FULL_SIZE,
1602 /*timeout*/ 60000);
1603 start_ccb->ccb_h.ccb_bio = NULL;
1604 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
1605 xpt_action(start_ccb);
1606 break;
1607 }
984263bc
MD
1608 }
1609}
1610
1611static int
1612cmd6workaround(union ccb *ccb)
1613{
1614 struct scsi_rw_6 cmd6;
1615 struct scsi_rw_10 *cmd10;
1616 struct da_softc *softc;
1617 u_int8_t *cdb;
1618 int frozen;
1619
1620 cdb = ccb->csio.cdb_io.cdb_bytes;
1621
1622 /* Translation only possible if CDB is an array and cmd is R/W6 */
1623 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
1624 (*cdb != READ_6 && *cdb != WRITE_6))
1625 return 0;
1626
1c8b7a9a
PA
1627 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
1628 "increasing minimum_cmd_size to 10.\n");
984263bc
MD
1629 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
1630 softc->minimum_cmd_size = 10;
1631
1632 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
1633 cmd10 = (struct scsi_rw_10 *)cdb;
1634 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
1635 cmd10->byte2 = 0;
1636 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
1637 cmd10->reserved = 0;
1638 scsi_ulto2b(cmd6.length, cmd10->length);
1639 cmd10->control = cmd6.control;
1640 ccb->csio.cdb_len = sizeof(*cmd10);
1641
1642 /* Requeue request, unfreezing queue if necessary */
1643 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1644 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1645 xpt_action(ccb);
1646 if (frozen) {
1647 cam_release_devq(ccb->ccb_h.path,
1648 /*relsim_flags*/0,
1649 /*reduction*/0,
1650 /*timeout*/0,
1651 /*getcount_only*/0);
1652 }
1653 return (ERESTART);
1654}
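/*
 * Illustration of the translation above: a READ(6) CDB of
 * { 0x08, 0x01, 0x23, 0x45, 0x10, 0x00 } (LBA 0x012345, 16 blocks) is
 * rewritten in place as the READ(10) CDB
 * { 0x28, 0x00, 0x00, 0x01, 0x23, 0x45, 0x00, 0x00, 0x10, 0x00 }.
 */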
1655
1656static void
1657dadone(struct cam_periph *periph, union ccb *done_ccb)
1658{
1659 struct da_softc *softc;
1660 struct ccb_scsiio *csio;
cd29885a 1661 struct disk_info info;
984263bc
MD
1662
1663 softc = (struct da_softc *)periph->softc;
1664 csio = &done_ccb->csio;
1665 switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1666 case DA_CCB_BUFFER_IO:
e0fb398b 1667 case DA_CCB_TRIM:
984263bc
MD
1668 {
1669 struct buf *bp;
81b5c339 1670 struct bio *bio;
af0aa0ac 1671 int mustsched = 0;
984263bc 1672
81b5c339
MD
1673 bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
1674 bp = bio->bio_buf;
984263bc
MD
1675 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1676 int error;
984263bc
MD
1677 int sf;
1678
1679 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1680 sf = SF_RETRY_UA;
1681 else
1682 sf = 0;
1683
b05e84c9
PA
1684 error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1685 if (error == ERESTART) {
984263bc
MD
1686 /*
1687 * A retry was scheduled, so
1688 * just return.
1689 */
1690 return;
1691 }
1692 if (error != 0) {
984263bc
MD
1693 if (error == ENXIO) {
1694 /*
1695 * Catastrophic error. Mark our pack as
1696 * invalid.
1697 */
1c8b7a9a
PA
1698 /*
1699 * XXX See if this is really a media
1700 * XXX change first?
984263bc 1701 */
1c8b7a9a
PA
1702 xpt_print(periph->path,
1703 "Invalidating pack\n");
984263bc
MD
1704 softc->flags |= DA_FLAG_PACK_INVALID;
1705 }
1706
1707 /*
af0aa0ac
MD
1708 * Return all queued write I/O's with EIO
1709 * so the client can retry these I/Os in the
984263bc 1710 * proper order should it attempt to recover.
af0aa0ac
MD
1711 *
1712 * Leave read I/O's alone.
984263bc 1713 */
af0aa0ac 1714 daflushbioq(&softc->bio_queue_wr, EIO);
984263bc
MD
1715 bp->b_error = error;
1716 bp->b_resid = bp->b_bcount;
1717 bp->b_flags |= B_ERROR;
1718 } else {
1719 bp->b_resid = csio->resid;
1720 bp->b_error = 0;
62ade751 1721 if (bp->b_resid != 0)
984263bc 1722 bp->b_flags |= B_ERROR;
984263bc
MD
1723 }
1724 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1725 cam_release_devq(done_ccb->ccb_h.path,
1726 /*relsim_flags*/0,
1727 /*reduction*/0,
1728 /*timeout*/0,
1729 /*getcount_only*/0);
1730 } else {
b05e84c9
PA
1731 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1732 panic("REQ_CMP with QFRZN");
984263bc 1733 bp->b_resid = csio->resid;
62ade751 1734 if (csio->resid > 0)
984263bc 1735 bp->b_flags |= B_ERROR;
984263bc
MD
1736 }
1737
1738 /*
1739 * Block out any asynchronous callbacks
1740 * while we touch the pending ccb list.
1741 */
984263bc 1742 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
af0aa0ac
MD
1743 if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH) {
1744 --softc->outstanding_cmds_wr;
1745 if (softc->flags & DA_FLAG_WR_LIMIT) {
1746 softc->flags &= ~DA_FLAG_WR_LIMIT;
1747 mustsched = 1;
1748 }
1749 } else {
1750 --softc->outstanding_cmds_rd;
1751 if (softc->flags & DA_FLAG_RD_LIMIT) {
1752 softc->flags &= ~DA_FLAG_RD_LIMIT;
1753 mustsched = 1;
1754 }
1755 }
1756 if (softc->outstanding_cmds_rd +
1757 softc->outstanding_cmds_wr == 0) {
984263bc 1758 softc->flags |= DA_FLAG_WENT_IDLE;
af0aa0ac 1759 }
984263bc
MD
1760
1761 devstat_end_transaction_buf(&softc->device_stats, bp);
e0fb398b
T
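		/*
		 * A TRIM CCB may carry several merged bios (req->bios[],
		 * filled in by dastart()); propagate the completion
		 * status to each of them, not just the first.
		 */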
1762 if ((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) ==
1763 DA_CCB_TRIM) {
1764 struct trim_request *req =
1765 (struct trim_request *) csio->data_ptr;
1766 int i;
1767
1768 for (i = 1; i < softc->trim_max_ranges &&
1769 req->bios[i]; i++) {
1770 struct bio *bp1 = req->bios[i];
1771
1772 bp1->bio_buf->b_resid = bp->b_resid;
1773 bp1->bio_buf->b_error = bp->b_error;
1774 if (bp->b_flags & B_ERROR)
1775 bp1->bio_buf->b_flags |= B_ERROR;
1776 biodone(bp1);
1777 }
1778 softc->trim_running = 0;
1779 biodone(bio);
1780 xpt_schedule(periph,1);
1781 } else
1782 biodone(bio);
1783
af0aa0ac
MD
1784
1785 if (mustsched)
1786 xpt_schedule(periph, /*priority*/1);
1787
984263bc
MD
1788 break;
1789 }
1790 case DA_CCB_PROBE:
bdd58e03 1791 case DA_CCB_PROBE2:
984263bc
MD
1792 {
1793 struct scsi_read_capacity_data *rdcap;
0b0362e1 1794 struct scsi_read_capacity_data_16 *rcaplong;
984263bc
MD
1795 char announce_buf[80];
1796
bdd58e03
MD
1797 rdcap = NULL;
1798 rcaplong = NULL;
1799 if (softc->state == DA_STATE_PROBE)
1800 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
1801 else
0b0362e1 1802 rcaplong = (struct scsi_read_capacity_data_16 *)
bdd58e03 1803 csio->data_ptr;
f7b26992
MD
1804
1805 bzero(&info, sizeof(info));
1806 info.d_type = DTYPE_SCSI;
1807 info.d_serialno = xpt_path_serialno(periph->path);
984263bc
MD
1808
1809 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1810 struct disk_params *dp;
bdd58e03
MD
1811 uint32_t block_size;
1812 uint64_t maxsector;
1813
1814 if (softc->state == DA_STATE_PROBE) {
1815 block_size = scsi_4btoul(rdcap->length);
1816 maxsector = scsi_4btoul(rdcap->addr);
984263bc 1817
bdd58e03
MD
1818 /*
1819 * According to SBC-2, if the standard 10
1820 * byte READ CAPACITY command returns 2^32,
1821 * we should issue the 16 byte version of
1822 * the command, since the device in question
1823 * has more sectors than can be represented
1824 * with the short version of the command.
1825 */
1826 if (maxsector == 0xffffffff) {
1827 softc->state = DA_STATE_PROBE2;
1c8b7a9a 1828 kfree(rdcap, M_SCSIDA);
bdd58e03
MD
1829 xpt_release_ccb(done_ccb);
1830 xpt_schedule(periph, /*priority*/5);
1831 return;
1832 }
1833 } else {
1834 block_size = scsi_4btoul(rcaplong->length);
1835 maxsector = scsi_8btou64(rcaplong->addr);
1836 }
1837 dasetgeom(periph, block_size, maxsector);
984263bc 1838 dp = &softc->params;
f8c7a42d 1839 ksnprintf(announce_buf, sizeof(announce_buf),
bdd58e03
MD
1840 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1841 (uintmax_t) (((uintmax_t)dp->secsize *
1842 dp->sectors) / (1024*1024)),
1843 (uintmax_t)dp->sectors,
984263bc
MD
1844 dp->secsize, dp->heads, dp->secs_per_track,
1845 dp->cylinders);
e0fb398b 1846
cd29885a 1847 CAM_SIM_UNLOCK(periph->sim);
cd29885a
MD
1848 info.d_media_blksize = softc->params.secsize;
1849 info.d_media_blocks = softc->params.sectors;
1850 info.d_media_size = 0;
1851 info.d_secpertrack = softc->params.secs_per_track;
1852 info.d_nheads = softc->params.heads;
1853 info.d_ncylinders = softc->params.cylinders;
1854 info.d_secpercyl = softc->params.heads *
1855 softc->params.secs_per_track;
55230951 1856 info.d_serialno = xpt_path_serialno(periph->path);
cd29885a
MD
1857 disk_setdiskinfo(&softc->disk, &info);
1858 CAM_SIM_LOCK(periph->sim);
984263bc
MD
1859 } else {
1860 int error;
1861
1862 announce_buf[0] = '\0';
1863
1864 /*
1865 * Retry any UNIT ATTENTION type errors. They
1866 * are expected at boot.
1867 */
b05e84c9
PA
1868 error = daerror(done_ccb, CAM_RETRY_SELTO,
1869 SF_RETRY_UA|SF_NO_PRINT);
984263bc
MD
1870 if (error == ERESTART) {
1871 /*
1872				 * A retry was scheduled, so
1873 * just return.
1874 */
1875 return;
1876 } else if (error != 0) {
1877 struct scsi_sense_data *sense;
1878 int asc, ascq;
1879 int sense_key, error_code;
1880 int have_sense;
1881 cam_status status;
1882 struct ccb_getdev cgd;
1883
1884 /* Don't wedge this device's queue */
984263bc 1885 status = done_ccb->ccb_h.status;
b05e84c9
PA
1886 if ((status & CAM_DEV_QFRZN) != 0)
1887 cam_release_devq(done_ccb->ccb_h.path,
1888 /*relsim_flags*/0,
1889 /*reduction*/0,
1890 /*timeout*/0,
1891 /*getcount_only*/0);
1892
984263bc
MD
1893
1894 xpt_setup_ccb(&cgd.ccb_h,
1895 done_ccb->ccb_h.path,
1896 /* priority */ 1);
1897 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1898 xpt_action((union ccb *)&cgd);
1899
1900 if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1901 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1902 || ((status & CAM_AUTOSNS_VALID) == 0))
1903 have_sense = FALSE;
1904 else
1905 have_sense = TRUE;
1906
1907 if (have_sense) {
1908 sense = &csio->sense_data;
1909 scsi_extract_sense(sense, &error_code,
1910 &sense_key,
1911 &asc, &ascq);
1912 }
1913 /*
1914 * Attach to anything that claims to be a
1915 * direct access or optical disk device,
1916 * as long as it doesn't return a "Logical
1917 * unit not supported" (0x25) error.
1918 */
1919 if ((have_sense) && (asc != 0x25)
b05e84c9
PA
1920 && (error_code == SSD_CURRENT_ERROR)) {
1921 const char *sense_key_desc;
1922 const char *asc_desc;
1923
1924 scsi_sense_desc(sense_key, asc, ascq,
1925 &cgd.inq_data,
1926 &sense_key_desc,
1927 &asc_desc);
f8c7a42d 1928 ksnprintf(announce_buf,
984263bc
MD
1929 sizeof(announce_buf),
1930 "Attempt to query device "
1931 "size failed: %s, %s",
b05e84c9
PA
1932 sense_key_desc,
1933 asc_desc);
f7b26992
MD
1934 info.d_media_blksize = 512;
1935 disk_setdiskinfo(&softc->disk, &info);
b05e84c9 1936 } else {
984263bc
MD
1937 if (have_sense)
1938 scsi_sense_print(
1939 &done_ccb->csio);
1940 else {
1c8b7a9a
PA
1941 xpt_print(periph->path,
1942 "got CAM status %#x\n",
1943 done_ccb->ccb_h.status);
984263bc
MD
1944 }
1945
1c8b7a9a
PA
1946 xpt_print(periph->path, "fatal error, "
1947 "failed to attach to device\n");
984263bc
MD
1948
1949 /*
1950 * Free up resources.
1951 */
1952 cam_periph_invalidate(periph);
1953 }
1954 }
1955 }
1c8b7a9a 1956 kfree(csio->data_ptr, M_SCSIDA);
62ade751 1957 if (announce_buf[0] != '\0') {
984263bc 1958 xpt_announce_periph(periph, announce_buf);
62ade751
MD
1959 /*
1960 * Create our sysctl variables, now that we know
1961 * we have successfully attached.
1962 */
b3504e03
JH
1963 taskqueue_enqueue(taskqueue_thread[mycpuid],
1964 &softc->sysctl_task);
62ade751 1965 }
e0fb398b
T
1966
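		/*
		 * If a non-zero TRIM range count was discovered earlier in
		 * the probe, advertise TRIM support to the disk layer and
		 * note it on the console.
		 */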
1967 if (softc->trim_max_ranges) {
1968 softc->disk.d_info.d_trimflag |= DA_FLAG_CAN_TRIM;
1969 kprintf("%s%d: supports TRIM\n",
1970 periph->periph_name,
1971 periph->unit_number);
1972 }
b05e84c9 1973 softc->state = DA_STATE_NORMAL;
984263bc
MD
1974 /*
1975 * Since our peripheral may be invalidated by an error
1976 * above or an external event, we must release our CCB
1977 * before releasing the probe lock on the peripheral.
1978 * The peripheral will only go away once the last lock
1979 * is removed, and we need it around for the CCB release
1980 * operation.
1981 */
1982 xpt_release_ccb(done_ccb);
2d19cdd3 1983 cam_periph_unhold(periph, 0);
984263bc
MD
1984 return;
1985 }
1986 case DA_CCB_WAITING:
1987 {
1988 /* Caller will release the CCB */
1989 wakeup(&done_ccb->ccb_h.cbfcnp);
1990 return;
1991 }
1992 case DA_CCB_DUMP:
1993 /* No-op. We're polling */
1994 return;
1995 default:
1996 break;
1997 }
1998 xpt_release_ccb(done_ccb);
1999}
2000
2001static int
2002daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
2003{
2004 struct da_softc *softc;
2005 struct cam_periph *periph;
62ade751 2006 int error;
984263bc
MD
2007
2008 periph = xpt_path_periph(ccb->ccb_h.path);
2009 softc = (struct da_softc *)periph->softc;
2010
2011 /*
2012 * Automatically detect devices that do not support
2013 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
2014 */
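	/*
	 * Both a CAM_REQ_INVALID status and an ILLEGAL REQUEST sense key
	 * are treated as "the 6 byte CDB was rejected"; cmd6workaround()
	 * rewrites the CDB to the 10 byte form when possible and returns
	 * ERESTART so the request is requeued and retried.
	 */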
2015 error = 0;
62ade751
MD
2016 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
2017 error = cmd6workaround(ccb);
2018 } else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
2019 CAM_SCSI_STATUS_ERROR)
2020 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
2021 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
2022 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
2023 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
2024 int sense_key, error_code, asc, ascq;
2025
984263bc
MD
2026 scsi_extract_sense(&ccb->csio.sense_data,
2027 &error_code, &sense_key, &asc, &ascq);
2028 if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
2029 error = cmd6workaround(ccb);
2030 }
2031 if (error == ERESTART)
2032 return (ERESTART);
2033
2034 /*
2035 * XXX
2036 * Until we have a better way of doing pack validation,
2037 * don't treat UAs as errors.
2038 */
2039 sense_flags |= SF_RETRY_UA;
2040 return(cam_periph_error(ccb, cam_flags, sense_flags,
2041 &softc->saved_ccb));
2042}
2043
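/*
 * Lock or unlock removable media via PREVENT ALLOW MEDIUM REMOVAL.  The
 * command is skipped when the pack is already in the requested state, and
 * DA_FLAG_PACK_LOCKED is only updated if the command succeeds.
 */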
2044static void
2045daprevent(struct cam_periph *periph, int action)
2046{
2047 struct da_softc *softc;
2048 union ccb *ccb;
2049 int error;
2050
2051 softc = (struct da_softc *)periph->softc;
2052
2053 if (((action == PR_ALLOW)
2054 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
2055 || ((action == PR_PREVENT)
2056 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
2057 return;
2058 }
2059
2060 ccb = cam_periph_getccb(periph, /*priority*/1);
2061
2062 scsi_prevent(&ccb->csio,
2063 /*retries*/1,
2064 /*cbcfp*/dadone,
2065 MSG_SIMPLE_Q_TAG,
2066 action,
2067 SSD_FULL_SIZE,
2068 5000);
2069
3f499af5
PA
2070 error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
2071 SF_RETRY_UA, &softc->device_stats);
984263bc
MD
2072
2073 if (error == 0) {
2074 if (action == PR_ALLOW)
2075 softc->flags &= ~DA_FLAG_PACK_LOCKED;
2076 else
2077 softc->flags |= DA_FLAG_PACK_LOCKED;
2078 }
2079
2080 xpt_release_ccb(ccb);
2081}
2082
f7b26992
MD
2083/*
2084 * Check media on open, e.g. card reader devices which had no initial media.
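 * The capacity is re-read and, if it changed, the new parameters are pushed
 * to the disk layer so freshly inserted media gets its slices and partitions
 * probed.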
2085 */
2086static int
2087dacheckmedia(struct cam_periph *periph)
2088{
2089 struct disk_params *dp;
2090 struct da_softc *softc;
2091 struct disk_info info;
2092 int error;
2093
2094 softc = (struct da_softc *)periph->softc;
2095 dp = &softc->params;
2096
2097 error = dagetcapacity(periph);
2098
2099 /*
2100 * Only reprobe on initial open and if the media is removable.
9670bdda
MD
2101 *
2102	 * NOTE: If we call disk_setdiskinfo() it takes the device probe
2103	 * a while to re-probe the slices and partitions, which can
2104	 * interfere with booting.  So avoid it if nothing has changed.
2105 * XXX
f7b26992
MD
2106 */
2107 if (softc->flags & DA_FLAG_OPEN)
2108 return (error);
2109 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
2110 return (error);
2111
2112 bzero(&info, sizeof(info));
2113 info.d_type = DTYPE_SCSI;
2114 info.d_serialno = xpt_path_serialno(periph->path);
2115
2116 if (error == 0) {
f7b26992
MD
2117 CAM_SIM_UNLOCK(periph->sim);
2118 info.d_media_blksize = softc->params.secsize;
2119 info.d_media_blocks = softc->params.sectors;
2120 info.d_media_size = 0;
2121 info.d_secpertrack = softc->params.secs_per_track;
2122 info.d_nheads = softc->params.heads;
2123 info.d_ncylinders = softc->params.cylinders;
2124 info.d_secpercyl = softc->params.heads *
2125 softc->params.secs_per_track;
2126 info.d_serialno = xpt_path_serialno(periph->path);
9670bdda
MD
2127 if (info.d_media_blocks != softc->disk.d_info.d_media_blocks) {
2128 kprintf("%s%d: open removable media: "
2129 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)\n",
2130 periph->periph_name, periph->unit_number,
2131 (uintmax_t)(((uintmax_t)dp->secsize *
2132 dp->sectors) / (1024*1024)),
2133 (uintmax_t)dp->sectors, dp->secsize,
2134 dp->heads, dp->secs_per_track, dp->cylinders);
2135 disk_setdiskinfo(&softc->disk, &info);
2136 }
f7b26992
MD
2137 CAM_SIM_LOCK(periph->sim);
2138 } else {
2139 kprintf("%s%d: open removable media: no media present\n",
2140 periph->periph_name, periph->unit_number);
2141 info.d_media_blksize = 512;
2142 disk_setdiskinfo(&softc->disk, &info);
2143 }
2144 return (error);
2145}
2146
bdd58e03
MD
2147static int
2148dagetcapacity(struct cam_periph *periph)
2149{
2150 struct da_softc *softc;
2151 union ccb *ccb;
2152 struct scsi_read_capacity_data *rcap;
0b0362e1 2153 struct scsi_read_capacity_data_16 *rcaplong;
bdd58e03
MD
2154 uint32_t block_len;
2155 uint64_t maxsector;
2156 int error;
2157
2158 softc = (struct da_softc *)periph->softc;
2159 block_len = 0;
2160 maxsector = 0;
2161 error = 0;
2162
2163 /* Do a read capacity */
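	/*
	 * READ CAPACITY(10) is tried first; the buffer is sized for the
	 * 16 byte variant so that, when the device reports 0xffffffff
	 * (capacity does not fit in 32 bits), READ CAPACITY(16) can be
	 * issued into the same allocation to fetch the 64 bit values.
	 */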
1c8b7a9a
PA
2164 rcap = (struct scsi_read_capacity_data *)kmalloc(sizeof(*rcaplong),
2165 M_SCSIDA, M_INTWAIT);
bdd58e03
MD
2166
2167 ccb = cam_periph_getccb(periph, /*priority*/1);
2168 scsi_read_capacity(&ccb->csio,
2169 /*retries*/4,
2170			   /*cbfcnp*/dadone,
2171 MSG_SIMPLE_Q_TAG,
2172 rcap,
2173 SSD_FULL_SIZE,
2174 /*timeout*/60000);
2175 ccb->ccb_h.ccb_bio = NULL;
2176
2177 error = cam_periph_runccb(ccb, daerror,
b05e84c9 2178 /*cam_flags*/CAM_RETRY_SELTO,
bdd58e03
MD
2179 /*sense_flags*/SF_RETRY_UA,
2180 &softc->device_stats);
2181
2182 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2183 cam_release_devq(ccb->ccb_h.path,
2184 /*relsim_flags*/0,
2185 /*reduction*/0,
2186 /*timeout*/0,
2187 /*getcount_only*/0);
2188
2189 if (error == 0) {
2190 block_len = scsi_4btoul(rcap->length);
2191 maxsector = scsi_4btoul(rcap->addr);
2192
2193 if (maxsector != 0xffffffff)
2194 goto done;
2195 } else
2196 goto done;
2197
0b0362e1 2198 rcaplong = (struct scsi_read_capacity_data_16 *)rcap;
bdd58e03
MD
2199
2200 scsi_read_capacity_16(&ccb->csio,
2201 /*retries*/ 4,
2202 /*cbfcnp*/ dadone,
2203 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2204 /*lba*/ 0,
2205 /*reladr*/ 0,
2206 /*pmi*/ 0,
2207 rcaplong,
2208 /*sense_len*/ SSD_FULL_SIZE,
2209 /*timeout*/ 60000);
2210 ccb->ccb_h.ccb_bio = NULL;
2211
2212 error = cam_periph_runccb(ccb, daerror,
b05e84c9 2213 /*cam_flags*/CAM_RETRY_SELTO,
bdd58e03
MD
2214 /*sense_flags*/SF_RETRY_UA,
2215 &softc->device_stats);
2216
2217 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2218 cam_release_devq(ccb->ccb_h.path,
2219 /*relsim_flags*/0,
2220 /*reduction*/0,
2221 /*timeout*/0,
2222 /*getcount_only*/0);
2223
2224 if (error == 0) {
2225 block_len = scsi_4btoul(rcaplong->length);
2226 maxsector = scsi_8btou64(rcaplong->addr);
2227 }
2228
2229done:
2230
2231 if (error == 0)
2232 dasetgeom(periph, block_len, maxsector);
2233
2234 xpt_release_ccb(ccb);
2235
1c8b7a9a 2236 kfree(rcap, M_SCSIDA);
bdd58e03
MD
2237
2238 return (error);
2239}
2240
984263bc 2241static void
bdd58e03 2242dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
984263bc
MD
2243{
2244 struct ccb_calc_geometry ccg;
2245 struct da_softc *softc;
2246 struct disk_params *dp;
2247
2248 softc = (struct da_softc *)periph->softc;
2249
2250 dp = &softc->params;
bdd58e03
MD
2251 dp->secsize = block_len;
2252 dp->sectors = maxsector + 1;
984263bc
MD
2253 /*
2254 * Have the controller provide us with a geometry
2255 * for this disk. The only time the geometry
2256 * matters is when we boot and the controller
2257 * is the only one knowledgeable enough to come
2258 * up with something that will make this a bootable
2259 * device.
2260 */
2261 xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
2262 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
2263 ccg.block_size = dp->secsize;
2264 ccg.volume_size = dp->sectors;
2265 ccg.heads = 0;
2266 ccg.secs_per_track = 0;
2267 ccg.cylinders = 0;
2268 xpt_action((union ccb*)&ccg);
eac73adf
PA
2269 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2270 /*
2271		 * We don't know what went wrong here, so just pick
2272		 * a geometry that avoids nasty things like division
2273		 * by zero.
2274 */
2275 dp->heads = 255;
2276 dp->secs_per_track = 255;
2277 dp->cylinders = dp->sectors / (255 * 255);
2278 if (dp->cylinders == 0) {
2279 dp->cylinders = 1;
2280 }
2281 } else {
2282 dp->heads = ccg.heads;
2283 dp->secs_per_track = ccg.secs_per_track;
2284 dp->cylinders = ccg.cylinders;
2285 }
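	/*
	 * Example of the fallback path (illustrative only): a 4194304
	 * sector disk that gets no geometry from the controller ends up
	 * with 255 heads, 255 sectors per track and
	 * 4194304 / (255 * 255) = 64 cylinders.
	 */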
984263bc
MD
2286}
2287
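/*
 * Periodic callout.  If the device has stayed busy since the last interval
 * without an ordered tag being issued, set DA_FLAG_NEED_OTAG so the next
 * command is sent with an ORDERED tag; this keeps a drive's internal queue
 * sorting from postponing older requests indefinitely.
 */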
2288static void
2289dasendorderedtag(void *arg)
2290{
1c8b7a9a 2291 struct da_softc *softc = arg;
984263bc 2292
066e560b 2293 if (da_send_ordered) {
1c8b7a9a
PA
2294 if ((softc->ordered_tag_count == 0)
2295 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
2296 softc->flags |= DA_FLAG_NEED_OTAG;
984263bc 2297 }
af0aa0ac 2298 if (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr)
1c8b7a9a
PA
2299 softc->flags &= ~DA_FLAG_WENT_IDLE;
2300
2301 softc->ordered_tag_count = 0;
984263bc 2302 }
1c8b7a9a
PA
2303 /* Queue us up again */
2304 callout_reset(&softc->sendordered_c,
2305 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL,
2306 dasendorderedtag, softc);
984263bc
MD
2307}
2308
2309/*
2310 * Step through all DA peripheral drivers, and if the device is still open,
2311 * sync the disk cache to physical media.
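 * The request is built with DA_CCB_DUMP state and driven to completion with
 * xpt_polled_action() since normal interrupt-driven completion may no longer
 * be available at shutdown time.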
2312 */
2313static void
2314dashutdown(void * arg, int howto)
2315{
2316 struct cam_periph *periph;
2317 struct da_softc *softc;
2318
234289a4 2319 TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
984263bc 2320 union ccb ccb;
234289a4 2321
1c8b7a9a 2322 cam_periph_lock(periph);
984263bc
MD
2323 softc = (struct da_softc *)periph->softc;
2324
2325 /*
2326 * We only sync the cache if the drive is still open, and
2327		 * if the drive is capable of it.
2328 */
2329 if (((softc->flags & DA_FLAG_OPEN) == 0)
1c8b7a9a
PA
2330 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
2331 cam_periph_unlock(periph);
984263bc 2332 continue;
1c8b7a9a 2333 }
984263bc
MD
2334
2335 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
2336
2337 ccb.ccb_h.ccb_state = DA_CCB_DUMP;
2338 scsi_synchronize_cache(&ccb.csio,
2339 /*retries*/1,
2340 /*cbfcnp*/dadone,
2341 MSG_SIMPLE_Q_TAG,
2342 /*begin_lba*/0, /* whole disk */
2343 /*lb_count*/0,
2344 SSD_FULL_SIZE,
19a136fb 2345 60 * 60 * 1000);
984263bc
MD
2346
2347 xpt_polled_action(&ccb);
2348
2349 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2350 if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
2351 CAM_SCSI_STATUS_ERROR)
2352 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
2353 int error_code, sense_key, asc, ascq;
2354
2355 scsi_extract_sense(&ccb.csio.sense_data,
2356 &error_code, &sense_key,
2357 &asc, &ascq);
2358
2359 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
2360 scsi_sense_print(&ccb.csio);
2361 } else {
1c8b7a9a
PA
2362 xpt_print(periph->path, "Synchronize "
2363 "cache failed, status == 0x%x, scsi status "
2364 "== 0x%x\n", ccb.ccb_h.status,
2365 ccb.csio.scsi_status);
984263bc
MD
2366 }
2367 }
2368
2369 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
2370 cam_release_devq(ccb.ccb_h.path,
2371 /*relsim_flags*/0,
2372 /*reduction*/0,
2373 /*timeout*/0,
2374 /*getcount_only*/0);
2375
1c8b7a9a 2376 cam_periph_unlock(periph);
984263bc
MD
2377 }
2378}
2379
2380#else /* !_KERNEL */
2381
2382/*
2383 * XXX This is only left out of the kernel build to silence warnings. If,
2384 * for some reason this function is used in the kernel, the ifdefs should
2385 * be moved so it is included both in the kernel and userland.
2386 */
2387void
2388scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
2389 void (*cbfcnp)(struct cam_periph *, union ccb *),
2390 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
2391 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
2392 u_int32_t timeout)
2393{
2394 struct scsi_format_unit *scsi_cmd;
2395
2396 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
2397 scsi_cmd->opcode = FORMAT_UNIT;
2398 scsi_cmd->byte2 = byte2;
2399 scsi_ulto2b(ileave, scsi_cmd->interleave);
2400
2401 cam_fill_csio(csio,
2402 retries,
2403 cbfcnp,
2404 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
2405 tag_action,
2406 data_ptr,
2407 dxfer_len,
2408 sense_len,
2409 sizeof(*scsi_cmd),
2410 timeout);
2411}
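
/*
 * Hypothetical userland usage sketch (not part of this driver): with the
 * usual libcam flow (cam_open_device(), cam_getccb(), cam_send_ccb()) a
 * format utility might invoke scsi_format_unit() roughly as below.  The
 * device path and timeout are illustrative assumptions only; full CCB
 * setup and error handling are omitted.
 */
#if 0
	struct cam_device *dev;
	union ccb *ccb;

	dev = cam_open_device("/dev/da0", O_RDWR);	/* assumed path */
	ccb = cam_getccb(dev);

	scsi_format_unit(&ccb->csio,
			 /*retries*/ 1,
			 /*cbfcnp*/ NULL,
			 /*tag_action*/ MSG_SIMPLE_Q_TAG,
			 /*byte2*/ 0,
			 /*ileave*/ 0,
			 /*data_ptr*/ NULL,
			 /*dxfer_len*/ 0,
			 /*sense_len*/ SSD_FULL_SIZE,
			 /*timeout*/ 60 * 60 * 1000);

	if (cam_send_ccb(dev, ccb) < 0)
		warn("cam_send_ccb");

	cam_freeccb(ccb);
	cam_close_device(dev);
#endif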
2412
2413#endif /* _KERNEL */