kernel/cam: Make si_iosize_max overridable by drivers.
[dragonfly.git] / sys / bus / cam / scsi / scsi_da.c
1/*
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * Copyright (c) 1997 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: src/sys/cam/scsi/scsi_da.c,v 1.42.2.46 2003/10/21 22:18:19 thomas Exp $
29 */
30
31#include <sys/param.h>
32
33#ifdef _KERNEL
34
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/buf.h>
38#include <sys/sysctl.h>
39#include <sys/taskqueue.h>
40#include <sys/lock.h>
41#include <sys/conf.h>
42#include <sys/devicestat.h>
43#include <sys/disk.h>
44#include <sys/dtype.h>
45#include <sys/eventhandler.h>
46#include <sys/malloc.h>
47#include <sys/cons.h>
48#include <sys/proc.h>
49#include <sys/ioctl_compat.h>
50
51#include <sys/buf2.h>
52#include <sys/thread2.h>
53#include <sys/mplock2.h>
54
55#endif /* _KERNEL */
56
57#ifdef _KERNEL
58#include <vm/pmap.h>
59#endif
60
61#ifndef _KERNEL
62#include <stdio.h>
63#include <string.h>
64#endif /* _KERNEL */
65
66#include <sys/camlib.h>
67#include "../cam.h"
68#include "../cam_ccb.h"
69#include "../cam_extend.h"
70#include "../cam_periph.h"
71#include "../cam_xpt_periph.h"
72#include "../cam_sim.h"
73
74#include "scsi_message.h"
75
76#ifndef _KERNEL
77#include "scsi_da.h"
78#endif /* !_KERNEL */
79
80#ifdef _KERNEL
81typedef enum {
82 DA_STATE_PROBE,
83 DA_STATE_PROBE2,
84 DA_STATE_NORMAL
85} da_state;
86
87typedef enum {
88 DA_FLAG_PACK_INVALID = 0x001,
89 DA_FLAG_NEW_PACK = 0x002,
90 DA_FLAG_PACK_LOCKED = 0x004,
91 DA_FLAG_PACK_REMOVABLE = 0x008,
92 DA_FLAG_TAGGED_QUEUING = 0x010,
93 DA_FLAG_NEED_OTAG = 0x020,
94 DA_FLAG_WENT_IDLE = 0x040,
95 DA_FLAG_RETRY_UA = 0x080,
96 DA_FLAG_OPEN = 0x100,
97 DA_FLAG_SCTX_INIT = 0x200,
98 DA_FLAG_RD_LIMIT = 0x400,
99 DA_FLAG_WR_LIMIT = 0x800,
100 DA_FLAG_CAN_TRIM = 0x1000
101} da_flags;
102
103typedef enum {
104 DA_Q_NONE = 0x00,
105 DA_Q_NO_SYNC_CACHE = 0x01,
106 DA_Q_NO_6_BYTE = 0x02,
107 DA_Q_NO_PREVENT = 0x04
108} da_quirks;
109
110typedef enum {
111 DA_CCB_PROBE = 0x01,
112 DA_CCB_PROBE2 = 0x02,
113 DA_CCB_BUFFER_IO = 0x03,
114 DA_CCB_WAITING = 0x04,
115 DA_CCB_DUMP = 0x05,
116 DA_CCB_TRIM = 0x06,
117 DA_CCB_TYPE_MASK = 0x0F,
118 DA_CCB_RETRY_UA = 0x10
119} da_ccb_state;
120
121/* Offsets into our private area for storing information */
122#define ccb_state ppriv_field0
123#define ccb_bio ppriv_ptr1
124
125struct disk_params {
126 u_int8_t heads;
127 u_int32_t cylinders;
128 u_int8_t secs_per_track;
129 u_int32_t secsize; /* Number of bytes/sector */
130 u_int64_t sectors; /* total number of sectors */
131};
132
133#define TRIM_MAX_BLOCKS 8
134#define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * 64)
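/*
 * Layout used by dastart() when it builds a TRIM command: each range
 * descriptor in trim_request.data is 8 bytes (a 48-bit starting LBA
 * followed by a 16-bit sector count), 64 descriptors fit in one
 * 512-byte block, and at most TRIM_MAX_BLOCKS such blocks are sent
 * in a single command.
 */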
135struct trim_request {
136 uint8_t data[TRIM_MAX_RANGES * 8];
137 struct bio *bios[TRIM_MAX_RANGES];
138};
139
140struct da_softc {
141 struct bio_queue_head bio_queue_rd;
142 struct bio_queue_head bio_queue_wr;
143 struct bio_queue_head bio_queue_trim;
144 struct devstat device_stats;
145 SLIST_ENTRY(da_softc) links;
146 LIST_HEAD(, ccb_hdr) pending_ccbs;
147 da_state state;
148 da_flags flags;
149 da_quirks quirks;
150 int minimum_cmd_size;
151 int ordered_tag_count;
152 int outstanding_cmds_rd;
153 int outstanding_cmds_wr;
154 int trim_max_ranges;
155 int trim_running;
156 int trim_enabled;
157 struct disk_params params;
158 struct disk disk;
159 union ccb saved_ccb;
160 struct task sysctl_task;
161 struct sysctl_ctx_list sysctl_ctx;
162 struct sysctl_oid *sysctl_tree;
163 struct callout sendordered_c;
164 struct trim_request trim_req;
165};
166
167struct da_quirk_entry {
168 struct scsi_inquiry_pattern inq_pat;
169 da_quirks quirks;
170};
171
172static const char quantum[] = "QUANTUM";
173static const char microp[] = "MICROP";
174
175static struct da_quirk_entry da_quirk_table[] =
176{
177 /* SPI, FC devices */
178 {
179 /*
180 * Fujitsu M2513A MO drives.
181 * Tested devices: M2513A2 firmware versions 1200 & 1300.
182 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
183 * Reported by: W.Scholten <whs@xs4all.nl>
184 */
185 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
186 /*quirks*/ DA_Q_NO_SYNC_CACHE
187 },
188 {
189 /* See above. */
190 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
191 /*quirks*/ DA_Q_NO_SYNC_CACHE
192 },
193 {
194 /*
195 * This particular Fujitsu drive doesn't like the
196 * synchronize cache command.
197 * Reported by: Tom Jackson <toj@gorilla.net>
198 */
199 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
200 /*quirks*/ DA_Q_NO_SYNC_CACHE
201 },
202 {
203 /*
204 * This drive doesn't like the synchronize cache command
205 * either. Reported by: Matthew Jacob <mjacob@feral.com>
206 * in NetBSD PR kern/6027, August 24, 1998.
207 */
208 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
209 /*quirks*/ DA_Q_NO_SYNC_CACHE
210 },
211 {
212 /*
213 * This drive doesn't like the synchronize cache command
214 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
215 * (PR 8882).
216 */
217 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
218 /*quirks*/ DA_Q_NO_SYNC_CACHE
219 },
220 {
221 /*
222 * Doesn't like the synchronize cache command.
223 * Reported by: Blaz Zupan <blaz@gold.amis.net>
224 */
225 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
226 /*quirks*/ DA_Q_NO_SYNC_CACHE
227 },
228 {
229 /*
230 * Doesn't like the synchronize cache command.
231 * Reported by: Blaz Zupan <blaz@gold.amis.net>
232 */
233 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
234 /*quirks*/ DA_Q_NO_SYNC_CACHE
235 },
236 {
237 /*
238 * Doesn't like the synchronize cache command.
239 */
240 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
241 /*quirks*/ DA_Q_NO_SYNC_CACHE
242 },
243 {
244 /*
245 * Doesn't like the synchronize cache command.
246 * Reported by: walter@pelissero.de
247 */
248 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
249 /*quirks*/ DA_Q_NO_SYNC_CACHE
250 },
251 {
252 /*
253 * Doesn't work correctly with 6 byte reads/writes.
254 * Returns illegal request, and points to byte 9 of the
255 * 6-byte CDB.
256 * Reported by: Adam McDougall <bsdx@spawnet.com>
257 */
258 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
259 /*quirks*/ DA_Q_NO_6_BYTE
260 },
261 {
262 /* See above. */
263 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
264 /*quirks*/ DA_Q_NO_6_BYTE
265 },
266 {
267 /*
268 * Doesn't like the synchronize cache command.
269 * Reported by: walter@pelissero.de
270 */
271 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
272 /*quirks*/ DA_Q_NO_SYNC_CACHE
273 },
274 {
275 /*
276 * The CISS RAID controllers do not support SYNC_CACHE
277 */
278 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
279 /*quirks*/ DA_Q_NO_SYNC_CACHE
280 },
281 {
282 /*
283 * The same goes for the mly(4) controllers
284 */
285 {T_DIRECT, SIP_MEDIA_FIXED, "MLY*", "*", "MYLX"},
286 /*quirks*/ DA_Q_NO_SYNC_CACHE
287 },
288 /*
289 * USB mass storage devices supported by umass(4)
290 *
291 * NOTE: USB attachments automatically set DA_Q_NO_SYNC_CACHE so
292 * it does not have to be specified here.
293 */
294 {
295 /*
296 * Creative Nomad MUVO mp3 player (USB)
297 * PR: kern/53094
298 */
299 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
300 /*quirks*/ DA_Q_NO_PREVENT
301 },
302 {
303 /*
304 * Sigmatel USB Flash MP3 Player
305 * PR: kern/57046
306 */
307 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
308 /*quirks*/ DA_Q_NO_PREVENT
309 },
310 {
311 /*
312 * SEAGRAND NP-900 MP3 Player
313 * PR: kern/64563
314 */
315 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
316 /*quirks*/ DA_Q_NO_PREVENT
317 },
318 {
319 /*
320 * Creative MUVO Slim mp3 player (USB)
321 * PR: usb/86131
322 */
323 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
324 "*"}, /*quirks*/ DA_Q_NO_PREVENT
325 },
326 {
327 /*
328 * Philips USB Key Audio KEY013
329 * PR: usb/68412
330 */
331 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
332 /*quirks*/ DA_Q_NO_PREVENT
333 },
334};
335
336static d_open_t daopen;
337static d_close_t daclose;
338static d_strategy_t dastrategy;
339static d_dump_t dadump;
340static d_ioctl_t daioctl;
341static periph_init_t dainit;
342static void daasync(void *callback_arg, u_int32_t code,
343 struct cam_path *path, void *arg);
344static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
345static periph_ctor_t daregister;
346static periph_dtor_t dacleanup;
347static periph_start_t dastart;
348static periph_oninv_t daoninvalidate;
349static void dadone(struct cam_periph *periph,
350 union ccb *done_ccb);
351static int daerror(union ccb *ccb, u_int32_t cam_flags,
352 u_int32_t sense_flags);
353static void daprevent(struct cam_periph *periph, int action);
354static int dagetcapacity(struct cam_periph *periph);
355static int dacheckmedia(struct cam_periph *periph);
356static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
357 uint64_t maxsector);
358static void daflushbioq(struct bio_queue_head *bioq, int error);
359static timeout_t dasendorderedtag;
360static void dashutdown(void *arg, int howto);
361
362#ifndef DA_DEFAULT_TIMEOUT
363#define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
364#endif
365
366#ifndef DA_DEFAULT_RETRY
367#define DA_DEFAULT_RETRY 4
368#endif
369
370#ifndef DA_DEFAULT_SEND_ORDERED
371#define DA_DEFAULT_SEND_ORDERED 1
372#endif
373
374static int da_retry_count = DA_DEFAULT_RETRY;
375static int da_default_timeout = DA_DEFAULT_TIMEOUT;
376static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
377static struct callout dasendorderedtag_ch;
378
379SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
380 "CAM Direct Access Disk driver");
381SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
382 &da_retry_count, 0, "Normal I/O retry count");
383TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
384SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
385 &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
386TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
387SYSCTL_INT(_kern_cam_da, OID_AUTO, da_send_ordered, CTLFLAG_RW,
388 &da_send_ordered, 0, "Send Ordered Tags");
389TUNABLE_INT("kern.cam.da.da_send_ordered", &da_send_ordered);
390
391/*
392 * DA_ORDEREDTAG_INTERVAL determines how often, relative
393 * to the default timeout, we check to see whether an ordered
394 * tagged transaction is appropriate to prevent simple tag
395 * starvation. Since we'd like to ensure that there is at least
396 * 1/2 of the timeout length left for a starved transaction to
397 * complete after we've sent an ordered tag, we must poll at least
398 * four times in every timeout period. This takes care of the worst
399 * case where a starved transaction starts during an interval that
400 * meets the requirement "don't send an ordered tag" test so it takes
401 * us two intervals to determine that a tag must be sent.
402 */
403#ifndef DA_ORDEREDTAG_INTERVAL
404#define DA_ORDEREDTAG_INTERVAL 4
405#endif
406
407static struct periph_driver dadriver =
408{
409 dainit, "da",
410 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
411};
412
413PERIPHDRIVER_DECLARE(da, dadriver);
414
415static struct dev_ops da_ops = {
416 { "da", 0, D_DISK | D_MPSAFE },
417 .d_open = daopen,
418 .d_close = daclose,
419 .d_read = physread,
420 .d_write = physwrite,
421 .d_strategy = dastrategy,
422 .d_dump = dadump,
423 .d_ioctl = daioctl
424};
425
426static struct extend_array *daperiphs;
427
428MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
429
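/*
 * daioctl() currently implements only IOCTLTRIM: the byte range passed
 * in from userland is split into BUF_CMD_FREEBLKS buffers of at most
 * 0x7FFF8000 bytes each and pushed through dev_dstrategy() synchronously.
 */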
430static int
431daioctl(struct dev_ioctl_args *ap)
432{
433 int unit;
434 int error = 0;
435 struct buf *bp;
436 struct cam_periph *periph;
437 int byte_count;
438 struct da_softc * softc;
439
440 off_t *del_num = (off_t*)ap->a_data;
441 off_t bytes_left;
442 off_t bytes_start;
443
444 cdev_t dev = ap->a_head.a_dev;
445
446
447 unit = dkunit(dev);
448 periph = cam_extend_get(daperiphs, unit);
449 if (periph == NULL)
450 return(ENXIO);
451 softc = (struct da_softc *)periph->softc;
452
453 switch (ap->a_cmd) {
454 case IOCTLTRIM:
455 {
456
457 bytes_left = del_num[1];
458 bytes_start = del_num[0];
459
460 /* TRIM occurs on 512-byte sectors. */
461 KKASSERT((bytes_left % 512) == 0);
462 KKASSERT((bytes_start % 512) == 0);
463
464
465 /* Break TRIM up into int-sized commands because of b_bcount */
466 while(bytes_left) {
467
468 /*
469 * Rather than squeezing more blocks into b_bcount
470 * and having to break up the TRIM request in dastart(),
471 * we ensure we can always TRIM this many bytes with one
472 * TRIM command (this happens if the device only
473 * supports one TRIM block).
474 *
475 * With a minimum TRIM block size of 1, one TRIM command can
476 * free 4194240 blocks (64*65535): each LBA range can address
477 * 65535 blocks and there are 64 such ranges in a 512-byte
478 * block. And 4194240 * 512 = 0x7FFF8000.
479 *
480 */
481 byte_count = MIN(bytes_left,0x7FFF8000);
482 bp = getnewbuf(0,0,0,1);
483
484 bp->b_cmd = BUF_CMD_FREEBLKS;
485 bp->b_bio1.bio_offset = bytes_start;
486 bp->b_bcount = byte_count;
487 bp->b_bio1.bio_flags |= BIO_SYNC;
488 bp->b_bio1.bio_done = biodone_sync;
489
490 dev_dstrategy(ap->a_head.a_dev, &bp->b_bio1);
491
492 if (biowait(&bp->b_bio1, "TRIM")) {
493 kprintf("Error:%d\n", bp->b_error);
494 return(bp->b_error ? bp->b_error : EIO);
495 }
496 brelse(bp);
497 bytes_left -= byte_count;
498 bytes_start += byte_count;
499 }
500 break;
501 }
502 default:
503 return(EINVAL);
504 }
505
506 return(error);
507}
508
509static int
510daopen(struct dev_open_args *ap)
511{
512 cdev_t dev = ap->a_head.a_dev;
513 struct cam_periph *periph;
514 struct da_softc *softc;
515 struct disk_info info;
516 int unit;
517 int error;
518
519 unit = dkunit(dev);
520 periph = cam_extend_get(daperiphs, unit);
521 if (periph == NULL) {
522 return (ENXIO);
523 }
524
525 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
526 return(ENXIO);
527 }
528
529 cam_periph_lock(periph);
530 if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
531 cam_periph_unlock(periph);
532 cam_periph_release(periph);
533 return (error);
534 }
535
536 unit = periph->unit_number;
537 softc = (struct da_softc *)periph->softc;
538
539 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
540 ("daopen: dev=%s (unit %d)\n", devtoname(dev),
541 unit));
542
543 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
544 /* Invalidate our pack information. */
545 disk_invalidate(&softc->disk);
546 softc->flags &= ~DA_FLAG_PACK_INVALID;
547 }
548
549 error = dacheckmedia(periph);
550 softc->flags |= DA_FLAG_OPEN;
551
552 if (error == 0) {
553 struct ccb_getdev cgd;
554
555 /* Build disk information structure */
556 bzero(&info, sizeof(info));
557 info.d_type = DTYPE_SCSI;
558
559 /*
560 * Grab the inquiry data to get the vendor and product names.
561 * Put them in the typename and packname for the label.
562 */
563 xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
564 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
565 xpt_action((union ccb *)&cgd);
566
984263bc
MD
567 /*
568 * Check to see whether or not the blocksize is set yet.
569 * If it isn't, set it and then clear the blocksize
570 * unavailable flag for the device statistics.
571 */
572 if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
573 softc->device_stats.block_size = softc->params.secsize;
574 softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
575 }
576 }
577
578 if (error == 0) {
579 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
580 (softc->quirks & DA_Q_NO_PREVENT) == 0)
581 daprevent(periph, PR_PREVENT);
582 } else {
583 softc->flags &= ~DA_FLAG_OPEN;
584 cam_periph_release(periph);
585 }
586 cam_periph_unhold(periph, 1);
587 return (error);
588}
589
590static int
591daclose(struct dev_close_args *ap)
592{
593 cdev_t dev = ap->a_head.a_dev;
594 struct cam_periph *periph;
595 struct da_softc *softc;
596 int unit;
597 int error;
598
599 unit = dkunit(dev);
600 periph = cam_extend_get(daperiphs, unit);
601 if (periph == NULL)
602 return (ENXIO);
603
604 cam_periph_lock(periph);
605 if ((error = cam_periph_hold(periph, 0)) != 0) {
606 cam_periph_unlock(periph);
607 cam_periph_release(periph);
608 return (error);
609 }
610
611 softc = (struct da_softc *)periph->softc;
612
613 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
614 union ccb *ccb;
615
616 ccb = cam_periph_getccb(periph, /*priority*/1);
617
618 scsi_synchronize_cache(&ccb->csio,
619 /*retries*/1,
620 /*cbfcnp*/dadone,
621 MSG_SIMPLE_Q_TAG,
622 /*begin_lba*/0,/* Cover the whole disk */
623 /*lb_count*/0,
624 SSD_FULL_SIZE,
625 5 * 60 * 1000);
626
627 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
628 /*sense_flags*/SF_RETRY_UA,
629 &softc->device_stats);
630
631 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
632 if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
633 CAM_SCSI_STATUS_ERROR) {
634 int asc, ascq;
635 int sense_key, error_code;
636
637 scsi_extract_sense(&ccb->csio.sense_data,
638 &error_code,
639 &sense_key,
640 &asc, &ascq);
641 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
642 scsi_sense_print(&ccb->csio);
643 } else {
644 xpt_print(periph->path, "Synchronize cache "
645 "failed, status == 0x%x, scsi status == "
646 "0x%x\n", ccb->csio.ccb_h.status,
647 ccb->csio.scsi_status);
648 }
649 }
650
651 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
652 cam_release_devq(ccb->ccb_h.path,
653 /*relsim_flags*/0,
654 /*reduction*/0,
655 /*timeout*/0,
656 /*getcount_only*/0);
657
658 xpt_release_ccb(ccb);
659
660 }
661
662 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
663 if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
664 daprevent(periph, PR_ALLOW);
665 /*
666 * If we've got removable media, mark the blocksize as
667 * unavailable, since it could change when new media is
668 * inserted.
669 */
670 softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
671 }
672
673 /*
674 * Don't compound any ref counting software bugs with more.
675 */
676 if (softc->flags & DA_FLAG_OPEN) {
677 softc->flags &= ~DA_FLAG_OPEN;
678 cam_periph_release(periph);
679 } else {
680 xpt_print(periph->path,
681 "daclose() called on an already closed device!\n");
682 }
683 cam_periph_unhold(periph, 1);
684 return (0);
685}
686
687/*
688 * Actually translate the requested transfer into one the physical driver
689 * can understand. The transfer is described by a buf and will include
690 * only one physical transfer.
691 */
692static int
693dastrategy(struct dev_strategy_args *ap)
694{
695 cdev_t dev = ap->a_head.a_dev;
696 struct bio *bio = ap->a_bio;
697 struct buf *bp = bio->bio_buf;
698 struct cam_periph *periph;
699 struct da_softc *softc;
700 u_int unit;
701 u_int part;
702
703 unit = dkunit(dev);
704 part = dkpart(dev);
705 periph = cam_extend_get(daperiphs, unit);
706 if (periph == NULL) {
707 bp->b_error = ENXIO;
708 goto bad;
709 }
710 softc = (struct da_softc *)periph->softc;
711
712 cam_periph_lock(periph);
713
714#if 0
715 /*
716 * check it's not too big a transfer for our adapter
717 */
718 scsi_minphys(bp, &sd_switch);
719#endif
720
721 /*
722 * Mask interrupts so that the pack cannot be invalidated until
723 * after we are in the queue. Otherwise, we might not properly
724 * clean up one of the buffers.
725 */
726
727 /*
728 * If the device has been made invalid, error out
729 */
730 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
731 cam_periph_unlock(periph);
732 bp->b_error = ENXIO;
733 goto bad;
734 }
735
736 /*
737 * Place it in the queue of disk activities for this disk
738 */
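 /*
  * Reads, writes/flushes and TRIMs are kept on separate queues so
  * that dastart() can throttle writes independently of reads and
  * batch TRIM requests together.
  */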
739 if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)
740 bioqdisksort(&softc->bio_queue_wr, bio);
741 else if (bp->b_cmd == BUF_CMD_FREEBLKS)
742 bioqdisksort(&softc->bio_queue_trim, bio);
743 else
744 bioqdisksort(&softc->bio_queue_rd, bio);
745
746 /*
747 * Schedule ourselves for performing the work.
748 */
749 xpt_schedule(periph, /* XXX priority */1);
750 cam_periph_unlock(periph);
751
752 return(0);
753bad:
754 bp->b_flags |= B_ERROR;
755
756 /*
757 * Correctly set the buf to indicate a completed xfer
758 */
759 bp->b_resid = bp->b_bcount;
760 biodone(bio);
761 return(0);
762}
763
764static int
765dadump(struct dev_dump_args *ap)
766{
767 cdev_t dev = ap->a_head.a_dev;
768 struct cam_periph *periph;
769 struct da_softc *softc;
770 u_int unit;
771 u_int32_t secsize;
772 struct ccb_scsiio csio;
773
774 unit = dkunit(dev);
775 periph = cam_extend_get(daperiphs, unit);
776 if (periph == NULL)
777 return (ENXIO);
778
779 softc = (struct da_softc *)periph->softc;
780 cam_periph_lock(periph);
781 secsize = softc->params.secsize; /* XXX: or ap->a_secsize? */
782
783 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
784 cam_periph_unlock(periph);
785 return (ENXIO);
786 }
787
788 /*
789 * because length == 0 means we are supposed to flush cache, we only
790 * try to write something if length > 0.
791 */
792 if (ap->a_length > 0) {
793 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
794 csio.ccb_h.flags |= CAM_POLLED;
795 csio.ccb_h.ccb_state = DA_CCB_DUMP;
796 scsi_read_write(&csio,
797 /*retries*/1,
798 dadone,
799 MSG_ORDERED_Q_TAG,
800 /*read*/FALSE,
801 /*byte2*/0,
802 /*minimum_cmd_size*/ softc->minimum_cmd_size,
803 ap->a_offset / secsize,
804 ap->a_length / secsize,
805 /*data_ptr*/(u_int8_t *) ap->a_virtual,
806 /*dxfer_len*/ap->a_length,
807 /*sense_len*/SSD_FULL_SIZE,
808 DA_DEFAULT_TIMEOUT * 1000);
809 xpt_polled_action((union ccb *)&csio);
810
811 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
812 kprintf("Aborting dump due to I/O error.\n");
813 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
814 CAM_SCSI_STATUS_ERROR)
815 scsi_sense_print(&csio);
816 else
817 kprintf("status == 0x%x, scsi status == 0x%x\n",
818 csio.ccb_h.status, csio.scsi_status);
819 return(EIO);
820 }
821 cam_periph_unlock(periph);
822 return 0;
823 }
824
825 /*
826 * Sync the disk cache contents to the physical media.
827 */
828 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
829
830 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
831 csio.ccb_h.ccb_state = DA_CCB_DUMP;
832 scsi_synchronize_cache(&csio,
833 /*retries*/1,
834 /*cbfcnp*/dadone,
835 MSG_SIMPLE_Q_TAG,
836 /*begin_lba*/0,/* Cover the whole disk */
837 /*lb_count*/0,
838 SSD_FULL_SIZE,
839 5 * 60 * 1000);
840 xpt_polled_action((union ccb *)&csio);
841
842 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
843 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
844 CAM_SCSI_STATUS_ERROR) {
845 int asc, ascq;
846 int sense_key, error_code;
847
848 scsi_extract_sense(&csio.sense_data,
849 &error_code,
850 &sense_key,
851 &asc, &ascq);
852 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
853 scsi_sense_print(&csio);
854 } else {
855 xpt_print(periph->path, "Synchronize cache "
856 "failed, status == 0x%x, scsi status == "
857 "0x%x\n", csio.ccb_h.status,
858 csio.scsi_status);
859 }
860 }
861 }
862 cam_periph_unlock(periph);
863 return (0);
864}
865
866static void
867dainit(void)
868{
869 cam_status status;
870
871 /*
872 * Create our extend array for storing the devices we attach to.
873 */
874 daperiphs = cam_extend_new();
875 if (daperiphs == NULL) {
876 kprintf("da: Failed to alloc extend array!\n");
877 return;
878 }
879
880 callout_init(&dasendorderedtag_ch);
881
882 /*
883 * Install a global async callback. This callback will
884 * receive async callbacks like "new device found".
885 */
886 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
887
888 if (status != CAM_REQ_CMP) {
85f8e2ea 889 kprintf("da: Failed to attach master async callback "
984263bc 890 "due to status 0x%x!\n", status);
066e560b 891 } else if (da_send_ordered) {
984263bc 892
984263bc
MD
893 /* Register our shutdown event handler */
894 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
895 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
896 kprintf("dainit: shutdown event registration failed!\n");
897 }
898}
899
900static void
901daoninvalidate(struct cam_periph *periph)
902{
984263bc 903 struct da_softc *softc;
984263bc
MD
904
905 softc = (struct da_softc *)periph->softc;
906
907 /*
908 * De-register any async callbacks.
909 */
910 xpt_register_async(0, daasync, periph, periph->path);
911
912 softc->flags |= DA_FLAG_PACK_INVALID;
913
914 /*
915 * Return all queued I/O with ENXIO.
916 * XXX Handle any transactions queued to the card
917 * with XPT_ABORT_CCB.
918 */
919 daflushbioq(&softc->bio_queue_trim, ENXIO);
920 daflushbioq(&softc->bio_queue_wr, ENXIO);
921 daflushbioq(&softc->bio_queue_rd, ENXIO);
922 xpt_print(periph->path, "lost device\n");
923}
924
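/*
 * Complete every bio still queued on bioq with the given error.
 */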
925static void
926daflushbioq(struct bio_queue_head *bioq, int error)
927{
928 struct bio *q_bio;
929 struct buf *q_bp;
930
931 while ((q_bio = bioq_first(bioq)) != NULL){
932 bioq_remove(bioq, q_bio);
933 q_bp = q_bio->bio_buf;
934 q_bp->b_resid = q_bp->b_bcount;
935 q_bp->b_error = error;
936 q_bp->b_flags |= B_ERROR;
937 biodone(q_bio);
938 }
939}
940
941static void
942dacleanup(struct cam_periph *periph)
943{
944 struct da_softc *softc;
945
946 softc = (struct da_softc *)periph->softc;
947
948 devstat_remove_entry(&softc->device_stats);
949 cam_extend_release(daperiphs, periph->unit_number);
950 xpt_print(periph->path, "removing device entry\n");
951 /*
952 * If we can't free the sysctl tree, oh well...
953 */
954 if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
955 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
956 xpt_print(periph->path, "can't remove sysctl context\n");
957 }
958 periph->softc = NULL;
959 if (softc->disk.d_rawdev) {
960 cam_periph_unlock(periph);
961 disk_destroy(&softc->disk);
962 cam_periph_lock(periph);
963 }
964
965 callout_stop(&softc->sendordered_c);
966 kfree(softc, M_DEVBUF);
967}
968
969static void
970daasync(void *callback_arg, u_int32_t code,
971 struct cam_path *path, void *arg)
972{
973 struct cam_periph *periph;
974
975 periph = (struct cam_periph *)callback_arg;
976
977 switch (code) {
978 case AC_FOUND_DEVICE:
979 {
980 struct ccb_getdev *cgd;
981 struct cam_sim *sim;
982 cam_status status;
983
984 cgd = (struct ccb_getdev *)arg;
985 if (cgd == NULL)
986 break;
987
988 if (SID_TYPE(&cgd->inq_data) != T_DIRECT
989 && SID_TYPE(&cgd->inq_data) != T_RBC
990 && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
991 break;
992
993 /*
994 * Don't complain if a valid peripheral is already attached.
995 */
996 periph = cam_periph_find(cgd->ccb_h.path, "da");
997 if (periph && (periph->flags & CAM_PERIPH_INVALID) == 0)
998 break;
999
1000 /*
1001 * Allocate a peripheral instance for
1002 * this device and start the probe
1003 * process.
1004 */
1005 sim = xpt_path_sim(cgd->ccb_h.path);
1006 status = cam_periph_alloc(daregister, daoninvalidate,
1007 dacleanup, dastart,
1008 "da", CAM_PERIPH_BIO,
1009 cgd->ccb_h.path, daasync,
1010 AC_FOUND_DEVICE, cgd);
1011
1012 if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
1013 kprintf("daasync: Unable to attach to new device "
1014 "due to status 0x%x\n", status);
1015 }
1016 break;
1017 }
1018 case AC_SENT_BDR:
1019 case AC_BUS_RESET:
1020 {
1021 struct da_softc *softc;
1022 struct ccb_hdr *ccbh;
1023
1024 softc = (struct da_softc *)periph->softc;
1025 /*
1026 * Don't fail on the expected unit attention
1027 * that will occur.
1028 */
1029 softc->flags |= DA_FLAG_RETRY_UA;
1030 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
1031 ccbh->ccb_state |= DA_CCB_RETRY_UA;
1032 /* FALLTHROUGH*/
1033 }
1034 default:
1035 cam_periph_async(periph, code, path, arg);
1036 break;
1037 }
1038}
1039
1040static void
1041dasysctlinit(void *context, int pending)
1042{
1043 struct cam_periph *periph;
1044 struct da_softc *softc;
1045 char tmpstr[80], tmpstr2[80];
1046
1047 get_mplock();
1048 periph = (struct cam_periph *)context;
1049 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1050 rel_mplock();
1051 return;
1052 }
1053
1054 softc = (struct da_softc *)periph->softc;
1055 ksnprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
1056 ksnprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
1057
1058 sysctl_ctx_init(&softc->sysctl_ctx);
1059 softc->flags |= DA_FLAG_SCTX_INIT;
1060 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1061 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1062 CTLFLAG_RD, 0, tmpstr);
1063 if (softc->sysctl_tree == NULL) {
85f8e2ea 1064 kprintf("dasysctlinit: unable to allocate sysctl tree\n");
1c8b7a9a 1065 cam_periph_release(periph);
227ce828 1066 rel_mplock();
62ade751
MD
1067 return;
1068 }
1069
1070 /*
1071 * Now register the sysctl handler, so the user can change the
1072 * value on the fly.
1073 */
1074 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1075 OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1076 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1077 "Minimum CDB size");
1078
1079 /* Only create the option if the device supports TRIM */
1080 if (softc->disk.d_info.d_trimflag) {
1081 SYSCTL_ADD_INT(&softc->sysctl_ctx,
1082 SYSCTL_CHILDREN(softc->sysctl_tree),
1083 OID_AUTO,
1084 "trim_enabled",
1085 CTLFLAG_RW,
1086 &softc->trim_enabled,
1087 0,
1088 "Enable TRIM for this device (SSD))");
1089 }
1090
1091 cam_periph_release(periph);
1092 rel_mplock();
1093}
1094
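/*
 * Sysctl handler for minimum_cmd_size: round the requested value up to
 * the nearest supported CDB size (6, 10, 12 or 16 bytes).
 */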
1095static int
1096dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
1097{
1098 int error, value;
1099
1100 value = *(int *)arg1;
1101
1102 error = sysctl_handle_int(oidp, &value, 0, req);
1103
1104 if ((error != 0)
1105 || (req->newptr == NULL))
1106 return (error);
1107
1108 /*
1109 * Acceptable values here are 6, 10, 12, or 16.
1110 */
1111 if (value < 6)
1112 value = 6;
1113 else if ((value > 6)
1114 && (value <= 10))
1115 value = 10;
1116 else if ((value > 10)
1117 && (value <= 12))
1118 value = 12;
1119 else if (value > 12)
1120 value = 16;
1121
1122 *(int *)arg1 = value;
1123
1124 return (0);
1125}
1126
1127static cam_status
1128daregister(struct cam_periph *periph, void *arg)
1129{
1130 struct da_softc *softc;
1131 struct ccb_pathinq cpi;
1132 struct ccb_getdev *cgd;
1133 char tmpstr[80];
1134 caddr_t match;
1135
1136 cgd = (struct ccb_getdev *)arg;
1137 if (periph == NULL) {
1138 kprintf("daregister: periph was NULL!!\n");
1139 return(CAM_REQ_CMP_ERR);
1140 }
1141
1142 if (cgd == NULL) {
1143 kprintf("daregister: no getdev CCB, can't register device\n");
1144 return(CAM_REQ_CMP_ERR);
1145 }
1146
1147 softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
1148 LIST_INIT(&softc->pending_ccbs);
1149 softc->state = DA_STATE_PROBE;
1150 bioq_init(&softc->bio_queue_trim);
1151 bioq_init(&softc->bio_queue_rd);
1152 bioq_init(&softc->bio_queue_wr);
1153 if (SID_IS_REMOVABLE(&cgd->inq_data))
1154 softc->flags |= DA_FLAG_PACK_REMOVABLE;
1155 if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1156 softc->flags |= DA_FLAG_TAGGED_QUEUING;
1157
1158 /* Used to get TRIM status from AHCI driver */
1159 if (cgd->inq_data.vendor_specific1[0] == 1) {
1160 /*
1161 * max number of lba ranges an SSD can handle in a single
1162 * TRIM command. vendor_specific1[1] is the num of 512-byte
1163 * blocks the SSD reports that can be passed in a TRIM cmd.
1164 */
1165 softc->trim_max_ranges =
1166 min(cgd->inq_data.vendor_specific1[1] * 64, TRIM_MAX_RANGES);
1167 }
1168
1169 periph->softc = softc;
1170
1171 cam_extend_set(daperiphs, periph->unit_number, periph);
1172
1173 /*
1174 * See if this device has any quirks.
1175 */
1176 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1177 (caddr_t)da_quirk_table,
1178 NELEM(da_quirk_table),
1179 sizeof(*da_quirk_table), scsi_inquiry_match);
1180
1181 if (match != NULL)
1182 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1183 else
1184 softc->quirks = DA_Q_NONE;
1185
1186 /*
1187 * Unconditionally disable the synchronize cache command for
1188 * usb attachments. It's just impossible to determine if the
1189 * device supports it or not and if it doesn't the port can
1190 * brick.
1191 */
1192 if (strncmp(periph->sim->sim_name, "umass", 4) == 0) {
1193 softc->quirks |= DA_Q_NO_SYNC_CACHE;
1194 }
1195
1196 TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
1197
1198 /* Check if the SIM does not want 6 byte commands */
1199 bzero(&cpi, sizeof(cpi));
1200 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
1201 cpi.ccb_h.func_code = XPT_PATH_INQ;
1202 xpt_action((union ccb *)&cpi);
1203 if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
1204 softc->quirks |= DA_Q_NO_6_BYTE;
1205
1206 /*
1207 * RBC devices don't have to support READ(6), only READ(10).
1208 */
1209 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
1210 softc->minimum_cmd_size = 10;
1211 else
1212 softc->minimum_cmd_size = 6;
1213
1214 /*
1215 * Load the user's default, if any.
1216 */
1217 ksnprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
1218 periph->unit_number);
1219 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
1220
1221 /*
1222 * 6, 10, 12, and 16 are the currently permissible values.
1223 */
1224 if (softc->minimum_cmd_size < 6)
1225 softc->minimum_cmd_size = 6;
1226 else if ((softc->minimum_cmd_size > 6)
1227 && (softc->minimum_cmd_size <= 10))
1228 softc->minimum_cmd_size = 10;
1229 else if ((softc->minimum_cmd_size > 10)
1230 && (softc->minimum_cmd_size <= 12))
1231 softc->minimum_cmd_size = 12;
1232 else if (softc->minimum_cmd_size > 12)
1233 softc->minimum_cmd_size = 16;
1234
1235 /*
1236 * The DA driver supports a blocksize, but
1237 * we don't know the blocksize until we do
1238 * a read capacity. So, set a flag to
1239 * indicate that the blocksize is
1240 * unavailable right now. We'll clear the
1241 * flag as soon as we've done a read capacity.
1242 */
1243 devstat_add_entry(&softc->device_stats, "da",
1244 periph->unit_number, 0,
1245 DEVSTAT_BS_UNAVAILABLE,
1246 SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1247 DEVSTAT_PRIORITY_DISK);
1248
1249 /*
1250 * Register this media as a disk
1251 */
1252 CAM_SIM_UNLOCK(periph->sim);
1253 disk_create(periph->unit_number, &softc->disk, &da_ops);
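	/*
	 * Honor the maximum I/O size reported by the SIM (cpi.maxio),
	 * falling back to MAXPHYS when the SIM reports none or a value
	 * larger than MAXPHYS.  This is how a driver can override
	 * si_iosize_max (see the commit message above).
	 */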
1254 if (cpi.maxio == 0 || cpi.maxio > MAXPHYS)
1255 softc->disk.d_rawdev->si_iosize_max = MAXPHYS;
1256 else
1257 softc->disk.d_rawdev->si_iosize_max = cpi.maxio;
1258 CAM_SIM_LOCK(periph->sim);
1259
1260 /*
1261 * Add async callbacks for bus reset and
1262 * bus device reset calls. I don't bother
1263 * checking if this fails as, in most cases,
1264 * the system will function just fine without
1265 * them and the only alternative would be to
1266 * not attach the device on failure.
1267 */
1268 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
1269 daasync, periph, periph->path);
1270
1271 /*
1272 * Take an exclusive refcount on the periph while dastart is called
1273 * to finish the probe. The reference will be dropped in dadone at
1274 * the end of probe.
1275 */
1276 cam_periph_hold(periph, 0);
1277 xpt_schedule(periph, /*priority*/5);
1278
1279 /*
1280 * Schedule a periodic event to occasionally send an
1281 * ordered tag to a device.
1282 */
1283 callout_init(&softc->sendordered_c);
1284 callout_reset(&softc->sendordered_c,
1285 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL,
1286 dasendorderedtag, softc);
1287
1288
1289
1290 return(CAM_REQ_CMP);
1291}
1292
1293static void
1294dastart(struct cam_periph *periph, union ccb *start_ccb)
1295{
1296 struct da_softc *softc;
1297
1298 softc = (struct da_softc *)periph->softc;
1299
984263bc
MD
1300 switch (softc->state) {
1301 case DA_STATE_NORMAL:
1302 {
1303 /* Pull a buffer from the queue and get going on it */
1304 struct bio *bio;
1305 struct bio *bio_rd;
1306 struct bio *bio_wr;
1307 struct buf *bp;
1308 u_int8_t tag_code;
1309 int limit;
1310
1311 /*
1312 * See if there is a buf with work for us to do..
1313 */
1314 bio_rd = bioq_first(&softc->bio_queue_rd);
1315 bio_wr = bioq_first(&softc->bio_queue_wr);
1316
1317 if (periph->immediate_priority <= periph->pinfo.priority) {
1318 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1319 ("queuing for immediate ccb\n"));
1320 start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1321 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1322 periph_links.sle);
1323 periph->immediate_priority = CAM_PRIORITY_NONE;
1324 wakeup(&periph->ccb_list);
1325 if (bio_rd || bio_wr) {
1326 /*
1327 * Have more work to do, so ensure we stay
1328 * scheduled
1329 */
1330 xpt_schedule(periph, /* XXX priority */1);
1331 }
1332 break;
1333 }
1334
1335 /* Run the trim command if not already running */
1336 if (!softc->trim_running &&
1337 (bio = bioq_first(&softc->bio_queue_trim)) != NULL) {
1338 struct trim_request *req = &softc->trim_req;
1339 struct bio *bio1;
1340 int bps = 0, ranges = 0;
1341
1342 softc->trim_running = 1;
1343 bzero(req, sizeof(*req));
1344 bio1 = bio;
1345 while (1) {
1346 uint64_t lba;
1347 int count;
1348
1349 bp = bio1->bio_buf;
1350 count = bp->b_bcount / softc->params.secsize;
1351 lba = bio1->bio_offset/softc->params.secsize;
1352
1353 kprintf("trim lba:%llu boff:%llu count:%d\n",
1354 (unsigned long long) lba,
1355 (unsigned long long) bio1->bio_offset,
1356 count);
1357
1358 bioq_remove(&softc->bio_queue_trim, bio1);
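				/*
				 * Pack this bio into TRIM range descriptors:
				 * 8 bytes each, a 48-bit starting LBA followed
				 * by a 16-bit sector count, so one descriptor
				 * covers at most 0xffff sectors.
				 */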
1359 while (count > 0) {
1360 int c = min(count, 0xffff);
1361 int off = ranges * 8;
1362
1363 req->data[off + 0] = lba & 0xff;
1364 req->data[off + 1] = (lba >> 8) & 0xff;
1365 req->data[off + 2] = (lba >> 16) & 0xff;
1366 req->data[off + 3] = (lba >> 24) & 0xff;
1367 req->data[off + 4] = (lba >> 32) & 0xff;
1368 req->data[off + 5] = (lba >> 40) & 0xff;
1369 req->data[off + 6] = c & 0xff;
1370 req->data[off + 7] = (c >> 8) & 0xff;
1371 lba += c;
1372 count -= c;
1373 ranges++;
1374 }
1375
1376 /* Try to merge multiple TRIM requests */
1377 req->bios[bps++] = bio1;
1378 bio1 = bioq_first(&softc->bio_queue_trim);
1379 if (bio1 == NULL ||
1380 bio1->bio_buf->b_bcount / softc->params.secsize >
1381 (softc->trim_max_ranges - ranges) * 0xffff)
1382 break;
1383 }
1384
1385
1386 cam_fill_csio(&start_ccb->csio,
1387 1/*retries*/,
1388 dadone,
1389 CAM_DIR_OUT,
1390 MSG_SIMPLE_Q_TAG,
1391 req->data,
1392 ((ranges +63)/64)*512,
1393 SSD_FULL_SIZE,
1394 sizeof(struct scsi_rw_6),
1395 da_default_timeout*2);
1396
1397 start_ccb->ccb_h.ccb_state = DA_CCB_TRIM;
1398 LIST_INSERT_HEAD(&softc->pending_ccbs,
1399 &start_ccb->ccb_h, periph_links.le);
1400 start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1401 start_ccb->ccb_h.ccb_bio = bio;
1402 devstat_start_transaction(&softc->device_stats);
1403 xpt_action(start_ccb);
1404 xpt_schedule(periph, 1);
1405 break;
1406 }
1407
1408 /*
1409 * Select a read or write buffer to queue. Limit the number
1410 * of tags dedicated to reading or writing, giving reads
1411 * precedence.
1412 *
1413 * Writes to modern hard drives go into the HD's cache and
1414 * return completion nearly instantly. That is, until the
1415 * cache becomes full. When the HD's cache becomes full,
1416 * write commands will begin to stall. If all available
1417 * tags are taken up by writes which saturate the drive
1418 * reads will become tag-starved.
1419 *
1420 * A similar situation can occur with reads. With many
1421 * parallel readers all tags can be taken up by reads
1422 * and prevent any writes from draining, even if the HD's
1423 * cache is not full.
1424 */
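		/*
		 * Reserve roughly a third of the tag openings for the
		 * other direction so neither reads nor writes can
		 * monopolize the device.
		 */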
1425 limit = periph->sim->max_tagged_dev_openings * 2 / 3 + 1;
1426#if 0
1427 /* DEBUGGING */
1428 static int savets;
1429 static long savets2;
1430 if (1 || time_second != savets2 || (ticks != savets && (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr))) {
1431 kprintf("%d %d (%d)\n",
1432 softc->outstanding_cmds_rd,
1433 softc->outstanding_cmds_wr,
1434 limit);
1435 savets = ticks;
1436 savets2 = time_second;
1437 }
1438#endif
1439 if (bio_rd && softc->outstanding_cmds_rd < limit) {
1440 bio = bio_rd;
1441 bioq_remove(&softc->bio_queue_rd, bio);
1442 } else if (bio_wr && softc->outstanding_cmds_wr < limit) {
1443 bio = bio_wr;
1444 bioq_remove(&softc->bio_queue_wr, bio);
1445 } else {
1446 if (bio_rd)
1447 softc->flags |= DA_FLAG_RD_LIMIT;
1448 if (bio_wr)
1449 softc->flags |= DA_FLAG_WR_LIMIT;
1450 xpt_release_ccb(start_ccb);
1451 break;
1452 }
1453
1454 /*
1455 * We can queue new work.
1456 */
1457 bp = bio->bio_buf;
1458
1459 devstat_start_transaction(&softc->device_stats);
1460
1461 if ((bp->b_flags & B_ORDERED) != 0 ||
1462 (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1463 softc->flags &= ~DA_FLAG_NEED_OTAG;
1464 softc->ordered_tag_count++;
1465 tag_code = MSG_ORDERED_Q_TAG;
1466 } else {
1467 tag_code = MSG_SIMPLE_Q_TAG;
1468 }
1469
1470 switch(bp->b_cmd) {
1471 case BUF_CMD_READ:
1472 case BUF_CMD_WRITE:
1473 /*
1474 * Block read/write op
1475 */
1476 KKASSERT(bio->bio_offset % softc->params.secsize == 0);
1477
1478 scsi_read_write(
1479 &start_ccb->csio,
1480 da_retry_count, /* retries */
1481 dadone,
1482 tag_code,
1483 (bp->b_cmd == BUF_CMD_READ),
1484 0, /* byte2 */
1485 softc->minimum_cmd_size,
1486 bio->bio_offset / softc->params.secsize,
1487 bp->b_bcount / softc->params.secsize,
1488 bp->b_data,
1489 bp->b_bcount,
1490 SSD_FULL_SIZE, /* sense_len */
1491 da_default_timeout * 1000
1492 );
1493 break;
1494 case BUF_CMD_FLUSH:
1495 /*
1496 * Silently complete a flush request if the device
1497 * cannot handle it.
1498 */
1499 if (softc->quirks & DA_Q_NO_SYNC_CACHE) {
1500 xpt_release_ccb(start_ccb);
1501 start_ccb = NULL;
1502 devstat_end_transaction_buf(
1503 &softc->device_stats, bp);
1504 biodone(bio);
1505 } else {
1506 scsi_synchronize_cache(
1507 &start_ccb->csio,
1508 1, /* retries */
1509 dadone, /* cbfcnp */
1510 MSG_SIMPLE_Q_TAG,
1511 0, /* lba */
1512 0, /* count (whole disk) */
1513 SSD_FULL_SIZE,
1514 da_default_timeout*1000 /* timeout */
1515 );
1516 }
1517 break;
1518 case BUF_CMD_FREEBLKS:
1519 if (softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM){
1520 start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1521 break;
1522 }
1523 default:
1524 xpt_release_ccb(start_ccb);
1525 start_ccb = NULL;
1526 panic("dastart: unrecognized bio cmd %d", bp->b_cmd);
1527 break; /* NOT REACHED */
1528 }
1529
1530 /*
1531 * Block out any asynchronous callbacks
1532 * while we touch the pending ccb list.
1533 */
1534 if (start_ccb) {
1535 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1536 LIST_INSERT_HEAD(&softc->pending_ccbs,
1537 &start_ccb->ccb_h, periph_links.le);
1538 if (bp->b_cmd == BUF_CMD_WRITE ||
1539 bp->b_cmd == BUF_CMD_FLUSH) {
1540 ++softc->outstanding_cmds_wr;
1541 } else {
1542 ++softc->outstanding_cmds_rd;
1543 }
1544
1545 /* We expect a unit attention from this device */
1546 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1547 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1548 softc->flags &= ~DA_FLAG_RETRY_UA;
1549 }
1550
1551 start_ccb->ccb_h.ccb_bio = bio;
1552 xpt_action(start_ccb);
1553 }
1554
1555 /*
1556 * Be sure we stay scheduled if we have more work to do.
1557 */
1558 if (bioq_first(&softc->bio_queue_rd) ||
1559 bioq_first(&softc->bio_queue_wr)) {
1560 xpt_schedule(periph, 1);
1561 }
1562 break;
1563 }
1564 case DA_STATE_PROBE:
1565 {
1566 struct ccb_scsiio *csio;
1567 struct scsi_read_capacity_data *rcap;
1568
1569 rcap = kmalloc(sizeof(*rcap), M_SCSIDA, M_INTWAIT | M_ZERO);
1570 csio = &start_ccb->csio;
1571 scsi_read_capacity(csio,
1572 /*retries*/4,
1573 dadone,
1574 MSG_SIMPLE_Q_TAG,
1575 rcap,
1576 SSD_FULL_SIZE,
1577 /*timeout*/5000);
1578 start_ccb->ccb_h.ccb_bio = NULL;
1579 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1580 xpt_action(start_ccb);
1581 break;
1582 }
1583 case DA_STATE_PROBE2:
1584 {
1585 struct ccb_scsiio *csio;
1586 struct scsi_read_capacity_data_16 *rcaplong;
1587
1588 rcaplong = kmalloc(sizeof(*rcaplong), M_SCSIDA,
1589 M_INTWAIT | M_ZERO);
1590 if (rcaplong == NULL) {
1591 kprintf("dastart: Couldn't allocate read_capacity\n");
1592 /* da_free_periph??? */
1593 break;
1594 }
1595 csio = &start_ccb->csio;
1596 scsi_read_capacity_16(csio,
1597 /*retries*/ 4,
1598 /*cbfcnp*/ dadone,
1599 /*tag_action*/ MSG_SIMPLE_Q_TAG,
1600 /*lba*/ 0,
1601 /*reladr*/ 0,
1602 /*pmi*/ 0,
1603 rcaplong,
1604 /*sense_len*/ SSD_FULL_SIZE,
1605 /*timeout*/ 60000);
1606 start_ccb->ccb_h.ccb_bio = NULL;
1607 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
1608 xpt_action(start_ccb);
1609 break;
1610 }
1611 }
1612}
1613
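/*
 * Rewrite a failed READ(6)/WRITE(6) CDB into its 10-byte equivalent in
 * place and requeue the request.  Returns ERESTART when the CCB has been
 * requeued, 0 when no translation was possible.
 */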
1614static int
1615cmd6workaround(union ccb *ccb)
1616{
1617 struct scsi_rw_6 cmd6;
1618 struct scsi_rw_10 *cmd10;
1619 struct da_softc *softc;
1620 u_int8_t *cdb;
1621 int frozen;
1622
1623 cdb = ccb->csio.cdb_io.cdb_bytes;
1624
1625 /* Translation only possible if CDB is an array and cmd is R/W6 */
1626 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
1627 (*cdb != READ_6 && *cdb != WRITE_6))
1628 return 0;
1629
1c8b7a9a
PA
1630 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
1631 "increasing minimum_cmd_size to 10.\n");
984263bc
MD
1632 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
1633 softc->minimum_cmd_size = 10;
1634
1635 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
1636 cmd10 = (struct scsi_rw_10 *)cdb;
1637 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
1638 cmd10->byte2 = 0;
1639 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
1640 cmd10->reserved = 0;
1641 scsi_ulto2b(cmd6.length, cmd10->length);
1642 cmd10->control = cmd6.control;
1643 ccb->csio.cdb_len = sizeof(*cmd10);
1644
1645 /* Requeue request, unfreezing queue if necessary */
1646 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1647 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1648 xpt_action(ccb);
1649 if (frozen) {
1650 cam_release_devq(ccb->ccb_h.path,
1651 /*relsim_flags*/0,
1652 /*reduction*/0,
1653 /*timeout*/0,
1654 /*getcount_only*/0);
1655 }
1656 return (ERESTART);
1657}
1658
1659static void
1660dadone(struct cam_periph *periph, union ccb *done_ccb)
1661{
1662 struct da_softc *softc;
1663 struct ccb_scsiio *csio;
1664 struct disk_info info;
1665
1666 softc = (struct da_softc *)periph->softc;
1667 csio = &done_ccb->csio;
1668 switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1669 case DA_CCB_BUFFER_IO:
1670 case DA_CCB_TRIM:
1671 {
1672 struct buf *bp;
1673 struct bio *bio;
1674 int mustsched = 0;
1675
1676 bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
1677 bp = bio->bio_buf;
1678 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1679 int error;
1680 int sf;
1681
1682 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1683 sf = SF_RETRY_UA;
1684 else
1685 sf = 0;
1686
1687 error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1688 if (error == ERESTART) {
1689 /*
1690 * A retry was scheduled, so
1691 * just return.
1692 */
1693 return;
1694 }
1695 if (error != 0) {
1696 if (error == ENXIO) {
1697 /*
1698 * Catastrophic error. Mark our pack as
1699 * invalid.
1700 */
1701 /*
1702 * XXX See if this is really a media
1703 * XXX change first?
1704 */
1705 xpt_print(periph->path,
1706 "Invalidating pack\n");
1707 softc->flags |= DA_FLAG_PACK_INVALID;
1708 }
1709
1710 /*
1711 * Return all queued write I/O's with EIO
1712 * so the client can retry these I/Os in the
1713 * proper order should it attempt to recover.
1714 *
1715 * Leave read I/O's alone.
984263bc 1716 */
af0aa0ac 1717 daflushbioq(&softc->bio_queue_wr, EIO);
984263bc
MD
1718 bp->b_error = error;
1719 bp->b_resid = bp->b_bcount;
1720 bp->b_flags |= B_ERROR;
1721 } else {
1722 bp->b_resid = csio->resid;
1723 bp->b_error = 0;
1724 if (bp->b_resid != 0)
1725 bp->b_flags |= B_ERROR;
1726 }
1727 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1728 cam_release_devq(done_ccb->ccb_h.path,
1729 /*relsim_flags*/0,
1730 /*reduction*/0,
1731 /*timeout*/0,
1732 /*getcount_only*/0);
1733 } else {
1734 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1735 panic("REQ_CMP with QFRZN");
1736 bp->b_resid = csio->resid;
1737 if (csio->resid > 0)
1738 bp->b_flags |= B_ERROR;
1739 }
1740
1741 /*
1742 * Block out any asynchronous callbacks
1743 * while we touch the pending ccb list.
1744 */
1745 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1746 if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH) {
1747 --softc->outstanding_cmds_wr;
1748 if (softc->flags & DA_FLAG_WR_LIMIT) {
1749 softc->flags &= ~DA_FLAG_WR_LIMIT;
1750 mustsched = 1;
1751 }
1752 } else {
1753 --softc->outstanding_cmds_rd;
1754 if (softc->flags & DA_FLAG_RD_LIMIT) {
1755 softc->flags &= ~DA_FLAG_RD_LIMIT;
1756 mustsched = 1;
1757 }
1758 }
1759 if (softc->outstanding_cmds_rd +
1760 softc->outstanding_cmds_wr == 0) {
1761 softc->flags |= DA_FLAG_WENT_IDLE;
1762 }
1763
1764 devstat_end_transaction_buf(&softc->device_stats, bp);
1765 if ((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) ==
1766 DA_CCB_TRIM) {
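			/*
			 * A single TRIM CCB may carry several merged bios;
			 * propagate the completion status to every bio
			 * attached to the request.
			 */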
1767 struct trim_request *req =
1768 (struct trim_request *) csio->data_ptr;
1769 int i;
1770
1771 for (i = 1; i < softc->trim_max_ranges &&
1772 req->bios[i]; i++) {
1773 struct bio *bp1 = req->bios[i];
1774
1775 bp1->bio_buf->b_resid = bp->b_resid;
1776 bp1->bio_buf->b_error = bp->b_error;
1777 if (bp->b_flags & B_ERROR)
1778 bp1->bio_buf->b_flags |= B_ERROR;
1779 biodone(bp1);
1780 }
1781 softc->trim_running = 0;
1782 biodone(bio);
1783 xpt_schedule(periph,1);
1784 } else
1785 biodone(bio);
1786
1787
1788 if (mustsched)
1789 xpt_schedule(periph, /*priority*/1);
1790
1791 break;
1792 }
1793 case DA_CCB_PROBE:
1794 case DA_CCB_PROBE2:
1795 {
1796 struct scsi_read_capacity_data *rdcap;
1797 struct scsi_read_capacity_data_16 *rcaplong;
1798 char announce_buf[80];
1799
1800 rdcap = NULL;
1801 rcaplong = NULL;
1802 if (softc->state == DA_STATE_PROBE)
1803 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
1804 else
1805 rcaplong = (struct scsi_read_capacity_data_16 *)
1806 csio->data_ptr;
1807
1808 bzero(&info, sizeof(info));
1809 info.d_type = DTYPE_SCSI;
1810 info.d_serialno = xpt_path_serialno(periph->path);
1811
1812 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1813 struct disk_params *dp;
1814 uint32_t block_size;
1815 uint64_t maxsector;
1816
1817 if (softc->state == DA_STATE_PROBE) {
1818 block_size = scsi_4btoul(rdcap->length);
1819 maxsector = scsi_4btoul(rdcap->addr);
1820
1821 /*
1822 * According to SBC-2, if the standard 10
1823 * byte READ CAPACITY command returns 2^32,
1824 * we should issue the 16 byte version of
1825 * the command, since the device in question
1826 * has more sectors than can be represented
1827 * with the short version of the command.
1828 */
1829 if (maxsector == 0xffffffff) {
1830 softc->state = DA_STATE_PROBE2;
1831 kfree(rdcap, M_SCSIDA);
1832 xpt_release_ccb(done_ccb);
1833 xpt_schedule(periph, /*priority*/5);
1834 return;
1835 }
1836 } else {
1837 block_size = scsi_4btoul(rcaplong->length);
1838 maxsector = scsi_8btou64(rcaplong->addr);
1839 }
1840 dasetgeom(periph, block_size, maxsector);
1841 dp = &softc->params;
1842 ksnprintf(announce_buf, sizeof(announce_buf),
1843 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1844 (uintmax_t) (((uintmax_t)dp->secsize *
1845 dp->sectors) / (1024*1024)),
1846 (uintmax_t)dp->sectors,
984263bc
MD
1847 dp->secsize, dp->heads, dp->secs_per_track,
1848 dp->cylinders);
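			/*
			 * The announcement comes out looking like, e.g.,
			 * "2048MB (4194304 512 byte sectors: 255H 63S/T 261C)"
			 * (4194304 * 512 bytes = 2048MB; values illustrative,
			 * geometry is whatever dasetgeom() computed).
			 */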
e0fb398b 1849
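			/*
			 * disk_setdiskinfo() hands the parameters to the disk
			 * layer, which may reprobe slices/partitions and block;
			 * the SIM lock is dropped around it, presumably so the
			 * controller is not held locked across that call (the
			 * same pattern is used in dacheckmedia() below).
			 */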
cd29885a 1850 CAM_SIM_UNLOCK(periph->sim);
cd29885a
MD
1851 info.d_media_blksize = softc->params.secsize;
1852 info.d_media_blocks = softc->params.sectors;
1853 info.d_media_size = 0;
1854 info.d_secpertrack = softc->params.secs_per_track;
1855 info.d_nheads = softc->params.heads;
1856 info.d_ncylinders = softc->params.cylinders;
1857 info.d_secpercyl = softc->params.heads *
1858 softc->params.secs_per_track;
55230951 1859 info.d_serialno = xpt_path_serialno(periph->path);
cd29885a
MD
1860 disk_setdiskinfo(&softc->disk, &info);
1861 CAM_SIM_LOCK(periph->sim);
984263bc
MD
1862 } else {
1863 int error;
1864
1865 announce_buf[0] = '\0';
1866
1867 /*
1868 * Retry any UNIT ATTENTION type errors. They
1869 * are expected at boot.
1870 */
b05e84c9
PA
1871 error = daerror(done_ccb, CAM_RETRY_SELTO,
1872 SF_RETRY_UA|SF_NO_PRINT);
984263bc
MD
1873 if (error == ERESTART) {
1874 /*
1875 * A retry was scheduled, so
1876 * just return.
1877 */
1878 return;
1879 } else if (error != 0) {
1880 struct scsi_sense_data *sense;
1881 int asc, ascq;
1882 int sense_key, error_code;
1883 int have_sense;
1884 cam_status status;
1885 struct ccb_getdev cgd;
1886
1887 /* Don't wedge this device's queue */
984263bc 1888 status = done_ccb->ccb_h.status;
b05e84c9
PA
1889 if ((status & CAM_DEV_QFRZN) != 0)
1890 cam_release_devq(done_ccb->ccb_h.path,
1891 /*relsim_flags*/0,
1892 /*reduction*/0,
1893 /*timeout*/0,
1894 /*getcount_only*/0);
1895
984263bc
MD
1896
1897 xpt_setup_ccb(&cgd.ccb_h,
1898 done_ccb->ccb_h.path,
1899 /* priority */ 1);
1900 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1901 xpt_action((union ccb *)&cgd);
1902
1903 if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1904 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1905 || ((status & CAM_AUTOSNS_VALID) == 0))
1906 have_sense = FALSE;
1907 else
1908 have_sense = TRUE;
1909
1910 if (have_sense) {
1911 sense = &csio->sense_data;
1912 scsi_extract_sense(sense, &error_code,
1913 &sense_key,
1914 &asc, &ascq);
1915 }
1916 /*
1917 * Attach to anything that claims to be a
1918 * direct access or optical disk device,
1919 * as long as it doesn't return a "Logical
1920 * unit not supported" (0x25) error.
1921 */
1922 if ((have_sense) && (asc != 0x25)
b05e84c9
PA
1923 && (error_code == SSD_CURRENT_ERROR)) {
1924 const char *sense_key_desc;
1925 const char *asc_desc;
1926
1927 scsi_sense_desc(sense_key, asc, ascq,
1928 &cgd.inq_data,
1929 &sense_key_desc,
1930 &asc_desc);
f8c7a42d 1931 ksnprintf(announce_buf,
984263bc
MD
1932 sizeof(announce_buf),
1933 "Attempt to query device "
1934 "size failed: %s, %s",
b05e84c9
PA
1935 sense_key_desc,
1936 asc_desc);
f7b26992
MD
1937 info.d_media_blksize = 512;
1938 disk_setdiskinfo(&softc->disk, &info);
b05e84c9 1939 } else {
984263bc
MD
1940 if (have_sense)
1941 scsi_sense_print(
1942 &done_ccb->csio);
1943 else {
1c8b7a9a
PA
1944 xpt_print(periph->path,
1945 "got CAM status %#x\n",
1946 done_ccb->ccb_h.status);
984263bc
MD
1947 }
1948
1c8b7a9a
PA
1949 xpt_print(periph->path, "fatal error, "
1950 "failed to attach to device\n");
984263bc
MD
1951
1952 /*
1953 * Free up resources.
1954 */
1955 cam_periph_invalidate(periph);
1956 }
1957 }
1958 }
1c8b7a9a 1959 kfree(csio->data_ptr, M_SCSIDA);
62ade751 1960 if (announce_buf[0] != '\0') {
984263bc 1961 xpt_announce_periph(periph, announce_buf);
62ade751
MD
1962 /*
1963 * Create our sysctl variables, now that we know
1964 * we have successfully attached.
1965 */
b3504e03
JH
1966 taskqueue_enqueue(taskqueue_thread[mycpuid],
1967 &softc->sysctl_task);
62ade751 1968 }
e0fb398b
T
1969
1970 if (softc->trim_max_ranges) {
1971 softc->disk.d_info.d_trimflag |= DA_FLAG_CAN_TRIM;
1972 kprintf("%s%d: supports TRIM\n",
1973 periph->periph_name,
1974 periph->unit_number);
1975 }
b05e84c9 1976 softc->state = DA_STATE_NORMAL;
984263bc
MD
1977 /*
1978 * Since our peripheral may be invalidated by an error
1979 * above or an external event, we must release our CCB
1980 * before releasing the probe lock on the peripheral.
1981 * The peripheral will only go away once the last lock
1982 * is removed, and we need it around for the CCB release
1983 * operation.
1984 */
1985 xpt_release_ccb(done_ccb);
2d19cdd3 1986 cam_periph_unhold(periph, 0);
984263bc
MD
1987 return;
1988 }
1989 case DA_CCB_WAITING:
1990 {
1991 /* Caller will release the CCB */
1992 wakeup(&done_ccb->ccb_h.cbfcnp);
1993 return;
1994 }
1995 case DA_CCB_DUMP:
1996 /* No-op. We're polling */
1997 return;
1998 default:
1999 break;
2000 }
2001 xpt_release_ccb(done_ccb);
2002}
2003
2004static int
2005daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
2006{
2007 struct da_softc *softc;
2008 struct cam_periph *periph;
62ade751 2009 int error;
984263bc
MD
2010
2011 periph = xpt_path_periph(ccb->ccb_h.path);
2012 softc = (struct da_softc *)periph->softc;
2013
2014 /*
2015 * Automatically detect devices that do not support
2016 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
2017 */
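	/*
	 * Such devices show up here either as CAM_REQ_INVALID or as a
	 * CHECK CONDITION with ILLEGAL REQUEST sense; in both cases
	 * cmd6workaround() is expected to rebuild the CDB in its 10-byte
	 * form and return ERESTART so the command is reissued.
	 */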
2018 error = 0;
62ade751
MD
2019 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
2020 error = cmd6workaround(ccb);
2021 } else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
2022 CAM_SCSI_STATUS_ERROR)
2023 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
2024 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
2025 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
2026 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
2027 int sense_key, error_code, asc, ascq;
2028
984263bc
MD
2029 scsi_extract_sense(&ccb->csio.sense_data,
2030 &error_code, &sense_key, &asc, &ascq);
2031 if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
2032 error = cmd6workaround(ccb);
2033 }
2034 if (error == ERESTART)
2035 return (ERESTART);
2036
2037 /*
2038 * XXX
2039 * Until we have a better way of doing pack validation,
2040 * don't treat UAs as errors.
2041 */
2042 sense_flags |= SF_RETRY_UA;
2043 return(cam_periph_error(ccb, cam_flags, sense_flags,
2044 &softc->saved_ccb));
2045}
2046
2047static void
2048daprevent(struct cam_periph *periph, int action)
2049{
2050 struct da_softc *softc;
2051 union ccb *ccb;
2052 int error;
2053
2054 softc = (struct da_softc *)periph->softc;
2055
2056 if (((action == PR_ALLOW)
2057 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
2058 || ((action == PR_PREVENT)
2059 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
2060 return;
2061 }
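	/*
	 * scsi_prevent() issues PREVENT ALLOW MEDIUM REMOVAL: PR_PREVENT
	 * locks removable media in the drive and PR_ALLOW releases it.
	 * The check above skips the command when DA_FLAG_PACK_LOCKED
	 * already reflects the requested state.
	 */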
2062
2063 ccb = cam_periph_getccb(periph, /*priority*/1);
2064
2065 scsi_prevent(&ccb->csio,
2066 /*retries*/1,
2067 /*cbfcnp*/dadone,
2068 MSG_SIMPLE_Q_TAG,
2069 action,
2070 SSD_FULL_SIZE,
2071 5000);
2072
3f499af5
PA
2073 error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
2074 SF_RETRY_UA, &softc->device_stats);
984263bc
MD
2075
2076 if (error == 0) {
2077 if (action == PR_ALLOW)
2078 softc->flags &= ~DA_FLAG_PACK_LOCKED;
2079 else
2080 softc->flags |= DA_FLAG_PACK_LOCKED;
2081 }
2082
2083 xpt_release_ccb(ccb);
2084}
2085
f7b26992
MD
2086/*
2087 * Check media on open, e.g. card reader devices which had no initial media.
2088 */
2089static int
2090dacheckmedia(struct cam_periph *periph)
2091{
2092 struct disk_params *dp;
2093 struct da_softc *softc;
2094 struct disk_info info;
2095 int error;
2096
2097 softc = (struct da_softc *)periph->softc;
2098 dp = &softc->params;
2099
2100 error = dagetcapacity(periph);
2101
2102 /*
2103 * Only reprobe on initial open and if the media is removable.
9670bdda
MD
2104 *
2105 * NOTE: disk_setdiskinfo() makes the disk layer reprobe the
2106 * slices and partitions, which takes a while and can disrupt
2107 * booting, so avoid it if nothing has changed.
2108 * XXX
f7b26992
MD
2109 */
2110 if (softc->flags & DA_FLAG_OPEN)
2111 return (error);
2112 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
2113 return (error);
2114
2115 bzero(&info, sizeof(info));
2116 info.d_type = DTYPE_SCSI;
2117 info.d_serialno = xpt_path_serialno(periph->path);
2118
2119 if (error == 0) {
f7b26992
MD
2120 CAM_SIM_UNLOCK(periph->sim);
2121 info.d_media_blksize = softc->params.secsize;
2122 info.d_media_blocks = softc->params.sectors;
2123 info.d_media_size = 0;
2124 info.d_secpertrack = softc->params.secs_per_track;
2125 info.d_nheads = softc->params.heads;
2126 info.d_ncylinders = softc->params.cylinders;
2127 info.d_secpercyl = softc->params.heads *
2128 softc->params.secs_per_track;
2129 info.d_serialno = xpt_path_serialno(periph->path);
9670bdda
MD
2130 if (info.d_media_blocks != softc->disk.d_info.d_media_blocks) {
2131 kprintf("%s%d: open removable media: "
2132 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)\n",
2133 periph->periph_name, periph->unit_number,
2134 (uintmax_t)(((uintmax_t)dp->secsize *
2135 dp->sectors) / (1024*1024)),
2136 (uintmax_t)dp->sectors, dp->secsize,
2137 dp->heads, dp->secs_per_track, dp->cylinders);
2138 disk_setdiskinfo(&softc->disk, &info);
2139 }
f7b26992
MD
2140 CAM_SIM_LOCK(periph->sim);
2141 } else {
2142 kprintf("%s%d: open removable media: no media present\n",
2143 periph->periph_name, periph->unit_number);
2144 info.d_media_blksize = 512;
2145 disk_setdiskinfo(&softc->disk, &info);
2146 }
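	/*
	 * With no media only a minimal 512-byte-sector disk_info is
	 * published; a later open can run dagetcapacity() again once
	 * media has been inserted.
	 */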
2147 return (error);
2148}
2149
bdd58e03
MD
2150static int
2151dagetcapacity(struct cam_periph *periph)
2152{
2153 struct da_softc *softc;
2154 union ccb *ccb;
2155 struct scsi_read_capacity_data *rcap;
0b0362e1 2156 struct scsi_read_capacity_data_16 *rcaplong;
bdd58e03
MD
2157 uint32_t block_len;
2158 uint64_t maxsector;
2159 int error;
2160
2161 softc = (struct da_softc *)periph->softc;
2162 block_len = 0;
2163 maxsector = 0;
2164 error = 0;
2165
2166 /* Do a read capacity */
1c8b7a9a
PA
2167 rcap = (struct scsi_read_capacity_data *)kmalloc(sizeof(*rcaplong),
2168 M_SCSIDA, M_INTWAIT);
bdd58e03
MD
2169
2170 ccb = cam_periph_getccb(periph, /*priority*/1);
2171 scsi_read_capacity(&ccb->csio,
2172 /*retries*/4,
2173 /*cbfcnp*/dadone,
2174 MSG_SIMPLE_Q_TAG,
2175 rcap,
2176 SSD_FULL_SIZE,
2177 /*timeout*/60000);
2178 ccb->ccb_h.ccb_bio = NULL;
2179
2180 error = cam_periph_runccb(ccb, daerror,
b05e84c9 2181 /*cam_flags*/CAM_RETRY_SELTO,
bdd58e03
MD
2182 /*sense_flags*/SF_RETRY_UA,
2183 &softc->device_stats);
2184
2185 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2186 cam_release_devq(ccb->ccb_h.path,
2187 /*relsim_flags*/0,
2188 /*reduction*/0,
2189 /*timeout*/0,
2190 /*getcount_only*/0);
2191
2192 if (error == 0) {
2193 block_len = scsi_4btoul(rcap->length);
2194 maxsector = scsi_4btoul(rcap->addr);
2195
2196 if (maxsector != 0xffffffff)
2197 goto done;
2198 } else
2199 goto done;
2200
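	/*
	 * Reuse the same buffer for the 16-byte response; the kmalloc()
	 * above is sized as sizeof(*rcaplong), the larger of the two
	 * capacity structures, for exactly this reason.
	 */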
0b0362e1 2201 rcaplong = (struct scsi_read_capacity_data_16 *)rcap;
bdd58e03
MD
2202
2203 scsi_read_capacity_16(&ccb->csio,
2204 /*retries*/ 4,
2205 /*cbfcnp*/ dadone,
2206 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2207 /*lba*/ 0,
2208 /*reladr*/ 0,
2209 /*pmi*/ 0,
2210 rcaplong,
2211 /*sense_len*/ SSD_FULL_SIZE,
2212 /*timeout*/ 60000);
2213 ccb->ccb_h.ccb_bio = NULL;
2214
2215 error = cam_periph_runccb(ccb, daerror,
b05e84c9 2216 /*cam_flags*/CAM_RETRY_SELTO,
bdd58e03
MD
2217 /*sense_flags*/SF_RETRY_UA,
2218 &softc->device_stats);
2219
2220 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2221 cam_release_devq(ccb->ccb_h.path,
2222 /*relsim_flags*/0,
2223 /*reduction*/0,
2224 /*timeout*/0,
2225 /*getcount_only*/0);
2226
2227 if (error == 0) {
2228 block_len = scsi_4btoul(rcaplong->length);
2229 maxsector = scsi_8btou64(rcaplong->addr);
2230 }
2231
2232done:
2233
2234 if (error == 0)
2235 dasetgeom(periph, block_len, maxsector);
2236
2237 xpt_release_ccb(ccb);
2238
1c8b7a9a 2239 kfree(rcap, M_SCSIDA);
bdd58e03
MD
2240
2241 return (error);
2242}
2243
984263bc 2244static void
bdd58e03 2245dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
984263bc
MD
2246{
2247 struct ccb_calc_geometry ccg;
2248 struct da_softc *softc;
2249 struct disk_params *dp;
2250
2251 softc = (struct da_softc *)periph->softc;
2252
2253 dp = &softc->params;
bdd58e03
MD
2254 dp->secsize = block_len;
2255 dp->sectors = maxsector + 1;
984263bc
MD
2256 /*
2257 * Have the controller provide us with a geometry
2258 * for this disk. The only time the geometry
2259 * matters is when we boot and the controller
2260 * is the only one knowledgeable enough to come
2261 * up with something that will make this a bootable
2262 * device.
2263 */
2264 xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
2265 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
2266 ccg.block_size = dp->secsize;
2267 ccg.volume_size = dp->sectors;
2268 ccg.heads = 0;
2269 ccg.secs_per_track = 0;
2270 ccg.cylinders = 0;
2271 xpt_action((union ccb*)&ccg);
eac73adf
PA
2272 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2273 /*
2274 * We don't know what went wrong here, so just pick
2275 * a fallback geometry that avoids nasty things like
2276 * division by zero.
2277 */
2278 dp->heads = 255;
2279 dp->secs_per_track = 255;
2280 dp->cylinders = dp->sectors / (255 * 255);
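		/*
		 * Rough illustration of the fallback: 143374000 sectors
		 * (about 68GB at 512 bytes/sector) becomes 255 heads,
		 * 255 sectors/track and 143374000 / (255 * 255) = 2204
		 * cylinders; the values only exist so geometry-based
		 * arithmetic cannot divide by zero.
		 */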
2281 if (dp->cylinders == 0) {
2282 dp->cylinders = 1;
2283 }
2284 } else {
2285 dp->heads = ccg.heads;
2286 dp->secs_per_track = ccg.secs_per_track;
2287 dp->cylinders = ccg.cylinders;
2288 }
984263bc
MD
2289}
2290
2291static void
2292dasendorderedtag(void *arg)
2293{
1c8b7a9a 2294 struct da_softc *softc = arg;
984263bc 2295
066e560b 2296 if (da_send_ordered) {
1c8b7a9a
PA
2297 if ((softc->ordered_tag_count == 0)
2298 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
2299 softc->flags |= DA_FLAG_NEED_OTAG;
984263bc 2300 }
af0aa0ac 2301 if (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr)
1c8b7a9a
PA
2302 softc->flags &= ~DA_FLAG_WENT_IDLE;
2303
2304 softc->ordered_tag_count = 0;
984263bc 2305 }
1c8b7a9a
PA
2306 /* Queue us up again */
2307 callout_reset(&softc->sendordered_c,
2308 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL,
2309 dasendorderedtag, softc);
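	/*
	 * The callout re-arms itself, so the ordered-tag hint is refreshed
	 * DA_ORDEREDTAG_INTERVAL times per DA_DEFAULT_TIMEOUT seconds;
	 * this is intended to bound how long a device may keep reordering
	 * simple-tagged commands around older ones.
	 */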
984263bc
MD
2310}
2311
2312/*
2313 * Step through all DA peripheral drivers, and if the device is still open,
2314 * sync the disk cache to physical media.
2315 */
2316static void
2317dashutdown(void * arg, int howto)
2318{
2319 struct cam_periph *periph;
2320 struct da_softc *softc;
2321
234289a4 2322 TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
984263bc 2323 union ccb ccb;
234289a4 2324
1c8b7a9a 2325 cam_periph_lock(periph);
984263bc
MD
2326 softc = (struct da_softc *)periph->softc;
2327
2328 /*
2329 * We only sync the cache if the drive is still open, and
2330 * if the drive is capable of it.
2331 */
2332 if (((softc->flags & DA_FLAG_OPEN) == 0)
1c8b7a9a
PA
2333 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
2334 cam_periph_unlock(periph);
984263bc 2335 continue;
1c8b7a9a 2336 }
984263bc
MD
2337
2338 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
2339
2340 ccb.ccb_h.ccb_state = DA_CCB_DUMP;
2341 scsi_synchronize_cache(&ccb.csio,
2342 /*retries*/1,
2343 /*cbfcnp*/dadone,
2344 MSG_SIMPLE_Q_TAG,
2345 /*begin_lba*/0, /* whole disk */
2346 /*lb_count*/0,
2347 SSD_FULL_SIZE,
19a136fb 2348 60 * 60 * 1000);
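		/*
		 * The timeout is in milliseconds, so 60 * 60 * 1000 gives
		 * the flush up to an hour; draining a large write cache at
		 * shutdown can be slow and the request is polled below.
		 */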
984263bc
MD
2349
2350 xpt_polled_action(&ccb);
2351
2352 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2353 if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
2354 CAM_SCSI_STATUS_ERROR)
2355 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
2356 int error_code, sense_key, asc, ascq;
2357
2358 scsi_extract_sense(&ccb.csio.sense_data,
2359 &error_code, &sense_key,
2360 &asc, &ascq);
2361
2362 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
2363 scsi_sense_print(&ccb.csio);
2364 } else {
1c8b7a9a
PA
2365 xpt_print(periph->path, "Synchronize "
2366 "cache failed, status == 0x%x, scsi status "
2367 "== 0x%x\n", ccb.ccb_h.status,
2368 ccb.csio.scsi_status);
984263bc
MD
2369 }
2370 }
2371
2372 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
2373 cam_release_devq(ccb.ccb_h.path,
2374 /*relsim_flags*/0,
2375 /*reduction*/0,
2376 /*timeout*/0,
2377 /*getcount_only*/0);
2378
1c8b7a9a 2379 cam_periph_unlock(periph);
984263bc
MD
2380 }
2381}
2382
2383#else /* !_KERNEL */
2384
2385/*
2386 * XXX This is only left out of the kernel build to silence warnings. If,
2387 * for some reason, this function is used in the kernel, the ifdefs should
2388 * be moved so it is included both in the kernel and userland.
2389 */
2390void
2391scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
2392 void (*cbfcnp)(struct cam_periph *, union ccb *),
2393 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
2394 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
2395 u_int32_t timeout)
2396{
2397 struct scsi_format_unit *scsi_cmd;
2398
2399 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
2400 scsi_cmd->opcode = FORMAT_UNIT;
2401 scsi_cmd->byte2 = byte2;
2402 scsi_ulto2b(ileave, scsi_cmd->interleave);
2403
2404 cam_fill_csio(csio,
2405 retries,
2406 cbfcnp,
2407 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
2408 tag_action,
2409 data_ptr,
2410 dxfer_len,
2411 sense_len,
2412 sizeof(*scsi_cmd),
2413 timeout);
2414}
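/*
 * A userland caller (e.g. camcontrol(8)) would typically allocate a CCB
 * with cam_getccb(), fill it in with scsi_format_unit() (byte2 and
 * ileave of 0 and no data buffer for a plain format), and submit it
 * with cam_send_ccb(); a non-zero dxfer_len means the buffer, such as a
 * caller-built format parameter list, is sent to the device (CAM_DIR_OUT).
 */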
2415
2416#endif /* _KERNEL */