fdisk, gpt - Support trim on recent kernels
[dragonfly.git] / sys / bus / cam / scsi / scsi_da.c
984263bc
MD
1/*
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * Copyright (c) 1997 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
62ade751 28 * $FreeBSD: src/sys/cam/scsi/scsi_da.c,v 1.42.2.46 2003/10/21 22:18:19 thomas Exp $
984263bc
MD
29 */
30
984263bc
MD
31#include <sys/param.h>
32
33#ifdef _KERNEL
684a93c4 34
984263bc
MD
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/buf.h>
38#include <sys/sysctl.h>
62ade751 39#include <sys/taskqueue.h>
1c8b7a9a 40#include <sys/lock.h>
984263bc 41#include <sys/conf.h>
1c8b7a9a 42#include <sys/devicestat.h>
984263bc 43#include <sys/disk.h>
55a78310 44#include <sys/dtype.h>
984263bc
MD
45#include <sys/eventhandler.h>
46#include <sys/malloc.h>
47#include <sys/cons.h>
3020e3be 48#include <sys/proc.h>
684a93c4 49
3020e3be 50#include <sys/buf2.h>
4e01b467 51#include <sys/thread2.h>
684a93c4 52
1c8b7a9a 53#endif /* _KERNEL */
984263bc 54
05220613 55#ifdef _KERNEL
984263bc 56#include <vm/pmap.h>
05220613 57#endif
984263bc
MD
58
59#ifndef _KERNEL
60#include <stdio.h>
61#include <string.h>
62#endif /* _KERNEL */
63
55230951 64#include <sys/camlib.h>
1f2de5d4
MD
65#include "../cam.h"
66#include "../cam_ccb.h"
67#include "../cam_extend.h"
68#include "../cam_periph.h"
69#include "../cam_xpt_periph.h"
1c8b7a9a 70#include "../cam_sim.h"
984263bc 71
32506cfa 72#include "scsi_daio.h"
1f2de5d4 73#include "scsi_message.h"
984263bc
MD
74
75#ifndef _KERNEL
1f2de5d4 76#include "scsi_da.h"
984263bc
MD
77#endif /* !_KERNEL */
78
79#ifdef _KERNEL
80typedef enum {
81 DA_STATE_PROBE,
bdd58e03 82 DA_STATE_PROBE2,
984263bc
MD
83 DA_STATE_NORMAL
84} da_state;
85
86typedef enum {
87 DA_FLAG_PACK_INVALID = 0x001,
88 DA_FLAG_NEW_PACK = 0x002,
89 DA_FLAG_PACK_LOCKED = 0x004,
90 DA_FLAG_PACK_REMOVABLE = 0x008,
91 DA_FLAG_TAGGED_QUEUING = 0x010,
984263bc 92 DA_FLAG_RETRY_UA = 0x080,
62ade751 93 DA_FLAG_OPEN = 0x100,
af0aa0ac
MD
94 DA_FLAG_SCTX_INIT = 0x200,
95 DA_FLAG_RD_LIMIT = 0x400,
e0fb398b
T
96 DA_FLAG_WR_LIMIT = 0x800,
97 DA_FLAG_CAN_TRIM = 0x1000
984263bc
MD
98} da_flags;
99
100typedef enum {
101 DA_Q_NONE = 0x00,
102 DA_Q_NO_SYNC_CACHE = 0x01,
62ade751
MD
103 DA_Q_NO_6_BYTE = 0x02,
104 DA_Q_NO_PREVENT = 0x04
984263bc
MD
105} da_quirks;
106
107typedef enum {
a9f09b75 108 DA_CCB_POLLED = 0x00,
984263bc 109 DA_CCB_PROBE = 0x01,
bdd58e03
MD
110 DA_CCB_PROBE2 = 0x02,
111 DA_CCB_BUFFER_IO = 0x03,
112 DA_CCB_WAITING = 0x04,
113 DA_CCB_DUMP = 0x05,
e0fb398b 114 DA_CCB_TRIM = 0x06,
984263bc
MD
115 DA_CCB_TYPE_MASK = 0x0F,
116 DA_CCB_RETRY_UA = 0x10
117} da_ccb_state;
118
119/* Offsets into our private area for storing information */
120#define ccb_state ppriv_field0
81b5c339 121#define ccb_bio ppriv_ptr1
984263bc
MD
122
123struct disk_params {
124 u_int8_t heads;
bdd58e03 125 u_int32_t cylinders;
984263bc
MD
126 u_int8_t secs_per_track;
127 u_int32_t secsize; /* Number of bytes/sector */
bdd58e03 128 u_int64_t sectors; /* total number sectors */
984263bc
MD
129};
130
e0fb398b
T
131#define TRIM_MAX_BLOCKS 8
132#define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * 64)
133struct trim_request {
134 uint8_t data[TRIM_MAX_RANGES * 8];
135 struct bio *bios[TRIM_MAX_RANGES];
136};
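/*
 * Editorial sizing note: with TRIM_MAX_BLOCKS = 8 there are
 * 8 * 64 = 512 ranges, so data[] is 512 * 8 = 4096 bytes, i.e.
 * eight 512-byte TRIM payload blocks of 64 eight-byte range
 * entries each.
 */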
137
984263bc 138struct da_softc {
af0aa0ac
MD
139 struct bio_queue_head bio_queue_rd;
140 struct bio_queue_head bio_queue_wr;
e0fb398b 141 struct bio_queue_head bio_queue_trim;
984263bc
MD
142 struct devstat device_stats;
143 SLIST_ENTRY(da_softc) links;
144 LIST_HEAD(, ccb_hdr) pending_ccbs;
145 da_state state;
146 da_flags flags;
147 da_quirks quirks;
148 int minimum_cmd_size;
af0aa0ac
MD
149 int outstanding_cmds_rd;
150 int outstanding_cmds_wr;
e0fb398b
T
151 int trim_max_ranges;
152 int trim_running;
153 int trim_enabled;
984263bc
MD
154 struct disk_params params;
155 struct disk disk;
156 union ccb saved_ccb;
62ade751
MD
157 struct task sysctl_task;
158 struct sysctl_ctx_list sysctl_ctx;
159 struct sysctl_oid *sysctl_tree;
e0fb398b 160 struct trim_request trim_req;
984263bc
MD
161};
162
163struct da_quirk_entry {
164 struct scsi_inquiry_pattern inq_pat;
165 da_quirks quirks;
166};
167
168static const char quantum[] = "QUANTUM";
169static const char microp[] = "MICROP";
170
171static struct da_quirk_entry da_quirk_table[] =
172{
62ade751 173 /* SPI, FC devices */
984263bc
MD
174 {
175 /*
176 * Fujitsu M2513A MO drives.
177 * Tested devices: M2513A2 firmware versions 1200 & 1300.
178 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
179 * Reported by: W.Scholten <whs@xs4all.nl>
180 */
181 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
182 /*quirks*/ DA_Q_NO_SYNC_CACHE
183 },
184 {
185 /* See above. */
186 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
187 /*quirks*/ DA_Q_NO_SYNC_CACHE
188 },
189 {
190 /*
191 * This particular Fujitsu drive doesn't like the
192 * synchronize cache command.
193 * Reported by: Tom Jackson <toj@gorilla.net>
194 */
195 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
196 /*quirks*/ DA_Q_NO_SYNC_CACHE
984263bc
MD
197 },
198 {
199 /*
200 * This drive doesn't like the synchronize cache command
201 * either. Reported by: Matthew Jacob <mjacob@feral.com>
202 * in NetBSD PR kern/6027, August 24, 1998.
203 */
204 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
205 /*quirks*/ DA_Q_NO_SYNC_CACHE
206 },
207 {
208 /*
209 * This drive doesn't like the synchronize cache command
210 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
211 * (PR 8882).
212 */
213 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
214 /*quirks*/ DA_Q_NO_SYNC_CACHE
215 },
216 {
217 /*
218 * Doesn't like the synchronize cache command.
219 * Reported by: Blaz Zupan <blaz@gold.amis.net>
220 */
221 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
222 /*quirks*/ DA_Q_NO_SYNC_CACHE
223 },
224 {
225 /*
226 * Doesn't like the synchronize cache command.
d92d7552 227 * Reported by: Blaz Zupan <blaz@gold.amis.net>
984263bc
MD
228 */
229 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
230 /*quirks*/ DA_Q_NO_SYNC_CACHE
231 },
232 {
233 /*
234 * Doesn't like the synchronize cache command.
235 */
236 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
237 /*quirks*/ DA_Q_NO_SYNC_CACHE
238 },
d92d7552
PA
239 {
240 /*
241 * Doesn't like the synchronize cache command.
242 * Reported by: walter@pelissero.de
243 */
244 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
245 /*quirks*/ DA_Q_NO_SYNC_CACHE
246 },
984263bc
MD
247 {
248 /*
249 * Doesn't work correctly with 6 byte reads/writes.
250 * Returns illegal request, and points to byte 9 of the
251 * 6-byte CDB.
252 * Reported by: Adam McDougall <bsdx@spawnet.com>
253 */
254 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
255 /*quirks*/ DA_Q_NO_6_BYTE
256 },
257 {
62ade751 258 /* See above. */
984263bc
MD
259 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
260 /*quirks*/ DA_Q_NO_6_BYTE
261 },
984263bc
MD
262 {
263 /*
d92d7552
PA
264 * Doesn't like the synchronize cache command.
265 * Reported by: walter@pelissero.de
984263bc 266 */
d92d7552 267 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
62ade751 268 /*quirks*/ DA_Q_NO_SYNC_CACHE
984263bc
MD
269 },
270 {
d92d7552
PA
271 /*
272 * The CISS RAID controllers do not support SYNC_CACHE
273 */
274 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
62ade751
MD
275 /*quirks*/ DA_Q_NO_SYNC_CACHE
276 },
a4a9ba75
SW
277 {
278 /*
279 * The same goes for the mly(4) controllers
280 */
281 {T_DIRECT, SIP_MEDIA_FIXED, "MLY*", "*", "MYLX"},
282 /*quirks*/ DA_Q_NO_SYNC_CACHE
283 },
a9453758
MD
284 /*
285 * USB mass storage devices supported by umass(4)
286 *
287 * NOTE: USB attachments automatically set DA_Q_NO_SYNC_CACHE so
288 * it does not have to be specified here.
289 */
d92d7552
PA
290 {
291 /*
292 * Creative Nomad MUVO mp3 player (USB)
293 * PR: kern/53094
294 */
295 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
a9453758 296 /*quirks*/ DA_Q_NO_PREVENT
d92d7552 297 },
984263bc
MD
298 {
299 /*
d92d7552
PA
300 * Sigmatel USB Flash MP3 Player
301 * PR: kern/57046
984263bc 302 */
d92d7552 303 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
a9453758 304 /*quirks*/ DA_Q_NO_PREVENT
984263bc
MD
305 },
306 {
307 /*
d92d7552
PA
308 * SEAGRAND NP-900 MP3 Player
309 * PR: kern/64563
984263bc 310 */
d92d7552 311 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
a9453758 312 /*quirks*/ DA_Q_NO_PREVENT
984263bc
MD
313 },
314 {
315 /*
d92d7552
PA
316 * Creative MUVO Slim mp3 player (USB)
317 * PR: usb/86131
984263bc 318 */
d92d7552 319 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
a9453758 320 "*"}, /*quirks*/ DA_Q_NO_PREVENT
d92d7552
PA
321 },
322 {
323 /*
324 * Philips USB Key Audio KEY013
325 * PR: usb/68412
326 */
327 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
a9453758 328 /*quirks*/ DA_Q_NO_PREVENT
285d490c 329 },
984263bc
MD
330};
331
332static d_open_t daopen;
333static d_close_t daclose;
334static d_strategy_t dastrategy;
984263bc 335static d_dump_t dadump;
e0fb398b 336static d_ioctl_t daioctl;
984263bc
MD
337static periph_init_t dainit;
338static void daasync(void *callback_arg, u_int32_t code,
339 struct cam_path *path, void *arg);
62ade751 340static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
984263bc
MD
341static periph_ctor_t daregister;
342static periph_dtor_t dacleanup;
343static periph_start_t dastart;
344static periph_oninv_t daoninvalidate;
345static void dadone(struct cam_periph *periph,
346 union ccb *done_ccb);
347static int daerror(union ccb *ccb, u_int32_t cam_flags,
348 u_int32_t sense_flags);
349static void daprevent(struct cam_periph *periph, int action);
bdd58e03 350static int dagetcapacity(struct cam_periph *periph);
f7b26992 351static int dacheckmedia(struct cam_periph *periph);
bdd58e03
MD
352static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
353 uint64_t maxsector);
af0aa0ac 354static void daflushbioq(struct bio_queue_head *bioq, int error);
984263bc
MD
355static void dashutdown(void *arg, int howto);
356
357#ifndef DA_DEFAULT_TIMEOUT
358#define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
359#endif
360
361#ifndef DA_DEFAULT_RETRY
362#define DA_DEFAULT_RETRY 4
363#endif
364
365static int da_retry_count = DA_DEFAULT_RETRY;
366static int da_default_timeout = DA_DEFAULT_TIMEOUT;
984263bc 367
984263bc
MD
368SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
369 "CAM Direct Access Disk driver");
370SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
371 &da_retry_count, 0, "Normal I/O retry count");
62ade751 372TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
984263bc
MD
373SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
374 &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
62ade751 375TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
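/*
 * Editorial example: both knobs can be changed at run time, e.g.
 * "sysctl kern.cam.da.default_timeout=90", or preset from the loader
 * via the tunables of the same name registered above.
 */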
984263bc
MD
376
377static struct periph_driver dadriver =
378{
379 dainit, "da",
380 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
381};
382
2ad14cb5 383PERIPHDRIVER_DECLARE(da, dadriver);
984263bc 384
fef8985e 385static struct dev_ops da_ops = {
b8e1d863 386 { "da", 0, D_DISK | D_MPSAFE },
fef8985e
MD
387 .d_open = daopen,
388 .d_close = daclose,
389 .d_read = physread,
390 .d_write = physwrite,
fef8985e 391 .d_strategy = dastrategy,
e0fb398b
T
392 .d_dump = dadump,
393 .d_ioctl = daioctl
984263bc
MD
394};
395
984263bc
MD
396static struct extend_array *daperiphs;
397
1c8b7a9a
PA
398MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
399
e0fb398b
T
400static int
401daioctl(struct dev_ioctl_args *ap)
402{
403 int unit;
404 int error = 0;
405 struct buf *bp;
406 struct cam_periph *periph;
407 int byte_count;
e0fb398b
T
408
409 off_t *del_num = (off_t*)ap->a_data;
410 off_t bytes_left;
411 off_t bytes_start;
412
413 cdev_t dev = ap->a_head.a_dev;
414
415
416 unit = dkunit(dev);
417 periph = cam_extend_get(daperiphs, unit);
418 if (periph == NULL)
419 return(ENXIO);
e0fb398b
T
420
421 switch (ap->a_cmd) {
32506cfa 422 case DAIOCTRIM:
e0fb398b
T
423 {
424
425 bytes_left = del_num[1];
426 bytes_start = del_num[0];
427
428 /* TRIM occurs on 512-byte sectors. */
429 KKASSERT((bytes_left % 512) == 0);
430 KKASSERT((bytes_start % 512) == 0);
431
432
433 /* Break TRIM up into int-sized commands because of b_bcount */
434 while(bytes_left) {
435
436 /*
437 * Rather than squeezing out more blocks in b_bcount
438 * and having to break up the TRIM request in dastart(),
439 * we ensure we can always TRIM this many bytes with one
440 * TRIM command (this happens if the device only
441 * supports one TRIM block).
442 *
443 * With a minimum TRIM block size of 1, a TRIM command can
444 * free 4194240 blks (64 * 65535): each LBA range can address
445 * 65535 blks and there are 64 such ranges in a 512-byte
446 * block. And 4194240 * 512 = 0x7FFF8000.
447 *
448 */
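/*
 * Editorial arithmetic check: 64 ranges * 65535 blks = 4194240 blks,
 * and 4194240 blks * 512 bytes = 2147450880 bytes = 0x7FFF8000, which
 * still fits in the int-sized b_bcount.
 */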
449 byte_count = MIN(bytes_left,0x7FFF8000);
d2812084 450 bp = getnewbuf(0, 0, 0, 1);
e0fb398b
T
451
452 bp->b_cmd = BUF_CMD_FREEBLKS;
453 bp->b_bio1.bio_offset = bytes_start;
454 bp->b_bcount = byte_count;
455 bp->b_bio1.bio_flags |= BIO_SYNC;
456 bp->b_bio1.bio_done = biodone_sync;
457
458 dev_dstrategy(ap->a_head.a_dev, &bp->b_bio1);
459
460 if (biowait(&bp->b_bio1, "TRIM")) {
461 kprintf("Error:%d\n", bp->b_error);
53005b09 462 brelse(bp);
e0fb398b
T
463 return(bp->b_error ? bp->b_error : EIO);
464 }
465 brelse(bp);
466 bytes_left -= byte_count;
467 bytes_start += byte_count;
468 }
469 break;
470 }
471 default:
472 return(EINVAL);
473 }
474
475 return(error);
476}
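/*
 * Editorial sketch of userland usage (not part of the driver).  The
 * DAIOCTRIM argument is two off_t values, { byte offset, byte count },
 * both multiples of 512; "/dev/da0" and the 64MB length below are just
 * illustrative:
 *
 *	off_t args[2] = { 0, 64 * 1024 * 1024 };
 *	int fd = open("/dev/da0", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, DAIOCTRIM, args) < 0)
 *		err(1, "DAIOCTRIM");
 */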
477
984263bc 478static int
fef8985e 479daopen(struct dev_open_args *ap)
984263bc 480{
b13267a5 481 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
482 struct cam_periph *periph;
483 struct da_softc *softc;
a688b15c 484 struct disk_info info;
984263bc 485 int unit;
984263bc 486 int error;
984263bc
MD
487
488 unit = dkunit(dev);
984263bc 489 periph = cam_extend_get(daperiphs, unit);
b05e84c9 490 if (periph == NULL) {
984263bc 491 return (ENXIO);
b05e84c9 492 }
984263bc 493
1c8b7a9a
PA
494 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
495 return(ENXIO);
496 }
497
498 cam_periph_lock(periph);
499 if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
500 cam_periph_unlock(periph);
501 cam_periph_release(periph);
502 return (error);
503 }
504
505 unit = periph->unit_number;
984263bc
MD
506 softc = (struct da_softc *)periph->softc;
507
508 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
9ece9268
PA
509 ("daopen: dev=%s (unit %d)\n", devtoname(dev),
510 unit));
984263bc 511
984263bc
MD
512 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
513 /* Invalidate our pack information. */
514 disk_invalidate(&softc->disk);
515 softc->flags &= ~DA_FLAG_PACK_INVALID;
516 }
984263bc 517
f7b26992
MD
518 error = dacheckmedia(periph);
519 softc->flags |= DA_FLAG_OPEN;
984263bc
MD
520
521 if (error == 0) {
522 struct ccb_getdev cgd;
523
a688b15c
MD
524 /* Build disk information structure */
525 bzero(&info, sizeof(info));
526 info.d_type = DTYPE_SCSI;
984263bc
MD
527
528 /*
529 * Grab the inquiry data to get the vendor and product names.
530 * Put them in the typename and packname for the label.
531 */
532 xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
533 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
534 xpt_action((union ccb *)&cgd);
535
984263bc
MD
536 /*
537 * Check to see whether or not the blocksize is set yet.
538 * If it isn't, set it and then clear the blocksize
539 * unavailable flag for the device statistics.
540 */
541 if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
542 softc->device_stats.block_size = softc->params.secsize;
543 softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
544 }
545 }
546
b05e84c9 547 if (error == 0) {
62ade751
MD
548 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
549 (softc->quirks & DA_Q_NO_PREVENT) == 0)
b05e84c9
PA
550 daprevent(periph, PR_PREVENT);
551 } else {
984263bc
MD
552 softc->flags &= ~DA_FLAG_OPEN;
553 cam_periph_release(periph);
554 }
2d19cdd3 555 cam_periph_unhold(periph, 1);
984263bc
MD
556 return (error);
557}
558
559static int
fef8985e 560daclose(struct dev_close_args *ap)
984263bc 561{
b13267a5 562 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
563 struct cam_periph *periph;
564 struct da_softc *softc;
565 int unit;
566 int error;
567
568 unit = dkunit(dev);
569 periph = cam_extend_get(daperiphs, unit);
570 if (periph == NULL)
571 return (ENXIO);
572
1c8b7a9a
PA
573 cam_periph_lock(periph);
574 if ((error = cam_periph_hold(periph, 0)) != 0) {
575 cam_periph_unlock(periph);
576 cam_periph_release(periph);
577 return (error);
984263bc
MD
578 }
579
1c8b7a9a
PA
580 softc = (struct da_softc *)periph->softc;
581
984263bc
MD
582 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
583 union ccb *ccb;
584
585 ccb = cam_periph_getccb(periph, /*priority*/1);
a9f09b75 586 ccb->ccb_h.ccb_state = DA_CCB_POLLED;
984263bc
MD
587
588 scsi_synchronize_cache(&ccb->csio,
589 /*retries*/1,
590 /*cbfcnp*/dadone,
591 MSG_SIMPLE_Q_TAG,
592 /*begin_lba*/0,/* Cover the whole disk */
593 /*lb_count*/0,
594 SSD_FULL_SIZE,
595 5 * 60 * 1000);
596
597 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
598 /*sense_flags*/SF_RETRY_UA,
599 &softc->device_stats);
600
601 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
602 if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
603 CAM_SCSI_STATUS_ERROR) {
604 int asc, ascq;
605 int sense_key, error_code;
606
607 scsi_extract_sense(&ccb->csio.sense_data,
608 &error_code,
609 &sense_key,
610 &asc, &ascq);
611 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
612 scsi_sense_print(&ccb->csio);
613 } else {
1c8b7a9a
PA
614 xpt_print(periph->path, "Synchronize cache "
615 "failed, status == 0x%x, scsi status == "
616 "0x%x\n", ccb->csio.ccb_h.status,
617 ccb->csio.scsi_status);
984263bc
MD
618 }
619 }
620
621 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
622 cam_release_devq(ccb->ccb_h.path,
623 /*relsim_flags*/0,
624 /*reduction*/0,
625 /*timeout*/0,
626 /*getcount_only*/0);
627
628 xpt_release_ccb(ccb);
629
630 }
631
632 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
62ade751
MD
633 if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
634 daprevent(periph, PR_ALLOW);
984263bc
MD
635 /*
636 * If we've got removable media, mark the blocksize as
637 * unavailable, since it could change when new media is
638 * inserted.
639 */
640 softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
641 }
642
fca0fce6
MD
643 /*
644 * Don't compound any ref counting software bugs with more.
645 */
646 if (softc->flags & DA_FLAG_OPEN) {
647 softc->flags &= ~DA_FLAG_OPEN;
648 cam_periph_release(periph);
649 } else {
1c8b7a9a
PA
650 xpt_print(periph->path,
651 "daclose() called on an already closed device!\n");
fca0fce6 652 }
2d19cdd3 653 cam_periph_unhold(periph, 1);
984263bc
MD
654 return (0);
655}
656
657/*
658 * Actually translate the requested transfer into one the physical driver
659 * can understand. The transfer is described by a buf and will include
660 * only one physical transfer.
661 */
fef8985e
MD
662static int
663dastrategy(struct dev_strategy_args *ap)
984263bc 664{
b13267a5 665 cdev_t dev = ap->a_head.a_dev;
fef8985e 666 struct bio *bio = ap->a_bio;
81b5c339 667 struct buf *bp = bio->bio_buf;
984263bc
MD
668 struct cam_periph *periph;
669 struct da_softc *softc;
670 u_int unit;
984263bc 671
81b5c339 672 unit = dkunit(dev);
984263bc
MD
673 periph = cam_extend_get(daperiphs, unit);
674 if (periph == NULL) {
675 bp->b_error = ENXIO;
676 goto bad;
677 }
678 softc = (struct da_softc *)periph->softc;
1c8b7a9a
PA
679
680 cam_periph_lock(periph);
681
984263bc
MD
682#if 0
683 /*
684 * check it's not too big a transfer for our adapter
685 */
81b5c339 686 scsi_minphys(bp, &sd_switch);
984263bc
MD
687#endif
688
689 /*
690 * Mask interrupts so that the pack cannot be invalidated until
691 * after we are in the queue. Otherwise, we might not properly
692 * clean up one of the buffers.
693 */
984263bc
MD
694
695 /*
696 * If the device has been made invalid, error out
697 */
698 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1c8b7a9a 699 cam_periph_unlock(periph);
984263bc
MD
700 bp->b_error = ENXIO;
701 goto bad;
702 }
703
704 /*
705 * Place it in the queue of disk activities for this disk
706 */
af0aa0ac 707 if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)
c6cad506 708 bioqdisksort(&softc->bio_queue_wr, bio);
e0fb398b 709 else if (bp->b_cmd == BUF_CMD_FREEBLKS)
c6cad506 710 bioqdisksort(&softc->bio_queue_trim, bio);
af0aa0ac 711 else
c6cad506 712 bioqdisksort(&softc->bio_queue_rd, bio);
984263bc
MD
713
714 /*
715 * Schedule ourselves for performing the work.
716 */
717 xpt_schedule(periph, /* XXX priority */1);
1c8b7a9a 718 cam_periph_unlock(periph);
984263bc 719
fef8985e 720 return(0);
984263bc
MD
721bad:
722 bp->b_flags |= B_ERROR;
723
724 /*
725 * Correctly set the buf to indicate a completed xfer
726 */
727 bp->b_resid = bp->b_bcount;
81b5c339 728 biodone(bio);
fef8985e 729 return(0);
984263bc
MD
730}
731
984263bc 732static int
fef8985e 733dadump(struct dev_dump_args *ap)
984263bc 734{
b13267a5 735 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
736 struct cam_periph *periph;
737 struct da_softc *softc;
738 u_int unit;
b24cd69c 739 u_int32_t secsize;
984263bc 740 struct ccb_scsiio csio;
984263bc
MD
741
742 unit = dkunit(dev);
984263bc 743 periph = cam_extend_get(daperiphs, unit);
b24cd69c 744 if (periph == NULL)
984263bc 745 return (ENXIO);
b24cd69c 746
984263bc 747 softc = (struct da_softc *)periph->softc;
1c8b7a9a 748 cam_periph_lock(periph);
b24cd69c
AH
749 secsize = softc->params.secsize; /* XXX: or ap->a_secsize? */
750
1c8b7a9a
PA
751 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
752 cam_periph_unlock(periph);
984263bc 753 return (ENXIO);
1c8b7a9a 754 }
984263bc 755
b24cd69c
AH
756 /*
757 * Because length == 0 means we are supposed to flush the cache, we only
758 * try to write something if length > 0.
759 */
760 if (ap->a_length > 0) {
984263bc 761 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
0b0362e1 762 csio.ccb_h.flags |= CAM_POLLED;
984263bc
MD
763 csio.ccb_h.ccb_state = DA_CCB_DUMP;
764 scsi_read_write(&csio,
765 /*retries*/1,
766 dadone,
767 MSG_ORDERED_Q_TAG,
768 /*read*/FALSE,
769 /*byte2*/0,
770 /*minimum_cmd_size*/ softc->minimum_cmd_size,
b24cd69c
AH
771 ap->a_offset / secsize,
772 ap->a_length / secsize,
773 /*data_ptr*/(u_int8_t *) ap->a_virtual,
774 /*dxfer_len*/ap->a_length,
984263bc
MD
775 /*sense_len*/SSD_FULL_SIZE,
776 DA_DEFAULT_TIMEOUT * 1000);
777 xpt_polled_action((union ccb *)&csio);
778
779 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
85f8e2ea 780 kprintf("Aborting dump due to I/O error.\n");
984263bc
MD
781 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
782 CAM_SCSI_STATUS_ERROR)
783 scsi_sense_print(&csio);
784 else
85f8e2ea 785 kprintf("status == 0x%x, scsi status == 0x%x\n",
984263bc
MD
786 csio.ccb_h.status, csio.scsi_status);
787 return(EIO);
788 }
b24cd69c
AH
789 cam_periph_unlock(periph);
790 return 0;
984263bc
MD
791 }
792
793 /*
794 * Sync the disk cache contents to the physical media.
795 */
796 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
797
798 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
799 csio.ccb_h.ccb_state = DA_CCB_DUMP;
800 scsi_synchronize_cache(&csio,
801 /*retries*/1,
802 /*cbfcnp*/dadone,
803 MSG_SIMPLE_Q_TAG,
804 /*begin_lba*/0,/* Cover the whole disk */
805 /*lb_count*/0,
806 SSD_FULL_SIZE,
807 5 * 60 * 1000);
808 xpt_polled_action((union ccb *)&csio);
809
810 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
811 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
812 CAM_SCSI_STATUS_ERROR) {
813 int asc, ascq;
814 int sense_key, error_code;
815
816 scsi_extract_sense(&csio.sense_data,
817 &error_code,
818 &sense_key,
819 &asc, &ascq);
820 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
821 scsi_sense_print(&csio);
822 } else {
1c8b7a9a
PA
823 xpt_print(periph->path, "Synchronize cache "
824 "failed, status == 0x%x, scsi status == "
825 "0x%x\n", csio.ccb_h.status,
826 csio.scsi_status);
984263bc
MD
827 }
828 }
829 }
1c8b7a9a 830 cam_periph_unlock(periph);
984263bc
MD
831 return (0);
832}
833
834static void
835dainit(void)
836{
837 cam_status status;
984263bc
MD
838
839 /*
840 * Create our extend array for storing the devices we attach to.
841 */
842 daperiphs = cam_extend_new();
984263bc 843 if (daperiphs == NULL) {
85f8e2ea 844 kprintf("da: Failed to alloc extend array!\n");
984263bc
MD
845 return;
846 }
3690a379 847
984263bc
MD
848 /*
849 * Install a global async callback. This callback will
850 * receive async callbacks like "new device found".
851 */
1c8b7a9a 852 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
984263bc
MD
853
854 if (status != CAM_REQ_CMP) {
85f8e2ea 855 kprintf("da: Failed to attach master async callback "
984263bc 856 "due to status 0x%x!\n", status);
0c4341b9 857 } else {
984263bc
MD
858 /* Register our shutdown event handler */
859 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
c022ffc9 860 NULL, SHUTDOWN_PRI_SECOND)) == NULL)
5ee727b6
SW
861 kprintf("%s: shutdown event registration failed!\n",
862 __func__);
984263bc
MD
863 }
864}
865
866static void
867daoninvalidate(struct cam_periph *periph)
868{
984263bc 869 struct da_softc *softc;
984263bc
MD
870
871 softc = (struct da_softc *)periph->softc;
872
873 /*
874 * De-register any async callbacks.
875 */
1c8b7a9a 876 xpt_register_async(0, daasync, periph, periph->path);
984263bc
MD
877
878 softc->flags |= DA_FLAG_PACK_INVALID;
879
984263bc
MD
880 /*
881 * Return all queued I/O with ENXIO.
882 * XXX Handle any transactions queued to the card
883 * with XPT_ABORT_CCB.
884 */
e0fb398b 885 daflushbioq(&softc->bio_queue_trim, ENXIO);
af0aa0ac
MD
886 daflushbioq(&softc->bio_queue_wr, ENXIO);
887 daflushbioq(&softc->bio_queue_rd, ENXIO);
888 xpt_print(periph->path, "lost device\n");
889}
890
891static void
892daflushbioq(struct bio_queue_head *bioq, int error)
893{
894 struct bio *q_bio;
895 struct buf *q_bp;
896
897 while ((q_bio = bioq_first(bioq)) != NULL){
898 bioq_remove(bioq, q_bio);
81b5c339 899 q_bp = q_bio->bio_buf;
984263bc 900 q_bp->b_resid = q_bp->b_bcount;
af0aa0ac 901 q_bp->b_error = error;
984263bc 902 q_bp->b_flags |= B_ERROR;
81b5c339 903 biodone(q_bio);
984263bc 904 }
984263bc
MD
905}
906
907static void
908dacleanup(struct cam_periph *periph)
909{
910 struct da_softc *softc;
911
912 softc = (struct da_softc *)periph->softc;
913
914 devstat_remove_entry(&softc->device_stats);
915 cam_extend_release(daperiphs, periph->unit_number);
1c8b7a9a 916 xpt_print(periph->path, "removing device entry\n");
62ade751
MD
917 /*
918 * If we can't free the sysctl tree, oh well...
919 */
920 if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
921 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
1c8b7a9a 922 xpt_print(periph->path, "can't remove sysctl context\n");
62ade751 923 }
2d19cdd3 924 periph->softc = NULL;
e4c9c0c8 925 if (softc->disk.d_rawdev) {
1c8b7a9a 926 cam_periph_unlock(periph);
335dda38 927 disk_destroy(&softc->disk);
1c8b7a9a 928 cam_periph_lock(periph);
984263bc 929 }
1c8b7a9a 930
efda3bd0 931 kfree(softc, M_DEVBUF);
984263bc
MD
932}
933
934static void
935daasync(void *callback_arg, u_int32_t code,
936 struct cam_path *path, void *arg)
937{
938 struct cam_periph *periph;
939
940 periph = (struct cam_periph *)callback_arg;
87993e5a 941
984263bc
MD
942 switch (code) {
943 case AC_FOUND_DEVICE:
944 {
945 struct ccb_getdev *cgd;
946 cam_status status;
947
948 cgd = (struct ccb_getdev *)arg;
e9936c96
PA
949 if (cgd == NULL)
950 break;
984263bc
MD
951
952 if (SID_TYPE(&cgd->inq_data) != T_DIRECT
953 && SID_TYPE(&cgd->inq_data) != T_RBC
954 && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
955 break;
956
87993e5a
MD
957 /*
958 * Don't complain if a valid peripheral is already attached.
959 */
960 periph = cam_periph_find(cgd->ccb_h.path, "da");
961 if (periph && (periph->flags & CAM_PERIPH_INVALID) == 0)
962 break;
963
984263bc
MD
964 /*
965 * Allocate a peripheral instance for
966 * this device and start the probe
967 * process.
968 */
969 status = cam_periph_alloc(daregister, daoninvalidate,
970 dacleanup, dastart,
971 "da", CAM_PERIPH_BIO,
972 cgd->ccb_h.path, daasync,
973 AC_FOUND_DEVICE, cgd);
974
87993e5a 975 if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
5ee727b6
SW
976 kprintf("%s: Unable to attach to new device "
977 "due to status 0x%x\n", __func__, status);
87993e5a 978 }
984263bc
MD
979 break;
980 }
981 case AC_SENT_BDR:
982 case AC_BUS_RESET:
983 {
984 struct da_softc *softc;
985 struct ccb_hdr *ccbh;
984263bc
MD
986
987 softc = (struct da_softc *)periph->softc;
984263bc
MD
988 /*
989 * Don't fail on the expected unit attention
990 * that will occur.
991 */
992 softc->flags |= DA_FLAG_RETRY_UA;
cbe8f7dc 993 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
984263bc 994 ccbh->ccb_state |= DA_CCB_RETRY_UA;
984263bc
MD
995 /* FALLTHROUGH*/
996 }
997 default:
998 cam_periph_async(periph, code, path, arg);
999 break;
1000 }
1001}
1002
62ade751
MD
1003static void
1004dasysctlinit(void *context, int pending)
1005{
1006 struct cam_periph *periph;
1007 struct da_softc *softc;
1008 char tmpstr[80], tmpstr2[80];
1009
1010 periph = (struct cam_periph *)context;
227ce828 1011 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1c8b7a9a 1012 return;
227ce828 1013 }
62ade751 1014
1c8b7a9a 1015 softc = (struct da_softc *)periph->softc;
da10ea93
MD
1016 ksnprintf(tmpstr, sizeof(tmpstr),
1017 "CAM DA unit %d", periph->unit_number);
1018 ksnprintf(tmpstr2, sizeof(tmpstr2),
1019 "%d", periph->unit_number);
62ade751 1020
da10ea93 1021 sysctl_ctx_free(&softc->sysctl_ctx);
62ade751
MD
1022 sysctl_ctx_init(&softc->sysctl_ctx);
1023 softc->flags |= DA_FLAG_SCTX_INIT;
1024 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1025 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1026 CTLFLAG_RD, 0, tmpstr);
1027 if (softc->sysctl_tree == NULL) {
5ee727b6 1028 kprintf("%s: unable to allocate sysctl tree\n", __func__);
1c8b7a9a 1029 cam_periph_release(periph);
62ade751
MD
1030 return;
1031 }
1032
1033 /*
1034 * Now register the sysctl handler, so the user can change the
1035 * value on the fly.
1036 */
1037 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1038 OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1039 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1040 "Minimum CDB size");
1c8b7a9a 1041
e0fb398b
T
1042 /* Only create the option if the device supports TRIM */
1043 if (softc->disk.d_info.d_trimflag) {
1044 SYSCTL_ADD_INT(&softc->sysctl_ctx,
1045 SYSCTL_CHILDREN(softc->sysctl_tree),
1046 OID_AUTO,
1047 "trim_enabled",
1048 CTLFLAG_RW,
1049 &softc->trim_enabled,
1050 0,
1051 "Enable TRIM for this device (SSD))");
1052 }
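/*
 * Editorial example: with the tree created above, the knobs appear as
 * e.g. "kern.cam.da.0.minimum_cmd_size" and, on TRIM-capable devices,
 * "kern.cam.da.0.trim_enabled" (unit 0 used for illustration).
 */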
1053
1c8b7a9a 1054 cam_periph_release(periph);
62ade751
MD
1055}
1056
1057static int
1058dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
1059{
1060 int error, value;
1061
1062 value = *(int *)arg1;
1063
1064 error = sysctl_handle_int(oidp, &value, 0, req);
1065
1066 if ((error != 0)
1067 || (req->newptr == NULL))
1068 return (error);
1069
1070 /*
bdd58e03 1071 * Acceptable values here are 6, 10, 12, or 16.
62ade751
MD
1072 */
1073 if (value < 6)
1074 value = 6;
1075 else if ((value > 6)
1076 && (value <= 10))
1077 value = 10;
bdd58e03
MD
1078 else if ((value > 10)
1079 && (value <= 12))
62ade751 1080 value = 12;
bdd58e03
MD
1081 else if (value > 12)
1082 value = 16;
62ade751
MD
1083
1084 *(int *)arg1 = value;
1085
1086 return (0);
1087}
1088
984263bc
MD
1089static cam_status
1090daregister(struct cam_periph *periph, void *arg)
1091{
984263bc 1092 struct da_softc *softc;
62ade751 1093 struct ccb_pathinq cpi;
984263bc 1094 struct ccb_getdev *cgd;
62ade751 1095 char tmpstr[80];
984263bc
MD
1096 caddr_t match;
1097
1098 cgd = (struct ccb_getdev *)arg;
1099 if (periph == NULL) {
5ee727b6 1100 kprintf("%s: periph was NULL!!\n", __func__);
984263bc
MD
1101 return(CAM_REQ_CMP_ERR);
1102 }
1103
1104 if (cgd == NULL) {
5ee727b6
SW
1105 kprintf("%s: no getdev CCB, can't register device\n",
1106 __func__);
984263bc
MD
1107 return(CAM_REQ_CMP_ERR);
1108 }
1109
efda3bd0 1110 softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
da10ea93 1111 sysctl_ctx_init(&softc->sysctl_ctx);
984263bc
MD
1112 LIST_INIT(&softc->pending_ccbs);
1113 softc->state = DA_STATE_PROBE;
e0fb398b 1114 bioq_init(&softc->bio_queue_trim);
af0aa0ac
MD
1115 bioq_init(&softc->bio_queue_rd);
1116 bioq_init(&softc->bio_queue_wr);
984263bc
MD
1117 if (SID_IS_REMOVABLE(&cgd->inq_data))
1118 softc->flags |= DA_FLAG_PACK_REMOVABLE;
1119 if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1120 softc->flags |= DA_FLAG_TAGGED_QUEUING;
1121
e0fb398b
T
1122 /* Used to get TRIM status from AHCI driver */
1123 if (cgd->inq_data.vendor_specific1[0] == 1) {
1124 /*
1125 * Max number of LBA ranges an SSD can handle in a single
1126 * TRIM command. vendor_specific1[1] is the number of 512-byte
1127 * blocks the SSD reports that can be passed in a TRIM cmd.
1128 */
1129 softc->trim_max_ranges =
1130 min(cgd->inq_data.vendor_specific1[1] * 64, TRIM_MAX_RANGES);
1131 }
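/*
 * Editorial note: vendor_specific1[1] counts 512-byte TRIM payload
 * blocks of 64 ranges each, so a device reporting 1 block yields
 * trim_max_ranges = 64, while anything reporting 8 or more is capped
 * at TRIM_MAX_RANGES (8 * 64 = 512).
 */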
1132
984263bc
MD
1133 periph->softc = softc;
1134
1135 cam_extend_set(daperiphs, periph->unit_number, periph);
1136
1137 /*
1138 * See if this device has any quirks.
1139 */
1140 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1141 (caddr_t)da_quirk_table,
b370aff7 1142 NELEM(da_quirk_table),
984263bc
MD
1143 sizeof(*da_quirk_table), scsi_inquiry_match);
1144
1145 if (match != NULL)
1146 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1147 else
1148 softc->quirks = DA_Q_NONE;
1149
a9453758
MD
1150 /*
1151 * Unconditionally disable the synchronize cache command for
1152 * usb attachments. It's just impossible to determine if the
1153 * device supports it or not and if it doesn't the port can
1154 * brick.
1155 */
1156 if (strncmp(periph->sim->sim_name, "umass", 4) == 0) {
1157 softc->quirks |= DA_Q_NO_SYNC_CACHE;
1158 }
1159
62ade751
MD
1160 TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
1161
1162 /* Check if the SIM does not want 6 byte commands */
eaae6702 1163 bzero(&cpi, sizeof(cpi));
62ade751
MD
1164 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
1165 cpi.ccb_h.func_code = XPT_PATH_INQ;
1166 xpt_action((union ccb *)&cpi);
1167 if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
1168 softc->quirks |= DA_Q_NO_6_BYTE;
1169
1170 /*
1171 * RBC devices don't have to support READ(6), only READ(10).
1172 */
984263bc
MD
1173 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
1174 softc->minimum_cmd_size = 10;
1175 else
1176 softc->minimum_cmd_size = 6;
1177
62ade751
MD
1178 /*
1179 * Load the user's default, if any.
1180 */
f8c7a42d 1181 ksnprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
62ade751
MD
1182 periph->unit_number);
1183 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
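/*
 * Editorial example: the boot-time default can be preset from the
 * loader, e.g. kern.cam.da.0.minimum_cmd_size="10" in loader.conf
 * (unit 0 used for illustration); the clamp below and
 * dacmdsizesysctl() round any value to 6, 10, 12, or 16.
 */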
1184
1185 /*
bdd58e03 1186 * 6, 10, 12, and 16 are the currently permissible values.
62ade751
MD
1187 */
1188 if (softc->minimum_cmd_size < 6)
1189 softc->minimum_cmd_size = 6;
1190 else if ((softc->minimum_cmd_size > 6)
1191 && (softc->minimum_cmd_size <= 10))
1192 softc->minimum_cmd_size = 10;
bdd58e03
MD
1193 else if ((softc->minimum_cmd_size > 10)
1194 && (softc->minimum_cmd_size <= 12))
62ade751 1195 softc->minimum_cmd_size = 12;
bdd58e03
MD
1196 else if (softc->minimum_cmd_size > 12)
1197 softc->minimum_cmd_size = 16;
62ade751 1198
984263bc
MD
1199 /*
1200 * The DA driver supports a blocksize, but
1201 * we don't know the blocksize until we do
1202 * a read capacity. So, set a flag to
1203 * indicate that the blocksize is
1204 * unavailable right now. We'll clear the
1205 * flag as soon as we've done a read capacity.
1206 */
1207 devstat_add_entry(&softc->device_stats, "da",
1208 periph->unit_number, 0,
1209 DEVSTAT_BS_UNAVAILABLE,
1210 SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1211 DEVSTAT_PRIORITY_DISK);
1212
1213 /*
1214 * Register this media as a disk
1215 */
1c8b7a9a 1216 CAM_SIM_UNLOCK(periph->sim);
a688b15c 1217 disk_create(periph->unit_number, &softc->disk, &da_ops);
eaae6702
SW
1218 if (cpi.maxio == 0 || cpi.maxio > MAXPHYS)
1219 softc->disk.d_rawdev->si_iosize_max = MAXPHYS;
1220 else
1221 softc->disk.d_rawdev->si_iosize_max = cpi.maxio;
0b41f48b
SW
1222 if (bootverbose) {
1223 kprintf("%s%d: si_iosize_max:%d\n",
1224 periph->periph_name,
1225 periph->unit_number,
1226 softc->disk.d_rawdev->si_iosize_max);
1227 }
1c8b7a9a 1228 CAM_SIM_LOCK(periph->sim);
984263bc
MD
1229
1230 /*
1231 * Add async callbacks for bus reset and
1232 * bus device reset calls. I don't bother
1233 * checking if this fails as, in most cases,
1234 * the system will function just fine without
1235 * them and the only alternative would be to
1236 * not attach the device on failure.
1237 */
1c8b7a9a
PA
1238 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
1239 daasync, periph, periph->path);
1240
984263bc 1241 /*
1c8b7a9a
PA
1242 * Take an exclusive refcount on the periph while dastart is called
1243 * to finish the probe. The reference will be dropped in dadone at
1244 * the end of probe.
984263bc 1245 */
1c8b7a9a 1246 cam_periph_hold(periph, 0);
984263bc
MD
1247 xpt_schedule(periph, /*priority*/5);
1248
1249 return(CAM_REQ_CMP);
1250}
1251
1252static void
1253dastart(struct cam_periph *periph, union ccb *start_ccb)
1254{
1255 struct da_softc *softc;
1256
1257 softc = (struct da_softc *)periph->softc;
1258
984263bc
MD
1259 switch (softc->state) {
1260 case DA_STATE_NORMAL:
1261 {
1262 /* Pull a buffer from the queue and get going on it */
81b5c339 1263 struct bio *bio;
af0aa0ac
MD
1264 struct bio *bio_rd;
1265 struct bio *bio_wr;
984263bc 1266 struct buf *bp;
a9bf1b8c 1267 u_int8_t tag_code;
af0aa0ac 1268 int limit;
984263bc
MD
1269
1270 /*
1271 * See if there is a buf with work for us to do..
1272 */
af0aa0ac
MD
1273 bio_rd = bioq_first(&softc->bio_queue_rd);
1274 bio_wr = bioq_first(&softc->bio_queue_wr);
1275
984263bc
MD
1276 if (periph->immediate_priority <= periph->pinfo.priority) {
1277 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1278 ("queuing for immediate ccb\n"));
1279 start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1280 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1281 periph_links.sle);
1282 periph->immediate_priority = CAM_PRIORITY_NONE;
984263bc 1283 wakeup(&periph->ccb_list);
af0aa0ac 1284 if (bio_rd || bio_wr) {
a9bf1b8c
MD
1285 /*
1286 * Have more work to do, so ensure we stay
1287 * scheduled
1288 */
1289 xpt_schedule(periph, /* XXX priority */1);
1290 }
1291 break;
1292 }
af0aa0ac 1293
e0fb398b
T
1294 /* Run the trim command if not already running */
1295 if (!softc->trim_running &&
4090d6ff 1296 (bio = bioq_first(&softc->bio_queue_trim)) != NULL) {
e0fb398b
T
1297 struct trim_request *req = &softc->trim_req;
1298 struct bio *bio1;
1299 int bps = 0, ranges = 0;
1300
1301 softc->trim_running = 1;
1302 bzero(req, sizeof(*req));
1303 bio1 = bio;
1304 while (1) {
1305 uint64_t lba;
1306 int count;
1307
1308 bp = bio1->bio_buf;
1309 count = bp->b_bcount / softc->params.secsize;
1310 lba = bio1->bio_offset/softc->params.secsize;
1311
e0fb398b
T
1312 bioq_remove(&softc->bio_queue_trim, bio1);
1313 while (count > 0) {
1314 int c = min(count, 0xffff);
1315 int off = ranges * 8;
1316
1317 req->data[off + 0] = lba & 0xff;
1318 req->data[off + 1] = (lba >> 8) & 0xff;
1319 req->data[off + 2] = (lba >> 16) & 0xff;
1320 req->data[off + 3] = (lba >> 24) & 0xff;
1321 req->data[off + 4] = (lba >> 32) & 0xff;
1322 req->data[off + 5] = (lba >> 40) & 0xff;
1323 req->data[off + 6] = c & 0xff;
1324 req->data[off + 7] = (c >> 8) & 0xff;
1325 lba += c;
1326 count -= c;
1327 ranges++;
1328 }
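/*
 * Editorial note on the encoding above: each 8-byte entry holds the
 * 48-bit starting LBA little-endian in bytes 0-5 and the 16-bit
 * sector count in bytes 6-7, e.g. lba = 0x112233, c = 8 becomes
 * { 0x33, 0x22, 0x11, 0x00, 0x00, 0x00, 0x08, 0x00 }; this appears
 * to follow the ATA DATA SET MANAGEMENT (TRIM) range layout.
 */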
1329
1330 /* Try to merge multiple TRIM requests */
1331 req->bios[bps++] = bio1;
1332 bio1 = bioq_first(&softc->bio_queue_trim);
1333 if (bio1 == NULL ||
1334 bio1->bio_buf->b_bcount / softc->params.secsize >
1335 (softc->trim_max_ranges - ranges) * 0xffff)
1336 break;
1337 }
1338
1339
1340 cam_fill_csio(&start_ccb->csio,
1341 1/*retries*/,
1342 dadone,
1343 CAM_DIR_OUT,
1344 MSG_SIMPLE_Q_TAG,
1345 req->data,
1346 ((ranges +63)/64)*512,
1347 SSD_FULL_SIZE,
1348 sizeof(struct scsi_rw_6),
1349 da_default_timeout*2);
1350
1351 start_ccb->ccb_h.ccb_state = DA_CCB_TRIM;
1352 LIST_INSERT_HEAD(&softc->pending_ccbs,
1353 &start_ccb->ccb_h, periph_links.le);
1354 start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1355 start_ccb->ccb_h.ccb_bio = bio;
1356 devstat_start_transaction(&softc->device_stats);
1357 xpt_action(start_ccb);
1358 xpt_schedule(periph, 1);
1359 break;
1360 }
1361
af0aa0ac
MD
1362 /*
1363 * Select a read or write buffer to queue. Limit the number
1364 * of tags dedicated to reading or writing, giving reads
1365 * precedence.
1366 *
1367 * Writes to modern hard drives go into the HD's cache and
1368 * return completion nearly instantly. That is, until the
1369 * cache becomes full. When the HD's cache becomes full,
1370 * write commands will begin to stall. If all available
1371 * tags are taken up by writes which saturate the drive,
1372 * reads will become tag-starved.
1373 *
1374 * A similar situation can occur with reads. With many
1375 * parallel readers, all tags can be taken up by reads
1376 * and prevent any writes from draining, even if the HD's
1377 * cache is not full.
1378 */
a3c9d3d8 1379 limit = periph->sim->max_tagged_dev_openings * 2 / 3 + 1;
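/*
 * Editorial example: with 32 tagged openings the limit works out to
 * 32 * 2 / 3 + 1 = 22, so neither reads nor writes alone can claim
 * every tag and the other direction always retains some openings.
 */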
af0aa0ac
MD
1380#if 0
1381 /* DEBUGGING */
1382 static int savets;
1383 static long savets2;
cec73927 1384 if (1 || time_uptime != savets2 || (ticks != savets && (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr))) {
af0aa0ac
MD
1385 kprintf("%d %d (%d)\n",
1386 softc->outstanding_cmds_rd,
1387 softc->outstanding_cmds_wr,
1388 limit);
1389 savets = ticks;
cec73927 1390 savets2 = time_uptime;
af0aa0ac
MD
1391 }
1392#endif
1393 if (bio_rd && softc->outstanding_cmds_rd < limit) {
1394 bio = bio_rd;
1395 bioq_remove(&softc->bio_queue_rd, bio);
1396 } else if (bio_wr && softc->outstanding_cmds_wr < limit) {
1397 bio = bio_wr;
1398 bioq_remove(&softc->bio_queue_wr, bio);
1399 } else {
1400 if (bio_rd)
1401 softc->flags |= DA_FLAG_RD_LIMIT;
1402 if (bio_wr)
1403 softc->flags |= DA_FLAG_WR_LIMIT;
984263bc 1404 xpt_release_ccb(start_ccb);
a9bf1b8c
MD
1405 break;
1406 }
984263bc 1407
a9bf1b8c
MD
1408 /*
1409 * We can queue new work.
1410 */
a9bf1b8c 1411 bp = bio->bio_buf;
54078292 1412
a9bf1b8c 1413 devstat_start_transaction(&softc->device_stats);
54078292 1414
0c4341b9 1415 tag_code = MSG_SIMPLE_Q_TAG;
984263bc 1416
a9bf1b8c
MD
1417 switch(bp->b_cmd) {
1418 case BUF_CMD_READ:
1419 case BUF_CMD_WRITE:
984263bc 1420 /*
a9bf1b8c 1421 * Block read/write op
984263bc 1422 */
a9bf1b8c 1423 KKASSERT(bio->bio_offset % softc->params.secsize == 0);
984263bc 1424
a9bf1b8c
MD
1425 scsi_read_write(
1426 &start_ccb->csio,
1427 da_retry_count, /* retries */
1428 dadone,
1429 tag_code,
1430 (bp->b_cmd == BUF_CMD_READ),
1431 0, /* byte2 */
1432 softc->minimum_cmd_size,
1433 bio->bio_offset / softc->params.secsize,
1434 bp->b_bcount / softc->params.secsize,
1435 bp->b_data,
1436 bp->b_bcount,
1437 SSD_FULL_SIZE, /* sense_len */
1438 da_default_timeout * 1000
1439 );
1440 break;
1441 case BUF_CMD_FLUSH:
a9453758
MD
1442 /*
1443 * Silently complete a flush request if the device
1444 * cannot handle it.
1445 */
1446 if (softc->quirks & DA_Q_NO_SYNC_CACHE) {
1447 xpt_release_ccb(start_ccb);
1448 start_ccb = NULL;
1449 devstat_end_transaction_buf(
1450 &softc->device_stats, bp);
1451 biodone(bio);
1452 } else {
1453 scsi_synchronize_cache(
1454 &start_ccb->csio,
1455 1, /* retries */
1456 dadone, /* cbfcnp */
1457 MSG_SIMPLE_Q_TAG,
1458 0, /* lba */
1459 0, /* count (whole disk) */
1460 SSD_FULL_SIZE,
1461 da_default_timeout*1000 /* timeout */
1462 );
1463 }
a9bf1b8c 1464 break;
e0fb398b
T
1465 case BUF_CMD_FREEBLKS:
1466 if (softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM){
1467 start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1468 break;
1469 }
a9bf1b8c 1470 default:
a9453758
MD
1471 xpt_release_ccb(start_ccb);
1472 start_ccb = NULL;
a9bf1b8c
MD
1473 panic("dastart: unrecognized bio cmd %d", bp->b_cmd);
1474 break; /* NOT REACHED */
1475 }
984263bc 1476
a9bf1b8c
MD
1477 /*
1478 * Block out any asynchronous callbacks
1479 * while we touch the pending ccb list.
1480 */
a9453758
MD
1481 if (start_ccb) {
1482 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1483 LIST_INSERT_HEAD(&softc->pending_ccbs,
1484 &start_ccb->ccb_h, periph_links.le);
af0aa0ac
MD
1485 if (bp->b_cmd == BUF_CMD_WRITE ||
1486 bp->b_cmd == BUF_CMD_FLUSH) {
1487 ++softc->outstanding_cmds_wr;
1488 } else {
1489 ++softc->outstanding_cmds_rd;
1490 }
a9453758
MD
1491
1492 /* We expect a unit attention from this device */
1493 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1494 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1495 softc->flags &= ~DA_FLAG_RETRY_UA;
1496 }
a9bf1b8c 1497
a9453758
MD
1498 start_ccb->ccb_h.ccb_bio = bio;
1499 xpt_action(start_ccb);
1500 }
984263bc 1501
a9bf1b8c
MD
1502 /*
1503 * Be sure we stay scheduled if we have more work to do.
1504 */
af0aa0ac
MD
1505 if (bioq_first(&softc->bio_queue_rd) ||
1506 bioq_first(&softc->bio_queue_wr)) {
a9bf1b8c 1507 xpt_schedule(periph, 1);
af0aa0ac 1508 }
984263bc
MD
1509 break;
1510 }
1511 case DA_STATE_PROBE:
1512 {
1513 struct ccb_scsiio *csio;
1514 struct scsi_read_capacity_data *rcap;
1515
1c8b7a9a 1516 rcap = kmalloc(sizeof(*rcap), M_SCSIDA, M_INTWAIT | M_ZERO);
984263bc
MD
1517 csio = &start_ccb->csio;
1518 scsi_read_capacity(csio,
1519 /*retries*/4,
1520 dadone,
1521 MSG_SIMPLE_Q_TAG,
1522 rcap,
1523 SSD_FULL_SIZE,
1524 /*timeout*/5000);
81b5c339 1525 start_ccb->ccb_h.ccb_bio = NULL;
984263bc
MD
1526 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1527 xpt_action(start_ccb);
1528 break;
1529 }
bdd58e03
MD
1530 case DA_STATE_PROBE2:
1531 {
1532 struct ccb_scsiio *csio;
0b0362e1 1533 struct scsi_read_capacity_data_16 *rcaplong;
bdd58e03 1534
0b0362e1
MD
1535 rcaplong = kmalloc(sizeof(*rcaplong), M_SCSIDA,
1536 M_INTWAIT | M_ZERO);
bdd58e03
MD
1537 csio = &start_ccb->csio;
1538 scsi_read_capacity_16(csio,
1539 /*retries*/ 4,
1540 /*cbfcnp*/ dadone,
1541 /*tag_action*/ MSG_SIMPLE_Q_TAG,
1542 /*lba*/ 0,
1543 /*reladr*/ 0,
1544 /*pmi*/ 0,
1545 rcaplong,
1546 /*sense_len*/ SSD_FULL_SIZE,
1547 /*timeout*/ 60000);
1548 start_ccb->ccb_h.ccb_bio = NULL;
1549 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
1550 xpt_action(start_ccb);
1551 break;
1552 }
984263bc
MD
1553 }
1554}
1555
1556static int
1557cmd6workaround(union ccb *ccb)
1558{
1559 struct scsi_rw_6 cmd6;
1560 struct scsi_rw_10 *cmd10;
1561 struct da_softc *softc;
1562 u_int8_t *cdb;
1563 int frozen;
1564
1565 cdb = ccb->csio.cdb_io.cdb_bytes;
1566
1567 /* Translation only possible if CDB is an array and cmd is R/W6 */
1568 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
1569 (*cdb != READ_6 && *cdb != WRITE_6))
1570 return 0;
1571
1c8b7a9a
PA
1572 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
1573 "increasing minimum_cmd_size to 10.\n");
984263bc
MD
1574 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
1575 softc->minimum_cmd_size = 10;
1576
1577 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
1578 cmd10 = (struct scsi_rw_10 *)cdb;
1579 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
1580 cmd10->byte2 = 0;
1581 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
1582 cmd10->reserved = 0;
1583 scsi_ulto2b(cmd6.length, cmd10->length);
1584 cmd10->control = cmd6.control;
1585 ccb->csio.cdb_len = sizeof(*cmd10);
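/*
 * Editorial note: the rewrite above works because READ(6)/WRITE(6)
 * CDBs only carry a 21-bit LBA and an 8-bit transfer length, while
 * the 10-byte forms provide a 32-bit LBA and a 16-bit length, so the
 * address and length fields can simply be widened in place.
 */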
1586
1587 /* Requeue request, unfreezing queue if necessary */
1588 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1589 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1590 xpt_action(ccb);
1591 if (frozen) {
1592 cam_release_devq(ccb->ccb_h.path,
1593 /*relsim_flags*/0,
1594 /*reduction*/0,
1595 /*timeout*/0,
1596 /*getcount_only*/0);
1597 }
1598 return (ERESTART);
1599}
1600
1601static void
1602dadone(struct cam_periph *periph, union ccb *done_ccb)
1603{
1604 struct da_softc *softc;
1605 struct ccb_scsiio *csio;
cd29885a 1606 struct disk_info info;
984263bc
MD
1607
1608 softc = (struct da_softc *)periph->softc;
1609 csio = &done_ccb->csio;
1610 switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1611 case DA_CCB_BUFFER_IO:
e0fb398b 1612 case DA_CCB_TRIM:
984263bc
MD
1613 {
1614 struct buf *bp;
81b5c339 1615 struct bio *bio;
af0aa0ac 1616 int mustsched = 0;
984263bc 1617
81b5c339
MD
1618 bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
1619 bp = bio->bio_buf;
984263bc
MD
1620 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1621 int error;
984263bc
MD
1622 int sf;
1623
1624 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1625 sf = SF_RETRY_UA;
1626 else
1627 sf = 0;
1628
b05e84c9
PA
1629 error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1630 if (error == ERESTART) {
984263bc
MD
1631 /*
1632 * A retry was scheduled, so
1633 * just return.
1634 */
1635 return;
1636 }
1637 if (error != 0) {
984263bc
MD
1638 if (error == ENXIO) {
1639 /*
1640 * Catastrophic error. Mark our pack as
1641 * invalid.
1642 */
1c8b7a9a
PA
1643 /*
1644 * XXX See if this is really a media
1645 * XXX change first?
984263bc 1646 */
1c8b7a9a
PA
1647 xpt_print(periph->path,
1648 "Invalidating pack\n");
984263bc
MD
1649 softc->flags |= DA_FLAG_PACK_INVALID;
1650 }
1651
1652 /*
af0aa0ac
MD
1653 * Return all queued write I/O's with EIO
1654 * so the client can retry these I/Os in the
984263bc 1655 * proper order should it attempt to recover.
af0aa0ac
MD
1656 *
1657 * Leave read I/O's alone.
984263bc 1658 */
af0aa0ac 1659 daflushbioq(&softc->bio_queue_wr, EIO);
984263bc
MD
1660 bp->b_error = error;
1661 bp->b_resid = bp->b_bcount;
1662 bp->b_flags |= B_ERROR;
1663 } else {
1664 bp->b_resid = csio->resid;
1665 bp->b_error = 0;
62ade751 1666 if (bp->b_resid != 0)
984263bc 1667 bp->b_flags |= B_ERROR;
984263bc
MD
1668 }
1669 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1670 cam_release_devq(done_ccb->ccb_h.path,
1671 /*relsim_flags*/0,
1672 /*reduction*/0,
1673 /*timeout*/0,
1674 /*getcount_only*/0);
1675 } else {
b05e84c9
PA
1676 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1677 panic("REQ_CMP with QFRZN");
984263bc 1678 bp->b_resid = csio->resid;
62ade751 1679 if (csio->resid > 0)
984263bc 1680 bp->b_flags |= B_ERROR;
984263bc
MD
1681 }
1682
1683 /*
1684 * Block out any asynchronous callbacks
1685 * while we touch the pending ccb list.
1686 */
984263bc 1687 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
af0aa0ac
MD
1688 if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH) {
1689 --softc->outstanding_cmds_wr;
1690 if (softc->flags & DA_FLAG_WR_LIMIT) {
1691 softc->flags &= ~DA_FLAG_WR_LIMIT;
1692 mustsched = 1;
1693 }
1694 } else {
1695 --softc->outstanding_cmds_rd;
1696 if (softc->flags & DA_FLAG_RD_LIMIT) {
1697 softc->flags &= ~DA_FLAG_RD_LIMIT;
1698 mustsched = 1;
1699 }
1700 }
984263bc
MD
1701
1702 devstat_end_transaction_buf(&softc->device_stats, bp);
e0fb398b
T
1703 if ((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) ==
1704 DA_CCB_TRIM) {
1705 struct trim_request *req =
1706 (struct trim_request *) csio->data_ptr;
1707 int i;
1708
1709 for (i = 1; i < softc->trim_max_ranges &&
1710 req->bios[i]; i++) {
1711 struct bio *bp1 = req->bios[i];
1712
1713 bp1->bio_buf->b_resid = bp->b_resid;
1714 bp1->bio_buf->b_error = bp->b_error;
1715 if (bp->b_flags & B_ERROR)
1716 bp1->bio_buf->b_flags |= B_ERROR;
1717 biodone(bp1);
1718 }
1719 softc->trim_running = 0;
1720 biodone(bio);
1721 xpt_schedule(periph,1);
1722 } else
1723 biodone(bio);
1724
af0aa0ac
MD
1725
1726 if (mustsched)
1727 xpt_schedule(periph, /*priority*/1);
1728
984263bc
MD
1729 break;
1730 }
1731 case DA_CCB_PROBE:
bdd58e03 1732 case DA_CCB_PROBE2:
984263bc
MD
1733 {
1734 struct scsi_read_capacity_data *rdcap;
0b0362e1 1735 struct scsi_read_capacity_data_16 *rcaplong;
984263bc 1736 char announce_buf[80];
2ea825fb 1737 int doinfo = 0;
984263bc 1738
bdd58e03
MD
1739 rdcap = NULL;
1740 rcaplong = NULL;
1741 if (softc->state == DA_STATE_PROBE)
1742 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
1743 else
0b0362e1 1744 rcaplong = (struct scsi_read_capacity_data_16 *)
bdd58e03 1745 csio->data_ptr;
f7b26992
MD
1746
1747 bzero(&info, sizeof(info));
1748 info.d_type = DTYPE_SCSI;
1749 info.d_serialno = xpt_path_serialno(periph->path);
984263bc
MD
1750
1751 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1752 struct disk_params *dp;
bdd58e03
MD
1753 uint32_t block_size;
1754 uint64_t maxsector;
1755
1756 if (softc->state == DA_STATE_PROBE) {
1757 block_size = scsi_4btoul(rdcap->length);
1758 maxsector = scsi_4btoul(rdcap->addr);
984263bc 1759
bdd58e03
MD
1760 /*
1761 * According to SBC-2, if the standard 10
1762 * byte READ CAPACITY command returns 0xffffffff,
1763 * we should issue the 16 byte version of
1764 * the command, since the device in question
1765 * has more sectors than can be represented
1766 * with the short version of the command.
1767 */
1768 if (maxsector == 0xffffffff) {
1769 softc->state = DA_STATE_PROBE2;
1c8b7a9a 1770 kfree(rdcap, M_SCSIDA);
bdd58e03
MD
1771 xpt_release_ccb(done_ccb);
1772 xpt_schedule(periph, /*priority*/5);
1773 return;
1774 }
1775 } else {
1776 block_size = scsi_4btoul(rcaplong->length);
1777 maxsector = scsi_8btou64(rcaplong->addr);
1778 }
1779 dasetgeom(periph, block_size, maxsector);
984263bc 1780 dp = &softc->params;
f8c7a42d 1781 ksnprintf(announce_buf, sizeof(announce_buf),
bdd58e03
MD
1782 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1783 (uintmax_t) (((uintmax_t)dp->secsize *
1784 dp->sectors) / (1024*1024)),
1785 (uintmax_t)dp->sectors,
984263bc
MD
1786 dp->secsize, dp->heads, dp->secs_per_track,
1787 dp->cylinders);
e0fb398b 1788
cd29885a
MD
1789 info.d_media_blksize = softc->params.secsize;
1790 info.d_media_blocks = softc->params.sectors;
1791 info.d_media_size = 0;
1792 info.d_secpertrack = softc->params.secs_per_track;
1793 info.d_nheads = softc->params.heads;
1794 info.d_ncylinders = softc->params.cylinders;
1795 info.d_secpercyl = softc->params.heads *
1796 softc->params.secs_per_track;
55230951 1797 info.d_serialno = xpt_path_serialno(periph->path);
2ea825fb 1798 doinfo = 1;
984263bc
MD
1799 } else {
1800 int error;
1801
1802 announce_buf[0] = '\0';
1803
1804 /*
1805 * Retry any UNIT ATTENTION type errors. They
1806 * are expected at boot.
1807 */
b05e84c9
PA
1808 error = daerror(done_ccb, CAM_RETRY_SELTO,
1809 SF_RETRY_UA|SF_NO_PRINT);
984263bc
MD
1810 if (error == ERESTART) {
1811 /*
1812 * A retry was scheduled, so
1813 * just return.
1814 */
1815 return;
1816 } else if (error != 0) {
1817 struct scsi_sense_data *sense;
1818 int asc, ascq;
1819 int sense_key, error_code;
1820 int have_sense;
1821 cam_status status;
1822 struct ccb_getdev cgd;
1823
1824 /* Don't wedge this device's queue */
984263bc 1825 status = done_ccb->ccb_h.status;
b05e84c9
PA
1826 if ((status & CAM_DEV_QFRZN) != 0)
1827 cam_release_devq(done_ccb->ccb_h.path,
1828 /*relsim_flags*/0,
1829 /*reduction*/0,
1830 /*timeout*/0,
1831 /*getcount_only*/0);
1832
984263bc
MD
1833
1834 xpt_setup_ccb(&cgd.ccb_h,
1835 done_ccb->ccb_h.path,
1836 /* priority */ 1);
1837 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1838 xpt_action((union ccb *)&cgd);
1839
1840 if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1841 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1842 || ((status & CAM_AUTOSNS_VALID) == 0))
1843 have_sense = FALSE;
1844 else
1845 have_sense = TRUE;
1846
1847 if (have_sense) {
1848 sense = &csio->sense_data;
1849 scsi_extract_sense(sense, &error_code,
1850 &sense_key,
1851 &asc, &ascq);
1852 }
1853 /*
1854 * Attach to anything that claims to be a
1855 * direct access or optical disk device,
1856 * as long as it doesn't return a "Logical
1857 * unit not supported" (0x25) error.
1858 */
1859 if ((have_sense) && (asc != 0x25)
1860 && (error_code == SSD_CURRENT_ERROR)) {
1861 const char *sense_key_desc;
1862 const char *asc_desc;
1863
1864 scsi_sense_desc(sense_key, asc, ascq,
1865 &cgd.inq_data,
1866 &sense_key_desc,
1867 &asc_desc);
1868 ksnprintf(announce_buf,
1869 sizeof(announce_buf),
1870 "Attempt to query device "
1871 "size failed: %s, %s",
1872 sense_key_desc,
1873 asc_desc);
1874 info.d_media_blksize = 512;
1875 doinfo = 1;
1876 } else {
1877 if (have_sense)
1878 scsi_sense_print(
1879 &done_ccb->csio);
1880 else {
1881 xpt_print(periph->path,
1882 "got CAM status %#x\n",
1883 done_ccb->ccb_h.status);
1884 }
1885
1886 xpt_print(periph->path, "fatal error, "
1887 "failed to attach to device\n");
1888
1889 /*
1890 * Free up resources.
1891 */
1892 cam_periph_invalidate(periph);
1893 }
1894 }
1895 }
1896 kfree(csio->data_ptr, M_SCSIDA);
1897 if (announce_buf[0] != '\0') {
1898 xpt_announce_periph(periph, announce_buf);
1899 /*
1900 * Create our sysctl variables, now that we know
1901 * we have successfully attached.
1902 */
1903 taskqueue_enqueue(taskqueue_thread[mycpuid],
1904 &softc->sysctl_task);
1905 }
1906
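 /*
 * If the earlier probe discovered TRIM range support, flag the
 * disk as TRIM capable and announce it.
 */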
1907 if (softc->trim_max_ranges) {
1908 softc->disk.d_info.d_trimflag |= DA_FLAG_CAN_TRIM;
1909 kprintf("%s%d: supports TRIM\n",
1910 periph->periph_name,
1911 periph->unit_number);
1912 }
1913 softc->state = DA_STATE_NORMAL;
1914 /*
1915 * Since our peripheral may be invalidated by an error
1916 * above or an external event, we must release our CCB
1917 * before releasing the probe lock on the peripheral.
1918 * The peripheral will only go away once the last lock
1919 * is removed, and we need it around for the CCB release
1920 * operation.
1921 */
1922 xpt_release_ccb(done_ccb);
1923 cam_periph_unhold(periph, 0);
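 /*
 * Push the updated parameters to the disk layer with the SIM
 * lock dropped, since disk_setdiskinfo() re-probes the slices
 * and partitions.
 */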
1924 if (doinfo) {
1925 CAM_SIM_UNLOCK(periph->sim);
1926 disk_setdiskinfo(&softc->disk, &info);
1927 CAM_SIM_LOCK(periph->sim);
1928 }
1929 return;
1930 }
1931 case DA_CCB_WAITING:
1932 {
1933 /* Caller will release the CCB */
1934 wakeup(&done_ccb->ccb_h.cbfcnp);
1935 return;
1936 }
1937 case DA_CCB_DUMP:
1938 /* No-op. We're polling */
1939 return;
1940 case DA_CCB_POLLED:
1941 /* Caller releases ccb */
1942 wakeup(&done_ccb->ccb_h.cbfcnp);
1943 return;
1944 default:
1945 break;
1946 }
1947 xpt_release_ccb(done_ccb);
1948}
1949
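/*
 * Command error callback for the da driver.  Devices that reject
 * READ(6)/WRITE(6) (CAM_REQ_INVALID or ILLEGAL REQUEST sense) are
 * switched to 10 byte CDBs via cmd6workaround(); everything else,
 * with UNIT ATTENTIONs always retried, is handed to cam_periph_error().
 */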
1950static int
1951daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1952{
1953 struct da_softc *softc;
1954 struct cam_periph *periph;
1955 int error;
1956
1957 periph = xpt_path_periph(ccb->ccb_h.path);
1958 softc = (struct da_softc *)periph->softc;
1959
1960 /*
1961 * Automatically detect devices that do not support
1962 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
1963 */
1964 error = 0;
1965 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
1966 error = cmd6workaround(ccb);
1967 } else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
1968 CAM_SCSI_STATUS_ERROR)
1969 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
1970 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
1971 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
1972 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
1973 int sense_key, error_code, asc, ascq;
1974
1975 scsi_extract_sense(&ccb->csio.sense_data,
1976 &error_code, &sense_key, &asc, &ascq);
1977 if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
1978 error = cmd6workaround(ccb);
1979 }
1980 if (error == ERESTART)
1981 return (ERESTART);
1982
1983 /*
1984 * XXX
1985 * Until we have a better way of doing pack validation,
1986 * don't treat UAs as errors.
1987 */
1988 sense_flags |= SF_RETRY_UA;
1989 return(cam_periph_error(ccb, cam_flags, sense_flags,
1990 &softc->saved_ccb));
1991}
1992
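/*
 * Issue PREVENT ALLOW MEDIUM REMOVAL to lock or unlock a removable
 * pack, skipping the command when the pack is already in the
 * requested state, and track the result in DA_FLAG_PACK_LOCKED.
 */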
1993static void
1994daprevent(struct cam_periph *periph, int action)
1995{
1996 struct da_softc *softc;
1997 union ccb *ccb;
1998 int error;
1999
2000 softc = (struct da_softc *)periph->softc;
2001
2002 if (((action == PR_ALLOW)
2003 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
2004 || ((action == PR_PREVENT)
2005 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
2006 return;
2007 }
2008
2009 ccb = cam_periph_getccb(periph, /*priority*/1);
2010 ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2011
2012 scsi_prevent(&ccb->csio,
2013 /*retries*/1,
2014 /*cbfcnp*/dadone,
2015 MSG_SIMPLE_Q_TAG,
2016 action,
2017 SSD_FULL_SIZE,
2018 5000);
2019
2020 error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
2021 SF_RETRY_UA, &softc->device_stats);
2022
2023 if (error == 0) {
2024 if (action == PR_ALLOW)
2025 softc->flags &= ~DA_FLAG_PACK_LOCKED;
2026 else
2027 softc->flags |= DA_FLAG_PACK_LOCKED;
2028 }
2029
2030 xpt_release_ccb(ccb);
2031}
2032
2033/*
2034 * Check media on open, e.g. card reader devices which had no initial media.
2035 */
2036static int
2037dacheckmedia(struct cam_periph *periph)
2038{
2039 struct disk_params *dp;
2040 struct da_softc *softc;
2041 struct disk_info info;
2042 int error;
2043
2044 softc = (struct da_softc *)periph->softc;
2045 dp = &softc->params;
2046
2047 error = dagetcapacity(periph);
2048
2049 /*
2050 * Only reprobe on initial open and if the media is removable.
2051 *
2052 * NOTE: Calling disk_setdiskinfo() makes the device re-probe
2053 * its slices and partitions, which takes time and can
2054 * interfere with booting, so skip it if nothing changed.
2055 * XXX
2056 */
2057 if (softc->flags & DA_FLAG_OPEN)
2058 return (error);
2059 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
2060 return (error);
2061
2062 bzero(&info, sizeof(info));
2063 info.d_type = DTYPE_SCSI;
2064 info.d_serialno = xpt_path_serialno(periph->path);
2065
2066 if (error == 0) {
2067 CAM_SIM_UNLOCK(periph->sim);
2068 info.d_media_blksize = softc->params.secsize;
2069 info.d_media_blocks = softc->params.sectors;
2070 info.d_media_size = 0;
2071 info.d_secpertrack = softc->params.secs_per_track;
2072 info.d_nheads = softc->params.heads;
2073 info.d_ncylinders = softc->params.cylinders;
2074 info.d_secpercyl = softc->params.heads *
2075 softc->params.secs_per_track;
2076 info.d_serialno = xpt_path_serialno(periph->path);
2077 if (info.d_media_blocks != softc->disk.d_info.d_media_blocks) {
2078 kprintf("%s%d: open removable media: "
2079 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)\n",
2080 periph->periph_name, periph->unit_number,
2081 (uintmax_t)(((uintmax_t)dp->secsize *
2082 dp->sectors) / (1024*1024)),
2083 (uintmax_t)dp->sectors, dp->secsize,
2084 dp->heads, dp->secs_per_track, dp->cylinders);
2085 disk_setdiskinfo(&softc->disk, &info);
2086 }
2087 CAM_SIM_LOCK(periph->sim);
2088 } else {
2089 kprintf("%s%d: open removable media: no media present\n",
2090 periph->periph_name, periph->unit_number);
2091 info.d_media_blksize = 512;
2092 disk_setdiskinfo(&softc->disk, &info);
2093 }
2094 return (error);
2095}
2096
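/*
 * Query the device size: issue READ CAPACITY(10) and, if the device
 * returns the 0xffffffff sentinel, retry with READ CAPACITY(16).
 * On success the result is recorded via dasetgeom().
 */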
2097static int
2098dagetcapacity(struct cam_periph *periph)
2099{
2100 struct da_softc *softc;
2101 union ccb *ccb;
2102 struct scsi_read_capacity_data *rcap;
2103 struct scsi_read_capacity_data_16 *rcaplong;
2104 uint32_t block_len;
2105 uint64_t maxsector;
2106 int error;
2107
2108 softc = (struct da_softc *)periph->softc;
2109 block_len = 0;
2110 maxsector = 0;
2111 error = 0;
2112
2113 /* Do a read capacity */
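 /*
 * The buffer is sized for the larger READ CAPACITY(16) data so it
 * can be reused for the 16 byte fallback below.
 */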
2114 rcap = (struct scsi_read_capacity_data *)kmalloc(sizeof(*rcaplong),
2115 M_SCSIDA, M_INTWAIT);
2116
2117 ccb = cam_periph_getccb(periph, /*priority*/1);
2118 ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2119
2120 scsi_read_capacity(&ccb->csio,
2121 /*retries*/4,
2122 /*cbfncp*/dadone,
2123 MSG_SIMPLE_Q_TAG,
2124 rcap,
2125 SSD_FULL_SIZE,
2126 /*timeout*/60000);
2127 ccb->ccb_h.ccb_bio = NULL;
2128
2129 error = cam_periph_runccb(ccb, daerror,
2130 /*cam_flags*/CAM_RETRY_SELTO,
2131 /*sense_flags*/SF_RETRY_UA,
2132 &softc->device_stats);
2133
2134 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2135 cam_release_devq(ccb->ccb_h.path,
2136 /*relsim_flags*/0,
2137 /*reduction*/0,
2138 /*timeout*/0,
2139 /*getcount_only*/0);
2140
2141 if (error == 0) {
2142 block_len = scsi_4btoul(rcap->length);
2143 maxsector = scsi_4btoul(rcap->addr);
2144
2145 if (maxsector != 0xffffffff)
2146 goto done;
2147 } else
2148 goto done;
2149
2150 rcaplong = (struct scsi_read_capacity_data_16 *)rcap;
2151
2152 scsi_read_capacity_16(&ccb->csio,
2153 /*retries*/ 4,
2154 /*cbfcnp*/ dadone,
2155 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2156 /*lba*/ 0,
2157 /*reladr*/ 0,
2158 /*pmi*/ 0,
2159 rcaplong,
2160 /*sense_len*/ SSD_FULL_SIZE,
2161 /*timeout*/ 60000);
2162 ccb->ccb_h.ccb_bio = NULL;
2163
2164 error = cam_periph_runccb(ccb, daerror,
2165 /*cam_flags*/CAM_RETRY_SELTO,
2166 /*sense_flags*/SF_RETRY_UA,
2167 &softc->device_stats);
2168
2169 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2170 cam_release_devq(ccb->ccb_h.path,
2171 /*relsim_flags*/0,
2172 /*reduction*/0,
2173 /*timeout*/0,
2174 /*getcount_only*/0);
2175
2176 if (error == 0) {
2177 block_len = scsi_4btoul(rcaplong->length);
2178 maxsector = scsi_8btou64(rcaplong->addr);
2179 }
2180
2181done:
2182
2183 if (error == 0)
2184 dasetgeom(periph, block_len, maxsector);
2185
2186 xpt_release_ccb(ccb);
2187
2188 kfree(rcap, M_SCSIDA);
2189
2190 return (error);
2191}
2192
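/*
 * Record the reported block size and sector count, then ask the
 * transport (XPT_CALC_GEOMETRY) for a boot geometry, falling back
 * to a synthetic 255H/255S layout if the calculation fails.
 */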
2193static void
2194dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
2195{
2196 struct ccb_calc_geometry ccg;
2197 struct da_softc *softc;
2198 struct disk_params *dp;
2199
2200 softc = (struct da_softc *)periph->softc;
2201
2202 dp = &softc->params;
2203 dp->secsize = block_len;
2204 dp->sectors = maxsector + 1;
2205 /*
2206 * Have the controller provide us with a geometry
2207 * for this disk. The only time the geometry
2208 * matters is when we boot and the controller
2209 * is the only one knowledgeable enough to come
2210 * up with something that will make this a bootable
2211 * device.
2212 */
2213 xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
2214 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
2215 ccg.block_size = dp->secsize;
2216 ccg.volume_size = dp->sectors;
2217 ccg.heads = 0;
2218 ccg.secs_per_track = 0;
2219 ccg.cylinders = 0;
2220 xpt_action((union ccb*)&ccg);
2221 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2222 /*
2223 * We don't know what went wrong here, but just pick
2224 * a geometry so we don't have nasty things like divide
2225 * by zero.
2226 */
2227 dp->heads = 255;
2228 dp->secs_per_track = 255;
2229 dp->cylinders = dp->sectors / (255 * 255);
2230 if (dp->cylinders == 0) {
2231 dp->cylinders = 1;
2232 }
2233 } else {
2234 dp->heads = ccg.heads;
2235 dp->secs_per_track = ccg.secs_per_track;
2236 dp->cylinders = ccg.cylinders;
2237 }
2238}
2239
2240/*
2241 * Step through all DA peripheral drivers, and if the device is still open,
2242 * sync the disk cache to physical media.
2243 */
2244static void
2245dashutdown(void * arg, int howto)
2246{
2247 struct cam_periph *periph;
2248 struct da_softc *softc;
2249
2250 TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
2251 union ccb ccb;
2252
2253 cam_periph_lock(periph);
2254 softc = (struct da_softc *)periph->softc;
2255
2256 /*
2257 * We only sync the cache if the drive is still open, and
2258 * if the drive is capable of it..
2259 * if the drive is capable of it.
2260 if (((softc->flags & DA_FLAG_OPEN) == 0)
2261 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
2262 cam_periph_unlock(periph);
2263 continue;
2264 }
2265
2266 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
2267
2268 ccb.ccb_h.ccb_state = DA_CCB_DUMP;
2269 scsi_synchronize_cache(&ccb.csio,
2270 /*retries*/1,
2271 /*cbfcnp*/dadone,
2272 MSG_SIMPLE_Q_TAG,
2273 /*begin_lba*/0, /* whole disk */
2274 /*lb_count*/0,
2275 SSD_FULL_SIZE,
2276 60 * 60 * 1000);
2277
2278 xpt_polled_action(&ccb);
2279
2280 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2281 if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
2282 CAM_SCSI_STATUS_ERROR)
2283 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
2284 int error_code, sense_key, asc, ascq;
2285
2286 scsi_extract_sense(&ccb.csio.sense_data,
2287 &error_code, &sense_key,
2288 &asc, &ascq);
2289
2290 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
2291 scsi_sense_print(&ccb.csio);
2292 } else {
2293 xpt_print(periph->path, "Synchronize "
2294 "cache failed, status == 0x%x, scsi status "
2295 "== 0x%x\n", ccb.ccb_h.status,
2296 ccb.csio.scsi_status);
2297 }
2298 }
2299
2300 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
2301 cam_release_devq(ccb.ccb_h.path,
2302 /*relsim_flags*/0,
2303 /*reduction*/0,
2304 /*timeout*/0,
2305 /*getcount_only*/0);
2306
2307 cam_periph_unlock(periph);
2308 }
2309}
2310
2311#else /* !_KERNEL */
2312
2313/*
2314 * XXX This is only left out of the kernel build to silence warnings. If,
2315 * for some reason this function is used in the kernel, the ifdefs should
2316 * be moved so it is included both in the kernel and userland.
2317 */
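/*
 * Fill in a CCB for the SCSI FORMAT UNIT command; byte2 and the
 * interleave are passed through to the CDB, and any parameter data
 * is transferred out to the device.
 */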
2318void
2319scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
2320 void (*cbfcnp)(struct cam_periph *, union ccb *),
2321 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
2322 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
2323 u_int32_t timeout)
2324{
2325 struct scsi_format_unit *scsi_cmd;
2326
2327 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
2328 scsi_cmd->opcode = FORMAT_UNIT;
2329 scsi_cmd->byte2 = byte2;
2330 scsi_ulto2b(ileave, scsi_cmd->interleave);
2331
2332 cam_fill_csio(csio,
2333 retries,
2334 cbfcnp,
2335 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
2336 tag_action,
2337 data_ptr,
2338 dxfer_len,
2339 sense_len,
2340 sizeof(*scsi_cmd),
2341 timeout);
2342}
2343
2344#endif /* _KERNEL */