Move the IOCTLTRIM ioctl to a better header and rename it to DAIOCTRIM.
[dragonfly.git] / sys / bus / cam / scsi / scsi_da.c
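The commit renames the TRIM ioctl and moves its definition into a dedicated CAM header ("scsi_daio.h", included further down), while sys/ioctl_compat.h keeps the old IOCTLTRIM spelling for existing binaries. A minimal sketch of what such a header could contain follows; the ioctl group letter and command number are placeholder assumptions, and only the DAIOCTRIM name and the off_t[2] {byte offset, byte length} argument layout are taken from the driver code in this file.

/*
 * Hypothetical sketch of sys/bus/cam/scsi/scsi_daio.h -- the group
 * letter 'd' and command number 1 are assumptions, not the real values.
 * The argument is an off_t[2]: [0] = starting byte offset, [1] = number
 * of bytes, both multiples of 512 (see the KKASSERTs in daioctl()).
 */
#ifndef _SCSI_DAIO_H_
#define _SCSI_DAIO_H_

#include <sys/ioccom.h>

#define DAIOCTRIM	_IOW('d', 1, off_t[2])	/* TRIM a byte range */

#endif /* !_SCSI_DAIO_H_ */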
984263bc
MD
1/*
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * Copyright (c) 1997 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
62ade751 28 * $FreeBSD: src/sys/cam/scsi/scsi_da.c,v 1.42.2.46 2003/10/21 22:18:19 thomas Exp $
984263bc
MD
29 */
30
984263bc
MD
31#include <sys/param.h>
32
33#ifdef _KERNEL
684a93c4 34
984263bc
MD
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/buf.h>
38#include <sys/sysctl.h>
62ade751 39#include <sys/taskqueue.h>
1c8b7a9a 40#include <sys/lock.h>
984263bc 41#include <sys/conf.h>
1c8b7a9a 42#include <sys/devicestat.h>
984263bc 43#include <sys/disk.h>
55a78310 44#include <sys/dtype.h>
984263bc
MD
45#include <sys/eventhandler.h>
46#include <sys/malloc.h>
47#include <sys/cons.h>
3020e3be 48#include <sys/proc.h>
e0fb398b 49#include <sys/ioctl_compat.h>
684a93c4 50
3020e3be 51#include <sys/buf2.h>
4e01b467 52#include <sys/thread2.h>
684a93c4 53
1c8b7a9a 54#endif /* _KERNEL */
984263bc 55
05220613 56#ifdef _KERNEL
984263bc 57#include <vm/pmap.h>
05220613 58#endif
984263bc
MD
59
60#ifndef _KERNEL
61#include <stdio.h>
62#include <string.h>
63#endif /* _KERNEL */
64
55230951 65#include <sys/camlib.h>
1f2de5d4
MD
66#include "../cam.h"
67#include "../cam_ccb.h"
68#include "../cam_extend.h"
69#include "../cam_periph.h"
70#include "../cam_xpt_periph.h"
1c8b7a9a 71#include "../cam_sim.h"
984263bc 72
32506cfa 73#include "scsi_daio.h"
1f2de5d4 74#include "scsi_message.h"
984263bc
MD
75
76#ifndef _KERNEL
1f2de5d4 77#include "scsi_da.h"
984263bc
MD
78#endif /* !_KERNEL */
79
80#ifdef _KERNEL
81typedef enum {
82 DA_STATE_PROBE,
bdd58e03 83 DA_STATE_PROBE2,
984263bc
MD
84 DA_STATE_NORMAL
85} da_state;
86
87typedef enum {
88 DA_FLAG_PACK_INVALID = 0x001,
89 DA_FLAG_NEW_PACK = 0x002,
90 DA_FLAG_PACK_LOCKED = 0x004,
91 DA_FLAG_PACK_REMOVABLE = 0x008,
92 DA_FLAG_TAGGED_QUEUING = 0x010,
984263bc 93 DA_FLAG_RETRY_UA = 0x080,
62ade751 94 DA_FLAG_OPEN = 0x100,
af0aa0ac
MD
95 DA_FLAG_SCTX_INIT = 0x200,
96 DA_FLAG_RD_LIMIT = 0x400,
e0fb398b
T
97 DA_FLAG_WR_LIMIT = 0x800,
98 DA_FLAG_CAN_TRIM = 0x1000
984263bc
MD
99} da_flags;
100
101typedef enum {
102 DA_Q_NONE = 0x00,
103 DA_Q_NO_SYNC_CACHE = 0x01,
62ade751
MD
104 DA_Q_NO_6_BYTE = 0x02,
105 DA_Q_NO_PREVENT = 0x04
984263bc
MD
106} da_quirks;
107
108typedef enum {
a9f09b75 109 DA_CCB_POLLED = 0x00,
984263bc 110 DA_CCB_PROBE = 0x01,
bdd58e03
MD
111 DA_CCB_PROBE2 = 0x02,
112 DA_CCB_BUFFER_IO = 0x03,
113 DA_CCB_WAITING = 0x04,
114 DA_CCB_DUMP = 0x05,
e0fb398b 115 DA_CCB_TRIM = 0x06,
984263bc
MD
116 DA_CCB_TYPE_MASK = 0x0F,
117 DA_CCB_RETRY_UA = 0x10
118} da_ccb_state;
119
120/* Offsets into our private area for storing information */
121#define ccb_state ppriv_field0
81b5c339 122#define ccb_bio ppriv_ptr1
984263bc
MD
123
124struct disk_params {
125 u_int8_t heads;
bdd58e03 126 u_int32_t cylinders;
984263bc
MD
127 u_int8_t secs_per_track;
128 u_int32_t secsize; /* Number of bytes/sector */
bdd58e03 129 u_int64_t sectors; /* total number sectors */
984263bc
MD
130};
131
e0fb398b
T
132#define TRIM_MAX_BLOCKS 8
133#define TRIM_MAX_RANGES TRIM_MAX_BLOCKS * 64
134struct trim_request {
135 uint8_t data[TRIM_MAX_RANGES * 8];
136 struct bio *bios[TRIM_MAX_RANGES];
137};
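/*
 * Sizing note (derived from the constants above): TRIM_MAX_RANGES is
 * 8 * 64 = 512 LBA ranges and each range is encoded in 8 bytes (a
 * 48-bit LBA plus a 16-bit sector count), so data[] holds 512 * 8 =
 * 4096 bytes, i.e. eight 512-byte TRIM payload blocks (TRIM_MAX_BLOCKS).
 */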
138
984263bc 139struct da_softc {
af0aa0ac
MD
140 struct bio_queue_head bio_queue_rd;
141 struct bio_queue_head bio_queue_wr;
e0fb398b 142 struct bio_queue_head bio_queue_trim;
984263bc
MD
143 struct devstat device_stats;
144 SLIST_ENTRY(da_softc) links;
145 LIST_HEAD(, ccb_hdr) pending_ccbs;
146 da_state state;
147 da_flags flags;
148 da_quirks quirks;
149 int minimum_cmd_size;
af0aa0ac
MD
150 int outstanding_cmds_rd;
151 int outstanding_cmds_wr;
e0fb398b
T
152 int trim_max_ranges;
153 int trim_running;
154 int trim_enabled;
984263bc
MD
155 struct disk_params params;
156 struct disk disk;
157 union ccb saved_ccb;
62ade751
MD
158 struct task sysctl_task;
159 struct sysctl_ctx_list sysctl_ctx;
160 struct sysctl_oid *sysctl_tree;
e0fb398b 161 struct trim_request trim_req;
984263bc
MD
162};
163
164struct da_quirk_entry {
165 struct scsi_inquiry_pattern inq_pat;
166 da_quirks quirks;
167};
168
169static const char quantum[] = "QUANTUM";
170static const char microp[] = "MICROP";
171
172static struct da_quirk_entry da_quirk_table[] =
173{
62ade751 174 /* SPI, FC devices */
984263bc
MD
175 {
176 /*
177 * Fujitsu M2513A MO drives.
178 * Tested devices: M2513A2 firmware versions 1200 & 1300.
179 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
180 * Reported by: W.Scholten <whs@xs4all.nl>
181 */
182 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
183 /*quirks*/ DA_Q_NO_SYNC_CACHE
184 },
185 {
186 /* See above. */
187 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
188 /*quirks*/ DA_Q_NO_SYNC_CACHE
189 },
190 {
191 /*
192 * This particular Fujitsu drive doesn't like the
193 * synchronize cache command.
194 * Reported by: Tom Jackson <toj@gorilla.net>
195 */
196 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
197 /*quirks*/ DA_Q_NO_SYNC_CACHE
984263bc
MD
198 },
199 {
200 /*
201 * This drive doesn't like the synchronize cache command
202 * either. Reported by: Matthew Jacob <mjacob@feral.com>
203 * in NetBSD PR kern/6027, August 24, 1998.
204 */
205 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
206 /*quirks*/ DA_Q_NO_SYNC_CACHE
207 },
208 {
209 /*
210 * This drive doesn't like the synchronize cache command
211 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
212 * (PR 8882).
213 */
214 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
215 /*quirks*/ DA_Q_NO_SYNC_CACHE
216 },
217 {
218 /*
219 * Doesn't like the synchronize cache command.
220 * Reported by: Blaz Zupan <blaz@gold.amis.net>
221 */
222 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
223 /*quirks*/ DA_Q_NO_SYNC_CACHE
224 },
225 {
226 /*
227 * Doesn't like the synchronize cache command.
d92d7552 228 * Reported by: Blaz Zupan <blaz@gold.amis.net>
984263bc
MD
229 */
230 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
231 /*quirks*/ DA_Q_NO_SYNC_CACHE
232 },
233 {
234 /*
235 * Doesn't like the synchronize cache command.
236 */
237 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
238 /*quirks*/ DA_Q_NO_SYNC_CACHE
239 },
d92d7552
PA
240 {
241 /*
242 * Doesn't like the synchronize cache command.
243 * Reported by: walter@pelissero.de
244 */
245 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
246 /*quirks*/ DA_Q_NO_SYNC_CACHE
247 },
984263bc
MD
248 {
249 /*
250 * Doesn't work correctly with 6 byte reads/writes.
251 * Returns illegal request, and points to byte 9 of the
252 * 6-byte CDB.
253 * Reported by: Adam McDougall <bsdx@spawnet.com>
254 */
255 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
256 /*quirks*/ DA_Q_NO_6_BYTE
257 },
258 {
62ade751 259 /* See above. */
984263bc
MD
260 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
261 /*quirks*/ DA_Q_NO_6_BYTE
262 },
984263bc
MD
263 {
264 /*
d92d7552
PA
265 * Doesn't like the synchronize cache command.
266 * Reported by: walter@pelissero.de
984263bc 267 */
d92d7552 268 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
62ade751 269 /*quirks*/ DA_Q_NO_SYNC_CACHE
984263bc
MD
270 },
271 {
d92d7552
PA
272 /*
273 * The CISS RAID controllers do not support SYNC_CACHE
274 */
275 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
62ade751
MD
276 /*quirks*/ DA_Q_NO_SYNC_CACHE
277 },
a4a9ba75
SW
278 {
279 /*
280 * The same goes for the mly(4) controllers
281 */
282 {T_DIRECT, SIP_MEDIA_FIXED, "MLY*", "*", "MYLX"},
283 /*quirks*/ DA_Q_NO_SYNC_CACHE
284 },
a9453758
MD
285 /*
286 * USB mass storage devices supported by umass(4)
287 *
288 * NOTE: USB attachments automatically set DA_Q_NO_SYNC_CACHE so
289 * it does not have to be specified here.
290 */
d92d7552
PA
291 {
292 /*
293 * Creative Nomad MUVO mp3 player (USB)
294 * PR: kern/53094
295 */
296 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
a9453758 297 /*quirks*/ DA_Q_NO_PREVENT
d92d7552 298 },
984263bc
MD
299 {
300 /*
d92d7552
PA
301 * Sigmatel USB Flash MP3 Player
302 * PR: kern/57046
984263bc 303 */
d92d7552 304 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
a9453758 305 /*quirks*/ DA_Q_NO_PREVENT
984263bc
MD
306 },
307 {
308 /*
d92d7552
PA
309 * SEAGRAND NP-900 MP3 Player
310 * PR: kern/64563
984263bc 311 */
d92d7552 312 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
a9453758 313 /*quirks*/ DA_Q_NO_PREVENT
984263bc
MD
314 },
315 {
316 /*
d92d7552
PA
317 * Creative MUVO Slim mp3 player (USB)
318 * PR: usb/86131
984263bc 319 */
d92d7552 320 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
a9453758 321 "*"}, /*quirks*/ DA_Q_NO_PREVENT
d92d7552
PA
322 },
323 {
324 /*
325 * Philips USB Key Audio KEY013
326 * PR: usb/68412
327 */
328 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
a9453758 329 /*quirks*/ DA_Q_NO_PREVENT
285d490c 330 },
984263bc
MD
331};
332
333static d_open_t daopen;
334static d_close_t daclose;
335static d_strategy_t dastrategy;
984263bc 336static d_dump_t dadump;
e0fb398b 337static d_ioctl_t daioctl;
984263bc
MD
338static periph_init_t dainit;
339static void daasync(void *callback_arg, u_int32_t code,
340 struct cam_path *path, void *arg);
62ade751 341static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
984263bc
MD
342static periph_ctor_t daregister;
343static periph_dtor_t dacleanup;
344static periph_start_t dastart;
345static periph_oninv_t daoninvalidate;
346static void dadone(struct cam_periph *periph,
347 union ccb *done_ccb);
348static int daerror(union ccb *ccb, u_int32_t cam_flags,
349 u_int32_t sense_flags);
350static void daprevent(struct cam_periph *periph, int action);
bdd58e03 351static int dagetcapacity(struct cam_periph *periph);
f7b26992 352static int dacheckmedia(struct cam_periph *periph);
bdd58e03
MD
353static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
354 uint64_t maxsector);
af0aa0ac 355static void daflushbioq(struct bio_queue_head *bioq, int error);
984263bc
MD
356static void dashutdown(void *arg, int howto);
357
358#ifndef DA_DEFAULT_TIMEOUT
359#define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
360#endif
361
362#ifndef DA_DEFAULT_RETRY
363#define DA_DEFAULT_RETRY 4
364#endif
365
366static int da_retry_count = DA_DEFAULT_RETRY;
367static int da_default_timeout = DA_DEFAULT_TIMEOUT;
984263bc 368
984263bc
MD
369SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
370 "CAM Direct Access Disk driver");
371SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
372 &da_retry_count, 0, "Normal I/O retry count");
62ade751 373TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
984263bc
MD
374SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
375 &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
62ade751 376TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
984263bc
MD
377
378static struct periph_driver dadriver =
379{
380 dainit, "da",
381 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
382};
383
2ad14cb5 384PERIPHDRIVER_DECLARE(da, dadriver);
984263bc 385
fef8985e 386static struct dev_ops da_ops = {
b8e1d863 387 { "da", 0, D_DISK | D_MPSAFE },
fef8985e
MD
388 .d_open = daopen,
389 .d_close = daclose,
390 .d_read = physread,
391 .d_write = physwrite,
fef8985e 392 .d_strategy = dastrategy,
e0fb398b
T
393 .d_dump = dadump,
394 .d_ioctl = daioctl
984263bc
MD
395};
396
984263bc
MD
397static struct extend_array *daperiphs;
398
1c8b7a9a
PA
399MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
400
e0fb398b
T
401static int
402daioctl(struct dev_ioctl_args *ap)
403{
404 int unit;
405 int error = 0;
406 struct buf *bp;
407 struct cam_periph *periph;
408 int byte_count;
e0fb398b
T
409
410 off_t *del_num = (off_t*)ap->a_data;
411 off_t bytes_left;
412 off_t bytes_start;
413
414 cdev_t dev = ap->a_head.a_dev;
415
416
417 unit = dkunit(dev);
418 periph = cam_extend_get(daperiphs, unit);
419 if (periph == NULL)
420 return(ENXIO);
e0fb398b
T
421
422 switch (ap->a_cmd) {
423 case IOCTLTRIM:
32506cfa 424 case DAIOCTRIM:
e0fb398b
T
425 {
426
427 bytes_left = del_num[1];
428 bytes_start = del_num[0];
429
430 /* TRIM occurs on 512-byte sectors. */
431 KKASSERT((bytes_left % 512) == 0);
 432	KKASSERT((bytes_start % 512) == 0);
433
434
435 /* Break TRIM up into int-sized commands because of b_bcount */
436 while(bytes_left) {
437
438 /*
 439		 * Rather than squeezing out more blocks in b_bcount
440 * and having to break up the TRIM request in da_start(),
441 * we ensure we can always TRIM this many bytes with one
442 * TRIM command (this happens if the device only
443 * supports one TRIM block).
444 *
 445		 * With a minimum TRIM block size of 1, one TRIM command can
 446		 * free 4194240 blocks (64 * 65535): each LBA range can address
 447		 * 65535 blocks and there are 64 such ranges in a 512-byte
 448		 * block.  And 4194240 * 512 = 0x7FFF8000.
449 *
450 */
451 byte_count = MIN(bytes_left,0x7FFF8000);
d2812084 452 bp = getnewbuf(0, 0, 0, 1);
e0fb398b
T
453
454 bp->b_cmd = BUF_CMD_FREEBLKS;
455 bp->b_bio1.bio_offset = bytes_start;
456 bp->b_bcount = byte_count;
457 bp->b_bio1.bio_flags |= BIO_SYNC;
458 bp->b_bio1.bio_done = biodone_sync;
459
460 dev_dstrategy(ap->a_head.a_dev, &bp->b_bio1);
461
462 if (biowait(&bp->b_bio1, "TRIM")) {
463 kprintf("Error:%d\n", bp->b_error);
53005b09 464 brelse(bp);
e0fb398b
T
465 return(bp->b_error ? bp->b_error : EIO);
466 }
467 brelse(bp);
468 bytes_left -= byte_count;
469 bytes_start += byte_count;
470 }
471 break;
472 }
473 default:
474 return(EINVAL);
475 }
476
477 return(error);
478}
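/*
 * Hypothetical userland use of the ioctl handled above (the device path,
 * header location and error handling are illustrative only, and the
 * scsi_daio.h header providing DAIOCTRIM must be included).  The argument
 * is an off_t[2]: byte offset and byte count, both multiples of 512:
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <err.h>
 *
 *	off_t args[2] = { 0, 1024 * 1024 };	// TRIM 1MB at offset 0
 *	int fd = open("/dev/da0", O_RDWR);
 *	if (fd < 0 || ioctl(fd, DAIOCTRIM, args) < 0)
 *		err(1, "DAIOCTRIM");
 *	close(fd);
 */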
479
984263bc 480static int
fef8985e 481daopen(struct dev_open_args *ap)
984263bc 482{
b13267a5 483 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
484 struct cam_periph *periph;
485 struct da_softc *softc;
a688b15c 486 struct disk_info info;
984263bc 487 int unit;
984263bc 488 int error;
984263bc
MD
489
490 unit = dkunit(dev);
984263bc 491 periph = cam_extend_get(daperiphs, unit);
b05e84c9 492 if (periph == NULL) {
984263bc 493 return (ENXIO);
b05e84c9 494 }
984263bc 495
1c8b7a9a
PA
496 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
497 return(ENXIO);
498 }
499
500 cam_periph_lock(periph);
501 if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
502 cam_periph_unlock(periph);
503 cam_periph_release(periph);
504 return (error);
505 }
506
507 unit = periph->unit_number;
984263bc
MD
508 softc = (struct da_softc *)periph->softc;
509
510 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
9ece9268
PA
511 ("daopen: dev=%s (unit %d)\n", devtoname(dev),
512 unit));
984263bc 513
984263bc
MD
514 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
515 /* Invalidate our pack information. */
516 disk_invalidate(&softc->disk);
517 softc->flags &= ~DA_FLAG_PACK_INVALID;
518 }
984263bc 519
f7b26992
MD
520 error = dacheckmedia(periph);
521 softc->flags |= DA_FLAG_OPEN;
984263bc
MD
522
523 if (error == 0) {
524 struct ccb_getdev cgd;
525
a688b15c
MD
526 /* Build disk information structure */
527 bzero(&info, sizeof(info));
528 info.d_type = DTYPE_SCSI;
984263bc
MD
529
530 /*
531 * Grab the inquiry data to get the vendor and product names.
532 * Put them in the typename and packname for the label.
533 */
534 xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
535 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
536 xpt_action((union ccb *)&cgd);
537
984263bc
MD
538 /*
539 * Check to see whether or not the blocksize is set yet.
540 * If it isn't, set it and then clear the blocksize
541 * unavailable flag for the device statistics.
542 */
543 if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
544 softc->device_stats.block_size = softc->params.secsize;
545 softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
546 }
547 }
548
b05e84c9 549 if (error == 0) {
62ade751
MD
550 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
551 (softc->quirks & DA_Q_NO_PREVENT) == 0)
b05e84c9
PA
552 daprevent(periph, PR_PREVENT);
553 } else {
984263bc
MD
554 softc->flags &= ~DA_FLAG_OPEN;
555 cam_periph_release(periph);
556 }
2d19cdd3 557 cam_periph_unhold(periph, 1);
984263bc
MD
558 return (error);
559}
560
561static int
fef8985e 562daclose(struct dev_close_args *ap)
984263bc 563{
b13267a5 564 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
565 struct cam_periph *periph;
566 struct da_softc *softc;
567 int unit;
568 int error;
569
570 unit = dkunit(dev);
571 periph = cam_extend_get(daperiphs, unit);
572 if (periph == NULL)
573 return (ENXIO);
574
1c8b7a9a
PA
575 cam_periph_lock(periph);
576 if ((error = cam_periph_hold(periph, 0)) != 0) {
577 cam_periph_unlock(periph);
578 cam_periph_release(periph);
579 return (error);
984263bc
MD
580 }
581
1c8b7a9a
PA
582 softc = (struct da_softc *)periph->softc;
583
984263bc
MD
584 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
585 union ccb *ccb;
586
587 ccb = cam_periph_getccb(periph, /*priority*/1);
a9f09b75 588 ccb->ccb_h.ccb_state = DA_CCB_POLLED;
984263bc
MD
589
590 scsi_synchronize_cache(&ccb->csio,
591 /*retries*/1,
592 /*cbfcnp*/dadone,
593 MSG_SIMPLE_Q_TAG,
594 /*begin_lba*/0,/* Cover the whole disk */
595 /*lb_count*/0,
596 SSD_FULL_SIZE,
597 5 * 60 * 1000);
598
599 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
600 /*sense_flags*/SF_RETRY_UA,
601 &softc->device_stats);
602
603 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
604 if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
605 CAM_SCSI_STATUS_ERROR) {
606 int asc, ascq;
607 int sense_key, error_code;
608
609 scsi_extract_sense(&ccb->csio.sense_data,
610 &error_code,
611 &sense_key,
612 &asc, &ascq);
613 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
614 scsi_sense_print(&ccb->csio);
615 } else {
1c8b7a9a
PA
616 xpt_print(periph->path, "Synchronize cache "
617 "failed, status == 0x%x, scsi status == "
618 "0x%x\n", ccb->csio.ccb_h.status,
619 ccb->csio.scsi_status);
984263bc
MD
620 }
621 }
622
623 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
624 cam_release_devq(ccb->ccb_h.path,
625 /*relsim_flags*/0,
626 /*reduction*/0,
627 /*timeout*/0,
628 /*getcount_only*/0);
629
630 xpt_release_ccb(ccb);
631
632 }
633
634 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
62ade751
MD
635 if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
636 daprevent(periph, PR_ALLOW);
984263bc
MD
637 /*
 638		 * If we've got removable media, mark the blocksize as
639 * unavailable, since it could change when new media is
640 * inserted.
641 */
642 softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
643 }
644
fca0fce6
MD
645 /*
646 * Don't compound any ref counting software bugs with more.
647 */
648 if (softc->flags & DA_FLAG_OPEN) {
649 softc->flags &= ~DA_FLAG_OPEN;
650 cam_periph_release(periph);
651 } else {
1c8b7a9a
PA
652 xpt_print(periph->path,
653 "daclose() called on an already closed device!\n");
fca0fce6 654 }
2d19cdd3 655 cam_periph_unhold(periph, 1);
984263bc
MD
656 return (0);
657}
658
659/*
660 * Actually translate the requested transfer into one the physical driver
661 * can understand. The transfer is described by a buf and will include
662 * only one physical transfer.
663 */
fef8985e
MD
664static int
665dastrategy(struct dev_strategy_args *ap)
984263bc 666{
b13267a5 667 cdev_t dev = ap->a_head.a_dev;
fef8985e 668 struct bio *bio = ap->a_bio;
81b5c339 669 struct buf *bp = bio->bio_buf;
984263bc
MD
670 struct cam_periph *periph;
671 struct da_softc *softc;
672 u_int unit;
984263bc 673
81b5c339 674 unit = dkunit(dev);
984263bc
MD
675 periph = cam_extend_get(daperiphs, unit);
676 if (periph == NULL) {
677 bp->b_error = ENXIO;
678 goto bad;
679 }
680 softc = (struct da_softc *)periph->softc;
1c8b7a9a
PA
681
682 cam_periph_lock(periph);
683
984263bc
MD
684#if 0
685 /*
686 * check it's not too big a transfer for our adapter
687 */
81b5c339 688 scsi_minphys(bp, &sd_switch);
984263bc
MD
689#endif
690
691 /*
692 * Mask interrupts so that the pack cannot be invalidated until
693 * after we are in the queue. Otherwise, we might not properly
694 * clean up one of the buffers.
695 */
984263bc
MD
696
697 /*
698 * If the device has been made invalid, error out
699 */
700 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1c8b7a9a 701 cam_periph_unlock(periph);
984263bc
MD
702 bp->b_error = ENXIO;
703 goto bad;
704 }
705
706 /*
707 * Place it in the queue of disk activities for this disk
708 */
af0aa0ac 709 if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)
c6cad506 710 bioqdisksort(&softc->bio_queue_wr, bio);
e0fb398b 711 else if (bp->b_cmd == BUF_CMD_FREEBLKS)
c6cad506 712 bioqdisksort(&softc->bio_queue_trim, bio);
af0aa0ac 713 else
c6cad506 714 bioqdisksort(&softc->bio_queue_rd, bio);
984263bc
MD
715
716 /*
717 * Schedule ourselves for performing the work.
718 */
719 xpt_schedule(periph, /* XXX priority */1);
1c8b7a9a 720 cam_periph_unlock(periph);
984263bc 721
fef8985e 722 return(0);
984263bc
MD
723bad:
724 bp->b_flags |= B_ERROR;
725
726 /*
727 * Correctly set the buf to indicate a completed xfer
728 */
729 bp->b_resid = bp->b_bcount;
81b5c339 730 biodone(bio);
fef8985e 731 return(0);
984263bc
MD
732}
733
984263bc 734static int
fef8985e 735dadump(struct dev_dump_args *ap)
984263bc 736{
b13267a5 737 cdev_t dev = ap->a_head.a_dev;
984263bc
MD
738 struct cam_periph *periph;
739 struct da_softc *softc;
740 u_int unit;
b24cd69c 741 u_int32_t secsize;
984263bc 742 struct ccb_scsiio csio;
984263bc
MD
743
744 unit = dkunit(dev);
984263bc 745 periph = cam_extend_get(daperiphs, unit);
b24cd69c 746 if (periph == NULL)
984263bc 747 return (ENXIO);
b24cd69c 748
984263bc 749 softc = (struct da_softc *)periph->softc;
1c8b7a9a 750 cam_periph_lock(periph);
b24cd69c
AH
751 secsize = softc->params.secsize; /* XXX: or ap->a_secsize? */
752
1c8b7a9a
PA
753 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
754 cam_periph_unlock(periph);
984263bc 755 return (ENXIO);
1c8b7a9a 756 }
984263bc 757
b24cd69c
AH
758 /*
759 * because length == 0 means we are supposed to flush cache, we only
760 * try to write something if length > 0.
761 */
762 if (ap->a_length > 0) {
984263bc 763 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
0b0362e1 764 csio.ccb_h.flags |= CAM_POLLED;
984263bc
MD
765 csio.ccb_h.ccb_state = DA_CCB_DUMP;
766 scsi_read_write(&csio,
767 /*retries*/1,
768 dadone,
769 MSG_ORDERED_Q_TAG,
770 /*read*/FALSE,
771 /*byte2*/0,
772 /*minimum_cmd_size*/ softc->minimum_cmd_size,
b24cd69c
AH
773 ap->a_offset / secsize,
774 ap->a_length / secsize,
775 /*data_ptr*/(u_int8_t *) ap->a_virtual,
776 /*dxfer_len*/ap->a_length,
984263bc
MD
777 /*sense_len*/SSD_FULL_SIZE,
778 DA_DEFAULT_TIMEOUT * 1000);
779 xpt_polled_action((union ccb *)&csio);
780
781 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
85f8e2ea 782 kprintf("Aborting dump due to I/O error.\n");
984263bc
MD
783 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
784 CAM_SCSI_STATUS_ERROR)
785 scsi_sense_print(&csio);
786 else
85f8e2ea 787 kprintf("status == 0x%x, scsi status == 0x%x\n",
984263bc
MD
788 csio.ccb_h.status, csio.scsi_status);
789 return(EIO);
790 }
b24cd69c
AH
791 cam_periph_unlock(periph);
792 return 0;
984263bc
MD
793 }
794
795 /*
796 * Sync the disk cache contents to the physical media.
797 */
798 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
799
800 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
801 csio.ccb_h.ccb_state = DA_CCB_DUMP;
802 scsi_synchronize_cache(&csio,
803 /*retries*/1,
804 /*cbfcnp*/dadone,
805 MSG_SIMPLE_Q_TAG,
806 /*begin_lba*/0,/* Cover the whole disk */
807 /*lb_count*/0,
808 SSD_FULL_SIZE,
809 5 * 60 * 1000);
810 xpt_polled_action((union ccb *)&csio);
811
812 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
813 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
814 CAM_SCSI_STATUS_ERROR) {
815 int asc, ascq;
816 int sense_key, error_code;
817
818 scsi_extract_sense(&csio.sense_data,
819 &error_code,
820 &sense_key,
821 &asc, &ascq);
822 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
823 scsi_sense_print(&csio);
824 } else {
1c8b7a9a
PA
825 xpt_print(periph->path, "Synchronize cache "
826 "failed, status == 0x%x, scsi status == "
827 "0x%x\n", csio.ccb_h.status,
828 csio.scsi_status);
984263bc
MD
829 }
830 }
831 }
1c8b7a9a 832 cam_periph_unlock(periph);
984263bc
MD
833 return (0);
834}
835
836static void
837dainit(void)
838{
839 cam_status status;
984263bc
MD
840
841 /*
842 * Create our extend array for storing the devices we attach to.
843 */
844 daperiphs = cam_extend_new();
984263bc 845 if (daperiphs == NULL) {
85f8e2ea 846 kprintf("da: Failed to alloc extend array!\n");
984263bc
MD
847 return;
848 }
3690a379 849
984263bc
MD
850 /*
851 * Install a global async callback. This callback will
852 * receive async callbacks like "new device found".
853 */
1c8b7a9a 854 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
984263bc
MD
855
856 if (status != CAM_REQ_CMP) {
85f8e2ea 857 kprintf("da: Failed to attach master async callback "
984263bc 858 "due to status 0x%x!\n", status);
0c4341b9 859 } else {
984263bc
MD
860 /* Register our shutdown event handler */
861 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
c022ffc9 862 NULL, SHUTDOWN_PRI_SECOND)) == NULL)
5ee727b6
SW
863 kprintf("%s: shutdown event registration failed!\n",
864 __func__);
984263bc
MD
865 }
866}
867
868static void
869daoninvalidate(struct cam_periph *periph)
870{
984263bc 871 struct da_softc *softc;
984263bc
MD
872
873 softc = (struct da_softc *)periph->softc;
874
875 /*
876 * De-register any async callbacks.
877 */
1c8b7a9a 878 xpt_register_async(0, daasync, periph, periph->path);
984263bc
MD
879
880 softc->flags |= DA_FLAG_PACK_INVALID;
881
984263bc
MD
882 /*
883 * Return all queued I/O with ENXIO.
884 * XXX Handle any transactions queued to the card
885 * with XPT_ABORT_CCB.
886 */
e0fb398b 887 daflushbioq(&softc->bio_queue_trim, ENXIO);
af0aa0ac
MD
888 daflushbioq(&softc->bio_queue_wr, ENXIO);
889 daflushbioq(&softc->bio_queue_rd, ENXIO);
890 xpt_print(periph->path, "lost device\n");
891}
892
893static void
894daflushbioq(struct bio_queue_head *bioq, int error)
895{
896 struct bio *q_bio;
897 struct buf *q_bp;
898
899 while ((q_bio = bioq_first(bioq)) != NULL){
900 bioq_remove(bioq, q_bio);
81b5c339 901 q_bp = q_bio->bio_buf;
984263bc 902 q_bp->b_resid = q_bp->b_bcount;
af0aa0ac 903 q_bp->b_error = error;
984263bc 904 q_bp->b_flags |= B_ERROR;
81b5c339 905 biodone(q_bio);
984263bc 906 }
984263bc
MD
907}
908
909static void
910dacleanup(struct cam_periph *periph)
911{
912 struct da_softc *softc;
913
914 softc = (struct da_softc *)periph->softc;
915
916 devstat_remove_entry(&softc->device_stats);
917 cam_extend_release(daperiphs, periph->unit_number);
1c8b7a9a 918 xpt_print(periph->path, "removing device entry\n");
62ade751
MD
919 /*
920 * If we can't free the sysctl tree, oh well...
921 */
922 if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
923 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
1c8b7a9a 924 xpt_print(periph->path, "can't remove sysctl context\n");
62ade751 925 }
2d19cdd3 926 periph->softc = NULL;
e4c9c0c8 927 if (softc->disk.d_rawdev) {
1c8b7a9a 928 cam_periph_unlock(periph);
335dda38 929 disk_destroy(&softc->disk);
1c8b7a9a 930 cam_periph_lock(periph);
984263bc 931 }
1c8b7a9a 932
efda3bd0 933 kfree(softc, M_DEVBUF);
984263bc
MD
934}
935
936static void
937daasync(void *callback_arg, u_int32_t code,
938 struct cam_path *path, void *arg)
939{
940 struct cam_periph *periph;
941
942 periph = (struct cam_periph *)callback_arg;
87993e5a 943
984263bc
MD
944 switch (code) {
945 case AC_FOUND_DEVICE:
946 {
947 struct ccb_getdev *cgd;
948 cam_status status;
949
950 cgd = (struct ccb_getdev *)arg;
e9936c96
PA
951 if (cgd == NULL)
952 break;
984263bc
MD
953
954 if (SID_TYPE(&cgd->inq_data) != T_DIRECT
955 && SID_TYPE(&cgd->inq_data) != T_RBC
956 && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
957 break;
958
87993e5a
MD
959 /*
960 * Don't complain if a valid peripheral is already attached.
961 */
962 periph = cam_periph_find(cgd->ccb_h.path, "da");
963 if (periph && (periph->flags & CAM_PERIPH_INVALID) == 0)
964 break;
965
984263bc
MD
966 /*
967 * Allocate a peripheral instance for
968 * this device and start the probe
969 * process.
970 */
971 status = cam_periph_alloc(daregister, daoninvalidate,
972 dacleanup, dastart,
973 "da", CAM_PERIPH_BIO,
974 cgd->ccb_h.path, daasync,
975 AC_FOUND_DEVICE, cgd);
976
87993e5a 977 if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
5ee727b6
SW
978 kprintf("%s: Unable to attach to new device "
979 "due to status 0x%x\n", __func__, status);
87993e5a 980 }
984263bc
MD
981 break;
982 }
983 case AC_SENT_BDR:
984 case AC_BUS_RESET:
985 {
986 struct da_softc *softc;
987 struct ccb_hdr *ccbh;
984263bc
MD
988
989 softc = (struct da_softc *)periph->softc;
984263bc
MD
990 /*
991 * Don't fail on the expected unit attention
992 * that will occur.
993 */
994 softc->flags |= DA_FLAG_RETRY_UA;
cbe8f7dc 995 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
984263bc 996 ccbh->ccb_state |= DA_CCB_RETRY_UA;
984263bc
MD
997 /* FALLTHROUGH*/
998 }
999 default:
1000 cam_periph_async(periph, code, path, arg);
1001 break;
1002 }
1003}
1004
62ade751
MD
1005static void
1006dasysctlinit(void *context, int pending)
1007{
1008 struct cam_periph *periph;
1009 struct da_softc *softc;
1010 char tmpstr[80], tmpstr2[80];
1011
1012 periph = (struct cam_periph *)context;
227ce828 1013 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1c8b7a9a 1014 return;
227ce828 1015 }
62ade751 1016
1c8b7a9a 1017 softc = (struct da_softc *)periph->softc;
da10ea93
MD
1018 ksnprintf(tmpstr, sizeof(tmpstr),
1019 "CAM DA unit %d", periph->unit_number);
1020 ksnprintf(tmpstr2, sizeof(tmpstr2),
1021 "%d", periph->unit_number);
62ade751 1022
da10ea93 1023 sysctl_ctx_free(&softc->sysctl_ctx);
62ade751
MD
1024 sysctl_ctx_init(&softc->sysctl_ctx);
1025 softc->flags |= DA_FLAG_SCTX_INIT;
1026 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1027 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1028 CTLFLAG_RD, 0, tmpstr);
1029 if (softc->sysctl_tree == NULL) {
5ee727b6 1030 kprintf("%s: unable to allocate sysctl tree\n", __func__);
1c8b7a9a 1031 cam_periph_release(periph);
62ade751
MD
1032 return;
1033 }
1034
1035 /*
 1036	 * Now register the sysctl handler, so the user can set the value on
1037 * the fly.
1038 */
1039 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1040 OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1041 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1042 "Minimum CDB size");
1c8b7a9a 1043
e0fb398b
T
1044 /* Only create the option if the device supports TRIM */
1045 if (softc->disk.d_info.d_trimflag) {
1046 SYSCTL_ADD_INT(&softc->sysctl_ctx,
1047 SYSCTL_CHILDREN(softc->sysctl_tree),
1048 OID_AUTO,
1049 "trim_enabled",
1050 CTLFLAG_RW,
1051 &softc->trim_enabled,
1052 0,
 1053		    "Enable TRIM for this device (SSD)");
1054 }
1055
1c8b7a9a 1056 cam_periph_release(periph);
62ade751
MD
1057}
1058
1059static int
1060dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
1061{
1062 int error, value;
1063
1064 value = *(int *)arg1;
1065
1066 error = sysctl_handle_int(oidp, &value, 0, req);
1067
1068 if ((error != 0)
1069 || (req->newptr == NULL))
1070 return (error);
1071
1072 /*
bdd58e03 1073	 * Acceptable values here are 6, 10, 12, or 16.
62ade751
MD
1074 */
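	/*
	 * For example, a request for 8 is rounded up to 10, 11 becomes
	 * 12, and anything larger than 12 becomes 16.
	 */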
1075 if (value < 6)
1076 value = 6;
1077 else if ((value > 6)
1078 && (value <= 10))
1079 value = 10;
bdd58e03
MD
1080 else if ((value > 10)
1081 && (value <= 12))
62ade751 1082 value = 12;
bdd58e03
MD
1083 else if (value > 12)
1084 value = 16;
62ade751
MD
1085
1086 *(int *)arg1 = value;
1087
1088 return (0);
1089}
1090
984263bc
MD
1091static cam_status
1092daregister(struct cam_periph *periph, void *arg)
1093{
984263bc 1094 struct da_softc *softc;
62ade751 1095 struct ccb_pathinq cpi;
984263bc 1096 struct ccb_getdev *cgd;
62ade751 1097 char tmpstr[80];
984263bc
MD
1098 caddr_t match;
1099
1100 cgd = (struct ccb_getdev *)arg;
1101 if (periph == NULL) {
5ee727b6 1102 kprintf("%s: periph was NULL!!\n", __func__);
984263bc
MD
1103 return(CAM_REQ_CMP_ERR);
1104 }
1105
1106 if (cgd == NULL) {
5ee727b6
SW
1107 kprintf("%s: no getdev CCB, can't register device\n",
1108 __func__);
984263bc
MD
1109 return(CAM_REQ_CMP_ERR);
1110 }
1111
efda3bd0 1112 softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
da10ea93 1113 sysctl_ctx_init(&softc->sysctl_ctx);
984263bc
MD
1114 LIST_INIT(&softc->pending_ccbs);
1115 softc->state = DA_STATE_PROBE;
e0fb398b 1116 bioq_init(&softc->bio_queue_trim);
af0aa0ac
MD
1117 bioq_init(&softc->bio_queue_rd);
1118 bioq_init(&softc->bio_queue_wr);
984263bc
MD
1119 if (SID_IS_REMOVABLE(&cgd->inq_data))
1120 softc->flags |= DA_FLAG_PACK_REMOVABLE;
1121 if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1122 softc->flags |= DA_FLAG_TAGGED_QUEUING;
1123
e0fb398b
T
1124 /* Used to get TRIM status from AHCI driver */
1125 if (cgd->inq_data.vendor_specific1[0] == 1) {
1126 /*
1127 * max number of lba ranges an SSD can handle in a single
1128 * TRIM command. vendor_specific1[1] is the num of 512-byte
1129 * blocks the SSD reports that can be passed in a TRIM cmd.
1130 */
1131 softc->trim_max_ranges =
1132 min(cgd->inq_data.vendor_specific1[1] * 64, TRIM_MAX_RANGES);
1133 }
1134
984263bc
MD
1135 periph->softc = softc;
1136
1137 cam_extend_set(daperiphs, periph->unit_number, periph);
1138
1139 /*
1140 * See if this device has any quirks.
1141 */
1142 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1143 (caddr_t)da_quirk_table,
b370aff7 1144 NELEM(da_quirk_table),
984263bc
MD
1145 sizeof(*da_quirk_table), scsi_inquiry_match);
1146
1147 if (match != NULL)
1148 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1149 else
1150 softc->quirks = DA_Q_NONE;
1151
a9453758
MD
1152 /*
1153 * Unconditionally disable the synchronize cache command for
1154 * usb attachments. It's just impossible to determine if the
1155 * device supports it or not and if it doesn't the port can
1156 * brick.
1157 */
1158 if (strncmp(periph->sim->sim_name, "umass", 4) == 0) {
1159 softc->quirks |= DA_Q_NO_SYNC_CACHE;
1160 }
1161
62ade751
MD
1162 TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
1163
1164 /* Check if the SIM does not want 6 byte commands */
eaae6702 1165 bzero(&cpi, sizeof(cpi));
62ade751
MD
1166 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
1167 cpi.ccb_h.func_code = XPT_PATH_INQ;
1168 xpt_action((union ccb *)&cpi);
1169 if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
1170 softc->quirks |= DA_Q_NO_6_BYTE;
1171
1172 /*
1173 * RBC devices don't have to support READ(6), only READ(10).
1174 */
984263bc
MD
1175 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
1176 softc->minimum_cmd_size = 10;
1177 else
1178 softc->minimum_cmd_size = 6;
1179
62ade751
MD
1180 /*
1181 * Load the user's default, if any.
1182 */
f8c7a42d 1183 ksnprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
62ade751
MD
1184 periph->unit_number);
1185 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
1186
1187 /*
bdd58e03 1188 * 6, 10, 12, and 16 are the currently permissible values.
62ade751
MD
1189 */
1190 if (softc->minimum_cmd_size < 6)
1191 softc->minimum_cmd_size = 6;
1192 else if ((softc->minimum_cmd_size > 6)
1193 && (softc->minimum_cmd_size <= 10))
1194 softc->minimum_cmd_size = 10;
bdd58e03
MD
1195 else if ((softc->minimum_cmd_size > 10)
1196 && (softc->minimum_cmd_size <= 12))
62ade751 1197 softc->minimum_cmd_size = 12;
bdd58e03
MD
1198 else if (softc->minimum_cmd_size > 12)
1199 softc->minimum_cmd_size = 16;
62ade751 1200
984263bc
MD
1201 /*
1202 * The DA driver supports a blocksize, but
1203 * we don't know the blocksize until we do
1204 * a read capacity. So, set a flag to
1205 * indicate that the blocksize is
1206 * unavailable right now. We'll clear the
1207 * flag as soon as we've done a read capacity.
1208 */
1209 devstat_add_entry(&softc->device_stats, "da",
1210 periph->unit_number, 0,
1211 DEVSTAT_BS_UNAVAILABLE,
1212 SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1213 DEVSTAT_PRIORITY_DISK);
1214
1215 /*
1216 * Register this media as a disk
1217 */
1c8b7a9a 1218 CAM_SIM_UNLOCK(periph->sim);
a688b15c 1219 disk_create(periph->unit_number, &softc->disk, &da_ops);
eaae6702
SW
1220 if (cpi.maxio == 0 || cpi.maxio > MAXPHYS)
1221 softc->disk.d_rawdev->si_iosize_max = MAXPHYS;
1222 else
1223 softc->disk.d_rawdev->si_iosize_max = cpi.maxio;
0b41f48b
SW
1224 if (bootverbose) {
1225 kprintf("%s%d: si_iosize_max:%d\n",
1226 periph->periph_name,
1227 periph->unit_number,
1228 softc->disk.d_rawdev->si_iosize_max);
1229 }
1c8b7a9a 1230 CAM_SIM_LOCK(periph->sim);
984263bc
MD
1231
1232 /*
1233 * Add async callbacks for bus reset and
1234 * bus device reset calls. I don't bother
1235 * checking if this fails as, in most cases,
1236 * the system will function just fine without
1237 * them and the only alternative would be to
1238 * not attach the device on failure.
1239 */
1c8b7a9a
PA
1240 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
1241 daasync, periph, periph->path);
1242
984263bc 1243 /*
1c8b7a9a
PA
1244 * Take an exclusive refcount on the periph while dastart is called
1245 * to finish the probe. The reference will be dropped in dadone at
1246 * the end of probe.
984263bc 1247 */
1c8b7a9a 1248 cam_periph_hold(periph, 0);
984263bc
MD
1249 xpt_schedule(periph, /*priority*/5);
1250
1251 return(CAM_REQ_CMP);
1252}
1253
1254static void
1255dastart(struct cam_periph *periph, union ccb *start_ccb)
1256{
1257 struct da_softc *softc;
1258
1259 softc = (struct da_softc *)periph->softc;
1260
984263bc
MD
1261 switch (softc->state) {
1262 case DA_STATE_NORMAL:
1263 {
1264 /* Pull a buffer from the queue and get going on it */
81b5c339 1265 struct bio *bio;
af0aa0ac
MD
1266 struct bio *bio_rd;
1267 struct bio *bio_wr;
984263bc 1268 struct buf *bp;
a9bf1b8c 1269 u_int8_t tag_code;
af0aa0ac 1270 int limit;
984263bc
MD
1271
1272 /*
1273 * See if there is a buf with work for us to do..
1274 */
af0aa0ac
MD
1275 bio_rd = bioq_first(&softc->bio_queue_rd);
1276 bio_wr = bioq_first(&softc->bio_queue_wr);
1277
984263bc
MD
1278 if (periph->immediate_priority <= periph->pinfo.priority) {
1279 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1280 ("queuing for immediate ccb\n"));
1281 start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1282 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1283 periph_links.sle);
1284 periph->immediate_priority = CAM_PRIORITY_NONE;
984263bc 1285 wakeup(&periph->ccb_list);
af0aa0ac 1286 if (bio_rd || bio_wr) {
a9bf1b8c
MD
1287 /*
1288 * Have more work to do, so ensure we stay
1289 * scheduled
1290 */
1291 xpt_schedule(periph, /* XXX priority */1);
1292 }
1293 break;
1294 }
af0aa0ac 1295
e0fb398b
T
1296 /* Run the trim command if not already running */
1297 if (!softc->trim_running &&
4090d6ff 1298 (bio = bioq_first(&softc->bio_queue_trim)) != NULL) {
e0fb398b
T
1299 struct trim_request *req = &softc->trim_req;
1300 struct bio *bio1;
1301 int bps = 0, ranges = 0;
1302
1303 softc->trim_running = 1;
1304 bzero(req, sizeof(*req));
1305 bio1 = bio;
1306 while (1) {
1307 uint64_t lba;
1308 int count;
1309
1310 bp = bio1->bio_buf;
1311 count = bp->b_bcount / softc->params.secsize;
1312 lba = bio1->bio_offset/softc->params.secsize;
1313
1314 kprintf("trim lba:%llu boff:%llu count:%d\n",
1315 (unsigned long long) lba,
1316 (unsigned long long) bio1->bio_offset,
1317 count);
1318
1319 bioq_remove(&softc->bio_queue_trim, bio1);
1320 while (count > 0) {
1321 int c = min(count, 0xffff);
1322 int off = ranges * 8;
1323
1324 req->data[off + 0] = lba & 0xff;
1325 req->data[off + 1] = (lba >> 8) & 0xff;
1326 req->data[off + 2] = (lba >> 16) & 0xff;
1327 req->data[off + 3] = (lba >> 24) & 0xff;
1328 req->data[off + 4] = (lba >> 32) & 0xff;
1329 req->data[off + 5] = (lba >> 40) & 0xff;
1330 req->data[off + 6] = c & 0xff;
1331 req->data[off + 7] = (c >> 8) & 0xff;
1332 lba += c;
1333 count -= c;
1334 ranges++;
1335 }
1336
1337 /* Try to merge multiple TRIM requests */
1338 req->bios[bps++] = bio1;
1339 bio1 = bioq_first(&softc->bio_queue_trim);
1340 if (bio1 == NULL ||
1341 bio1->bio_buf->b_bcount / softc->params.secsize >
1342 (softc->trim_max_ranges - ranges) * 0xffff)
1343 break;
1344 }
1345
1346
1347 cam_fill_csio(&start_ccb->csio,
1348 1/*retries*/,
1349 dadone,
1350 CAM_DIR_OUT,
1351 MSG_SIMPLE_Q_TAG,
1352 req->data,
1353 ((ranges +63)/64)*512,
1354 SSD_FULL_SIZE,
1355 sizeof(struct scsi_rw_6),
1356 da_default_timeout*2);
1357
1358 start_ccb->ccb_h.ccb_state = DA_CCB_TRIM;
1359 LIST_INSERT_HEAD(&softc->pending_ccbs,
1360 &start_ccb->ccb_h, periph_links.le);
1361 start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1362 start_ccb->ccb_h.ccb_bio = bio;
1363 devstat_start_transaction(&softc->device_stats);
1364 xpt_action(start_ccb);
1365 xpt_schedule(periph, 1);
1366 break;
1367 }
1368
af0aa0ac
MD
1369 /*
1370 * Select a read or write buffer to queue. Limit the number
1371 * of tags dedicated to reading or writing, giving reads
1372 * precedence.
1373 *
 1374		 * Writes to modern hard drives go into the HD's cache and
 1375		 * return completion nearly instantly.  That is, until the
 1376		 * cache becomes full.  When the HD's cache becomes full
1377 * write commands will begin to stall. If all available
1378 * tags are taken up by writes which saturate the drive
1379 * reads will become tag-starved.
1380 *
1381 * A similar situation can occur with reads. With many
1382 * parallel readers all tags can be taken up by reads
1383 * and prevent any writes from draining, even if the HD's
1384 * cache is not full.
1385 */
a3c9d3d8 1386 limit = periph->sim->max_tagged_dev_openings * 2 / 3 + 1;
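		/*
		 * Worked example: with 32 tagged openings the limit is
		 * 32 * 2 / 3 + 1 = 22, so neither reads nor writes can
		 * hold more than 22 of the 32 tags at once.
		 */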
af0aa0ac
MD
1387#if 0
1388 /* DEBUGGING */
1389 static int savets;
1390 static long savets2;
cec73927 1391 if (1 || time_uptime != savets2 || (ticks != savets && (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr))) {
af0aa0ac
MD
1392 kprintf("%d %d (%d)\n",
1393 softc->outstanding_cmds_rd,
1394 softc->outstanding_cmds_wr,
1395 limit);
1396 savets = ticks;
cec73927 1397 savets2 = time_uptime;
af0aa0ac
MD
1398 }
1399#endif
1400 if (bio_rd && softc->outstanding_cmds_rd < limit) {
1401 bio = bio_rd;
1402 bioq_remove(&softc->bio_queue_rd, bio);
1403 } else if (bio_wr && softc->outstanding_cmds_wr < limit) {
1404 bio = bio_wr;
1405 bioq_remove(&softc->bio_queue_wr, bio);
1406 } else {
1407 if (bio_rd)
1408 softc->flags |= DA_FLAG_RD_LIMIT;
1409 if (bio_wr)
1410 softc->flags |= DA_FLAG_WR_LIMIT;
984263bc 1411 xpt_release_ccb(start_ccb);
a9bf1b8c
MD
1412 break;
1413 }
984263bc 1414
a9bf1b8c
MD
1415 /*
1416 * We can queue new work.
1417 */
a9bf1b8c 1418 bp = bio->bio_buf;
54078292 1419
a9bf1b8c 1420 devstat_start_transaction(&softc->device_stats);
54078292 1421
0c4341b9 1422 tag_code = MSG_SIMPLE_Q_TAG;
984263bc 1423
a9bf1b8c
MD
1424 switch(bp->b_cmd) {
1425 case BUF_CMD_READ:
1426 case BUF_CMD_WRITE:
984263bc 1427 /*
a9bf1b8c 1428 * Block read/write op
984263bc 1429 */
a9bf1b8c 1430 KKASSERT(bio->bio_offset % softc->params.secsize == 0);
984263bc 1431
a9bf1b8c
MD
1432 scsi_read_write(
1433 &start_ccb->csio,
1434 da_retry_count, /* retries */
1435 dadone,
1436 tag_code,
1437 (bp->b_cmd == BUF_CMD_READ),
1438 0, /* byte2 */
1439 softc->minimum_cmd_size,
1440 bio->bio_offset / softc->params.secsize,
1441 bp->b_bcount / softc->params.secsize,
1442 bp->b_data,
1443 bp->b_bcount,
1444 SSD_FULL_SIZE, /* sense_len */
1445 da_default_timeout * 1000
1446 );
1447 break;
1448 case BUF_CMD_FLUSH:
a9453758
MD
1449 /*
1450 * Silently complete a flush request if the device
1451 * cannot handle it.
1452 */
1453 if (softc->quirks & DA_Q_NO_SYNC_CACHE) {
1454 xpt_release_ccb(start_ccb);
1455 start_ccb = NULL;
1456 devstat_end_transaction_buf(
1457 &softc->device_stats, bp);
1458 biodone(bio);
1459 } else {
1460 scsi_synchronize_cache(
1461 &start_ccb->csio,
1462 1, /* retries */
1463 dadone, /* cbfcnp */
1464 MSG_SIMPLE_Q_TAG,
1465 0, /* lba */
1466 0, /* count (whole disk) */
1467 SSD_FULL_SIZE,
1468 da_default_timeout*1000 /* timeout */
1469 );
1470 }
a9bf1b8c 1471 break;
e0fb398b
T
1472 case BUF_CMD_FREEBLKS:
1473 if (softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM){
1474 start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1475 break;
1476 }
a9bf1b8c 1477 default:
a9453758
MD
1478 xpt_release_ccb(start_ccb);
1479 start_ccb = NULL;
a9bf1b8c
MD
1480 panic("dastart: unrecognized bio cmd %d", bp->b_cmd);
1481 break; /* NOT REACHED */
1482 }
984263bc 1483
a9bf1b8c
MD
1484 /*
 1485		 * Block out any asynchronous callbacks
1486 * while we touch the pending ccb list.
1487 */
a9453758
MD
1488 if (start_ccb) {
1489 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1490 LIST_INSERT_HEAD(&softc->pending_ccbs,
1491 &start_ccb->ccb_h, periph_links.le);
af0aa0ac
MD
1492 if (bp->b_cmd == BUF_CMD_WRITE ||
1493 bp->b_cmd == BUF_CMD_FLUSH) {
1494 ++softc->outstanding_cmds_wr;
1495 } else {
1496 ++softc->outstanding_cmds_rd;
1497 }
a9453758
MD
1498
1499 /* We expect a unit attention from this device */
1500 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1501 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1502 softc->flags &= ~DA_FLAG_RETRY_UA;
1503 }
a9bf1b8c 1504
a9453758
MD
1505 start_ccb->ccb_h.ccb_bio = bio;
1506 xpt_action(start_ccb);
1507 }
984263bc 1508
a9bf1b8c
MD
1509 /*
1510 * Be sure we stay scheduled if we have more work to do.
1511 */
af0aa0ac
MD
1512 if (bioq_first(&softc->bio_queue_rd) ||
1513 bioq_first(&softc->bio_queue_wr)) {
a9bf1b8c 1514 xpt_schedule(periph, 1);
af0aa0ac 1515 }
984263bc
MD
1516 break;
1517 }
1518 case DA_STATE_PROBE:
1519 {
1520 struct ccb_scsiio *csio;
1521 struct scsi_read_capacity_data *rcap;
1522
1c8b7a9a 1523 rcap = kmalloc(sizeof(*rcap), M_SCSIDA, M_INTWAIT | M_ZERO);
984263bc
MD
1524 csio = &start_ccb->csio;
1525 scsi_read_capacity(csio,
1526 /*retries*/4,
1527 dadone,
1528 MSG_SIMPLE_Q_TAG,
1529 rcap,
1530 SSD_FULL_SIZE,
1531 /*timeout*/5000);
81b5c339 1532 start_ccb->ccb_h.ccb_bio = NULL;
984263bc
MD
1533 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1534 xpt_action(start_ccb);
1535 break;
1536 }
bdd58e03
MD
1537 case DA_STATE_PROBE2:
1538 {
1539 struct ccb_scsiio *csio;
0b0362e1 1540 struct scsi_read_capacity_data_16 *rcaplong;
bdd58e03 1541
0b0362e1
MD
1542 rcaplong = kmalloc(sizeof(*rcaplong), M_SCSIDA,
1543 M_INTWAIT | M_ZERO);
bdd58e03
MD
1544 csio = &start_ccb->csio;
1545 scsi_read_capacity_16(csio,
1546 /*retries*/ 4,
1547 /*cbfcnp*/ dadone,
1548 /*tag_action*/ MSG_SIMPLE_Q_TAG,
1549 /*lba*/ 0,
1550 /*reladr*/ 0,
1551 /*pmi*/ 0,
1552 rcaplong,
1553 /*sense_len*/ SSD_FULL_SIZE,
1554 /*timeout*/ 60000);
1555 start_ccb->ccb_h.ccb_bio = NULL;
1556 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
1557 xpt_action(start_ccb);
1558 break;
1559 }
984263bc
MD
1560 }
1561}
1562
1563static int
1564cmd6workaround(union ccb *ccb)
1565{
1566 struct scsi_rw_6 cmd6;
1567 struct scsi_rw_10 *cmd10;
1568 struct da_softc *softc;
1569 u_int8_t *cdb;
1570 int frozen;
1571
1572 cdb = ccb->csio.cdb_io.cdb_bytes;
1573
1574 /* Translation only possible if CDB is an array and cmd is R/W6 */
1575 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
1576 (*cdb != READ_6 && *cdb != WRITE_6))
1577 return 0;
1578
1c8b7a9a
PA
1579 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
1580 "increasing minimum_cmd_size to 10.\n");
984263bc
MD
1581 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
1582 softc->minimum_cmd_size = 10;
1583
1584 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
1585 cmd10 = (struct scsi_rw_10 *)cdb;
1586 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
1587 cmd10->byte2 = 0;
1588 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
1589 cmd10->reserved = 0;
1590 scsi_ulto2b(cmd6.length, cmd10->length);
1591 cmd10->control = cmd6.control;
1592 ccb->csio.cdb_len = sizeof(*cmd10);
1593
1594 /* Requeue request, unfreezing queue if necessary */
1595 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1596 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1597 xpt_action(ccb);
1598 if (frozen) {
1599 cam_release_devq(ccb->ccb_h.path,
1600 /*relsim_flags*/0,
1601 /*reduction*/0,
1602 /*timeout*/0,
1603 /*getcount_only*/0);
1604 }
1605 return (ERESTART);
1606}
1607
1608static void
1609dadone(struct cam_periph *periph, union ccb *done_ccb)
1610{
1611 struct da_softc *softc;
1612 struct ccb_scsiio *csio;
cd29885a 1613 struct disk_info info;
984263bc
MD
1614
1615 softc = (struct da_softc *)periph->softc;
1616 csio = &done_ccb->csio;
1617 switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1618 case DA_CCB_BUFFER_IO:
e0fb398b 1619 case DA_CCB_TRIM:
984263bc
MD
1620 {
1621 struct buf *bp;
81b5c339 1622 struct bio *bio;
af0aa0ac 1623 int mustsched = 0;
984263bc 1624
81b5c339
MD
1625 bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
1626 bp = bio->bio_buf;
984263bc
MD
1627 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1628 int error;
984263bc
MD
1629 int sf;
1630
1631 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1632 sf = SF_RETRY_UA;
1633 else
1634 sf = 0;
1635
b05e84c9
PA
1636 error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1637 if (error == ERESTART) {
984263bc
MD
1638 /*
 1639				 * A retry was scheduled, so
1640 * just return.
1641 */
1642 return;
1643 }
1644 if (error != 0) {
984263bc
MD
1645 if (error == ENXIO) {
1646 /*
1647 * Catastrophic error. Mark our pack as
1648 * invalid.
1649 */
1c8b7a9a
PA
1650 /*
1651 * XXX See if this is really a media
1652 * XXX change first?
984263bc 1653 */
1c8b7a9a
PA
1654 xpt_print(periph->path,
1655 "Invalidating pack\n");
984263bc
MD
1656 softc->flags |= DA_FLAG_PACK_INVALID;
1657 }
1658
1659 /*
af0aa0ac
MD
1660 * Return all queued write I/O's with EIO
1661 * so the client can retry these I/Os in the
984263bc 1662 * proper order should it attempt to recover.
af0aa0ac
MD
1663 *
1664 * Leave read I/O's alone.
984263bc 1665 */
af0aa0ac 1666 daflushbioq(&softc->bio_queue_wr, EIO);
984263bc
MD
1667 bp->b_error = error;
1668 bp->b_resid = bp->b_bcount;
1669 bp->b_flags |= B_ERROR;
1670 } else {
1671 bp->b_resid = csio->resid;
1672 bp->b_error = 0;
62ade751 1673 if (bp->b_resid != 0)
984263bc 1674 bp->b_flags |= B_ERROR;
984263bc
MD
1675 }
1676 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1677 cam_release_devq(done_ccb->ccb_h.path,
1678 /*relsim_flags*/0,
1679 /*reduction*/0,
1680 /*timeout*/0,
1681 /*getcount_only*/0);
1682 } else {
b05e84c9
PA
1683 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1684 panic("REQ_CMP with QFRZN");
984263bc 1685 bp->b_resid = csio->resid;
62ade751 1686 if (csio->resid > 0)
984263bc 1687 bp->b_flags |= B_ERROR;
984263bc
MD
1688 }
1689
1690 /*
 1691		 * Block out any asynchronous callbacks
1692 * while we touch the pending ccb list.
1693 */
984263bc 1694 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
af0aa0ac
MD
1695 if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH) {
1696 --softc->outstanding_cmds_wr;
1697 if (softc->flags & DA_FLAG_WR_LIMIT) {
1698 softc->flags &= ~DA_FLAG_WR_LIMIT;
1699 mustsched = 1;
1700 }
1701 } else {
1702 --softc->outstanding_cmds_rd;
1703 if (softc->flags & DA_FLAG_RD_LIMIT) {
1704 softc->flags &= ~DA_FLAG_RD_LIMIT;
1705 mustsched = 1;
1706 }
1707 }
984263bc
MD
1708
1709 devstat_end_transaction_buf(&softc->device_stats, bp);
e0fb398b
T
1710 if ((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) ==
1711 DA_CCB_TRIM) {
1712 struct trim_request *req =
1713 (struct trim_request *) csio->data_ptr;
1714 int i;
1715
1716 for (i = 1; i < softc->trim_max_ranges &&
1717 req->bios[i]; i++) {
1718 struct bio *bp1 = req->bios[i];
1719
1720 bp1->bio_buf->b_resid = bp->b_resid;
1721 bp1->bio_buf->b_error = bp->b_error;
1722 if (bp->b_flags & B_ERROR)
1723 bp1->bio_buf->b_flags |= B_ERROR;
1724 biodone(bp1);
1725 }
1726 softc->trim_running = 0;
1727 biodone(bio);
1728 xpt_schedule(periph,1);
1729 } else
1730 biodone(bio);
1731
af0aa0ac
MD
1732
1733 if (mustsched)
1734 xpt_schedule(periph, /*priority*/1);
1735
984263bc
MD
1736 break;
1737 }
1738 case DA_CCB_PROBE:
bdd58e03 1739 case DA_CCB_PROBE2:
984263bc
MD
1740 {
1741 struct scsi_read_capacity_data *rdcap;
0b0362e1 1742 struct scsi_read_capacity_data_16 *rcaplong;
984263bc
MD
1743 char announce_buf[80];
1744
bdd58e03
MD
1745 rdcap = NULL;
1746 rcaplong = NULL;
1747 if (softc->state == DA_STATE_PROBE)
1748 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
1749 else
0b0362e1 1750 rcaplong = (struct scsi_read_capacity_data_16 *)
bdd58e03 1751 csio->data_ptr;
f7b26992
MD
1752
1753 bzero(&info, sizeof(info));
1754 info.d_type = DTYPE_SCSI;
1755 info.d_serialno = xpt_path_serialno(periph->path);
984263bc
MD
1756
1757 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1758 struct disk_params *dp;
bdd58e03
MD
1759 uint32_t block_size;
1760 uint64_t maxsector;
1761
1762 if (softc->state == DA_STATE_PROBE) {
1763 block_size = scsi_4btoul(rdcap->length);
1764 maxsector = scsi_4btoul(rdcap->addr);
984263bc 1765
bdd58e03
MD
1766 /*
1767 * According to SBC-2, if the standard 10
1768 * byte READ CAPACITY command returns 2^32,
1769 * we should issue the 16 byte version of
1770 * the command, since the device in question
1771 * has more sectors than can be represented
1772 * with the short version of the command.
1773 */
1774 if (maxsector == 0xffffffff) {
1775 softc->state = DA_STATE_PROBE2;
1c8b7a9a 1776 kfree(rdcap, M_SCSIDA);
bdd58e03
MD
1777 xpt_release_ccb(done_ccb);
1778 xpt_schedule(periph, /*priority*/5);
1779 return;
1780 }
1781 } else {
1782 block_size = scsi_4btoul(rcaplong->length);
1783 maxsector = scsi_8btou64(rcaplong->addr);
1784 }
1785 dasetgeom(periph, block_size, maxsector);
984263bc 1786 dp = &softc->params;
f8c7a42d 1787 ksnprintf(announce_buf, sizeof(announce_buf),
bdd58e03
MD
1788 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1789 (uintmax_t) (((uintmax_t)dp->secsize *
1790 dp->sectors) / (1024*1024)),
1791 (uintmax_t)dp->sectors,
984263bc
MD
1792 dp->secsize, dp->heads, dp->secs_per_track,
1793 dp->cylinders);
e0fb398b 1794
cd29885a 1795 CAM_SIM_UNLOCK(periph->sim);
cd29885a
MD
1796 info.d_media_blksize = softc->params.secsize;
1797 info.d_media_blocks = softc->params.sectors;
1798 info.d_media_size = 0;
1799 info.d_secpertrack = softc->params.secs_per_track;
1800 info.d_nheads = softc->params.heads;
1801 info.d_ncylinders = softc->params.cylinders;
1802 info.d_secpercyl = softc->params.heads *
1803 softc->params.secs_per_track;
55230951 1804 info.d_serialno = xpt_path_serialno(periph->path);
cd29885a
MD
1805 disk_setdiskinfo(&softc->disk, &info);
1806 CAM_SIM_LOCK(periph->sim);
984263bc
MD
1807 } else {
1808 int error;
1809
1810 announce_buf[0] = '\0';
1811
1812 /*
1813 * Retry any UNIT ATTENTION type errors. They
1814 * are expected at boot.
1815 */
b05e84c9
PA
1816 error = daerror(done_ccb, CAM_RETRY_SELTO,
1817 SF_RETRY_UA|SF_NO_PRINT);
984263bc
MD
1818 if (error == ERESTART) {
1819 /*
 1820			 * A retry was scheduled, so
1821 * just return.
1822 */
1823 return;
1824 } else if (error != 0) {
1825 struct scsi_sense_data *sense;
1826 int asc, ascq;
1827 int sense_key, error_code;
1828 int have_sense;
1829 cam_status status;
1830 struct ccb_getdev cgd;
1831
1832 /* Don't wedge this device's queue */
984263bc 1833 status = done_ccb->ccb_h.status;
b05e84c9
PA
1834 if ((status & CAM_DEV_QFRZN) != 0)
1835 cam_release_devq(done_ccb->ccb_h.path,
1836 /*relsim_flags*/0,
1837 /*reduction*/0,
1838 /*timeout*/0,
1839 /*getcount_only*/0);
1840
984263bc
MD
1841
1842 xpt_setup_ccb(&cgd.ccb_h,
1843 done_ccb->ccb_h.path,
1844 /* priority */ 1);
1845 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1846 xpt_action((union ccb *)&cgd);
1847
1848 if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1849 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1850 || ((status & CAM_AUTOSNS_VALID) == 0))
1851 have_sense = FALSE;
1852 else
1853 have_sense = TRUE;
1854
1855 if (have_sense) {
1856 sense = &csio->sense_data;
1857 scsi_extract_sense(sense, &error_code,
1858 &sense_key,
1859 &asc, &ascq);
1860 }
1861 /*
1862 * Attach to anything that claims to be a
1863 * direct access or optical disk device,
1864 * as long as it doesn't return a "Logical
1865 * unit not supported" (0x25) error.
1866 */
1867 if ((have_sense) && (asc != 0x25)
b05e84c9
PA
1868 && (error_code == SSD_CURRENT_ERROR)) {
1869 const char *sense_key_desc;
1870 const char *asc_desc;
1871
1872 scsi_sense_desc(sense_key, asc, ascq,
1873 &cgd.inq_data,
1874 &sense_key_desc,
1875 &asc_desc);
f8c7a42d 1876 ksnprintf(announce_buf,
984263bc
MD
1877 sizeof(announce_buf),
1878 "Attempt to query device "
1879 "size failed: %s, %s",
b05e84c9
PA
1880 sense_key_desc,
1881 asc_desc);
f7b26992
MD
1882 info.d_media_blksize = 512;
1883 disk_setdiskinfo(&softc->disk, &info);
b05e84c9 1884 } else {
984263bc
MD
1885 if (have_sense)
1886 scsi_sense_print(
1887 &done_ccb->csio);
1888 else {
1c8b7a9a
PA
1889 xpt_print(periph->path,
1890 "got CAM status %#x\n",
1891 done_ccb->ccb_h.status);
984263bc
MD
1892 }
1893
1c8b7a9a
PA
1894 xpt_print(periph->path, "fatal error, "
1895 "failed to attach to device\n");
984263bc
MD
1896
1897 /*
1898 * Free up resources.
1899 */
1900 cam_periph_invalidate(periph);
1901 }
1902 }
1903 }
1c8b7a9a 1904 kfree(csio->data_ptr, M_SCSIDA);
62ade751 1905 if (announce_buf[0] != '\0') {
984263bc 1906 xpt_announce_periph(periph, announce_buf);
62ade751
MD
1907 /*
1908 * Create our sysctl variables, now that we know
1909 * we have successfully attached.
1910 */
b3504e03
JH
1911 taskqueue_enqueue(taskqueue_thread[mycpuid],
1912 &softc->sysctl_task);
62ade751 1913 }
e0fb398b
T
1914
1915 if (softc->trim_max_ranges) {
1916 softc->disk.d_info.d_trimflag |= DA_FLAG_CAN_TRIM;
1917 kprintf("%s%d: supports TRIM\n",
1918 periph->periph_name,
1919 periph->unit_number);
1920 }
b05e84c9 1921 softc->state = DA_STATE_NORMAL;
984263bc
MD
1922 /*
1923 * Since our peripheral may be invalidated by an error
1924 * above or an external event, we must release our CCB
1925 * before releasing the probe lock on the peripheral.
1926 * The peripheral will only go away once the last lock
1927 * is removed, and we need it around for the CCB release
1928 * operation.
1929 */
1930 xpt_release_ccb(done_ccb);
2d19cdd3 1931 cam_periph_unhold(periph, 0);
984263bc
MD
1932 return;
1933 }
1934 case DA_CCB_WAITING:
1935 {
1936 /* Caller will release the CCB */
1937 wakeup(&done_ccb->ccb_h.cbfcnp);
1938 return;
1939 }
1940 case DA_CCB_DUMP:
1941 /* No-op. We're polling */
1942 return;
a9f09b75
MD
1943 case DA_CCB_POLLED:
1944 /* Caller releases ccb */
1945 wakeup(&done_ccb->ccb_h.cbfcnp);
1946 return;
984263bc
MD
1947 default:
1948 break;
1949 }
1950 xpt_release_ccb(done_ccb);
1951}
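/*
 * Illustrative sketch only (hypothetical helper, not referenced anywhere):
 * dadone() above sets DA_FLAG_CAN_TRIM in d_trimflag once the device
 * reports a non-zero trim_max_ranges.  A caller wanting to honor that
 * capability could test it along these lines:
 */
#if 0
static __inline int
da_can_trim(struct da_softc *softc)
{
	/* Flag is set in dadone() when the device advertises TRIM support */
	return ((softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM) != 0);
}
#endif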
1952
1953static int
1954daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1955{
1956 struct da_softc *softc;
1957 struct cam_periph *periph;
62ade751 1958 int error;
984263bc
MD
1959
1960 periph = xpt_path_periph(ccb->ccb_h.path);
1961 softc = (struct da_softc *)periph->softc;
1962
1963 /*
1964 * Automatically detect devices that do not support
1965 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
1966 */
1967 error = 0;
62ade751
MD
1968 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
1969 error = cmd6workaround(ccb);
1970 } else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
1971 CAM_SCSI_STATUS_ERROR)
1972 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
1973 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
1974 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
1975 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
1976 int sense_key, error_code, asc, ascq;
1977
984263bc
MD
1978 scsi_extract_sense(&ccb->csio.sense_data,
1979 &error_code, &sense_key, &asc, &ascq);
1980 if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
1981 error = cmd6workaround(ccb);
1982 }
1983 if (error == ERESTART)
1984 return (ERESTART);
1985
1986 /*
1987 * XXX
1988 * Until we have a better way of doing pack validation,
1989 * don't treat UAs as errors.
1990 */
1991 sense_flags |= SF_RETRY_UA;
1992 return(cam_periph_error(ccb, cam_flags, sense_flags,
1993 &softc->saved_ccb));
1994}
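
/*
 * Usage note (illustrative, mirroring calls later in this file): daerror()
 * is also handed to cam_periph_runccb() as the error routine for polled
 * commands, e.g. the READ CAPACITY requests in dagetcapacity():
 *
 *	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
 *				  SF_RETRY_UA, &softc->device_stats);
 *
 * When daerror() returns ERESTART a retry (such as the READ(6)->READ(10)
 * upgrade above) has been scheduled; see the ERESTART handling in dadone().
 */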
1995
1996static void
1997daprevent(struct cam_periph *periph, int action)
1998{
1999 struct da_softc *softc;
2000 union ccb *ccb;
2001 int error;
2002
2003 softc = (struct da_softc *)periph->softc;
2004
2005 if (((action == PR_ALLOW)
2006 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
2007 || ((action == PR_PREVENT)
2008 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
2009 return;
2010 }
2011
2012 ccb = cam_periph_getccb(periph, /*priority*/1);
a9f09b75 2013 ccb->ccb_h.ccb_state = DA_CCB_POLLED;
984263bc
MD
2014
2015 scsi_prevent(&ccb->csio,
2016 /*retries*/1,
2017 /*cbcfp*/dadone,
2018 MSG_SIMPLE_Q_TAG,
2019 action,
2020 SSD_FULL_SIZE,
2021 5000);
2022
3f499af5
PA
2023 error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
2024 SF_RETRY_UA, &softc->device_stats);
984263bc
MD
2025
2026 if (error == 0) {
2027 if (action == PR_ALLOW)
2028 softc->flags &= ~DA_FLAG_PACK_LOCKED;
2029 else
2030 softc->flags |= DA_FLAG_PACK_LOCKED;
2031 }
2032
2033 xpt_release_ccb(ccb);
2034}
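
/*
 * Usage sketch (hypothetical call sites, not part of this excerpt):
 * daprevent() is intended for removable media, typically bracketing an
 * open/close pair, e.g.
 *
 *	daprevent(periph, PR_PREVENT);	lock the medium while it is open
 *	...
 *	daprevent(periph, PR_ALLOW);	allow removal again on last close
 *
 * The DA_FLAG_PACK_LOCKED test at the top makes redundant calls cheap.
 */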
2035
f7b26992
MD
2036/*
2037 * Check media on open, e.g. card reader devices which had no initial media.
2038 */
2039static int
2040dacheckmedia(struct cam_periph *periph)
2041{
2042 struct disk_params *dp;
2043 struct da_softc *softc;
2044 struct disk_info info;
2045 int error;
2046
2047 softc = (struct da_softc *)periph->softc;
2048 dp = &softc->params;
2049
2050 error = dagetcapacity(periph);
2051
2052 /*
2053 * Only reprobe on initial open and if the media is removable.
9670bdda
MD
2054 *
2055	 * NOTE: If we call disk_setdiskinfo() the device probe code
2056	 *	 will take a bit of time to reprobe the slices and
2057	 *	 partitions, which can mess up booting, so avoid the
2058	 *	 call if nothing has changed.  XXX
f7b26992
MD
2059 */
2060 if (softc->flags & DA_FLAG_OPEN)
2061 return (error);
2062 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
2063 return (error);
2064
2065 bzero(&info, sizeof(info));
2066 info.d_type = DTYPE_SCSI;
2067 info.d_serialno = xpt_path_serialno(periph->path);
2068
2069 if (error == 0) {
f7b26992
MD
2070 CAM_SIM_UNLOCK(periph->sim);
2071 info.d_media_blksize = softc->params.secsize;
2072 info.d_media_blocks = softc->params.sectors;
2073 info.d_media_size = 0;
2074 info.d_secpertrack = softc->params.secs_per_track;
2075 info.d_nheads = softc->params.heads;
2076 info.d_ncylinders = softc->params.cylinders;
2077 info.d_secpercyl = softc->params.heads *
2078 softc->params.secs_per_track;
2079 info.d_serialno = xpt_path_serialno(periph->path);
9670bdda
MD
2080 if (info.d_media_blocks != softc->disk.d_info.d_media_blocks) {
2081 kprintf("%s%d: open removable media: "
2082 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)\n",
2083 periph->periph_name, periph->unit_number,
2084 (uintmax_t)(((uintmax_t)dp->secsize *
2085 dp->sectors) / (1024*1024)),
2086 (uintmax_t)dp->sectors, dp->secsize,
2087 dp->heads, dp->secs_per_track, dp->cylinders);
2088 disk_setdiskinfo(&softc->disk, &info);
2089 }
f7b26992
MD
2090 CAM_SIM_LOCK(periph->sim);
2091 } else {
2092 kprintf("%s%d: open removable media: no media present\n",
2093 periph->periph_name, periph->unit_number);
2094 info.d_media_blksize = 512;
2095 disk_setdiskinfo(&softc->disk, &info);
2096 }
2097 return (error);
2098}
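
/*
 * Usage sketch (hypothetical; the open path is not part of this excerpt):
 * dacheckmedia() is meant to be called from the device open path with the
 * periph held, so devices such as card readers that start out empty get
 * their geometry re-read once media appears, e.g.
 *
 *	error = dacheckmedia(periph);	nonzero means no usable media
 */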
2099
bdd58e03
MD
2100static int
2101dagetcapacity(struct cam_periph *periph)
2102{
2103 struct da_softc *softc;
2104 union ccb *ccb;
2105 struct scsi_read_capacity_data *rcap;
0b0362e1 2106 struct scsi_read_capacity_data_16 *rcaplong;
bdd58e03
MD
2107 uint32_t block_len;
2108 uint64_t maxsector;
2109 int error;
2110
2111 softc = (struct da_softc *)periph->softc;
2112 block_len = 0;
2113 maxsector = 0;
2114 error = 0;
2115
2116	/* Do a read capacity (buffer sized for the larger 16 byte reply) */
1c8b7a9a
PA
2117 rcap = (struct scsi_read_capacity_data *)kmalloc(sizeof(*rcaplong),
2118 M_SCSIDA, M_INTWAIT);
bdd58e03
MD
2119
2120 ccb = cam_periph_getccb(periph, /*priority*/1);
a9f09b75
MD
2121 ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2122
bdd58e03
MD
2123 scsi_read_capacity(&ccb->csio,
2124 /*retries*/4,
2125 /*cbfncp*/dadone,
2126 MSG_SIMPLE_Q_TAG,
2127 rcap,
2128 SSD_FULL_SIZE,
2129 /*timeout*/60000);
2130 ccb->ccb_h.ccb_bio = NULL;
2131
2132 error = cam_periph_runccb(ccb, daerror,
b05e84c9 2133 /*cam_flags*/CAM_RETRY_SELTO,
bdd58e03
MD
2134 /*sense_flags*/SF_RETRY_UA,
2135 &softc->device_stats);
2136
2137 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2138 cam_release_devq(ccb->ccb_h.path,
2139 /*relsim_flags*/0,
2140 /*reduction*/0,
2141 /*timeout*/0,
2142 /*getcount_only*/0);
2143
2144 if (error == 0) {
2145 block_len = scsi_4btoul(rcap->length);
2146 maxsector = scsi_4btoul(rcap->addr);
2147
2148 if (maxsector != 0xffffffff)
2149 goto done;
2150 } else
2151 goto done;
2152
0b0362e1 2153 rcaplong = (struct scsi_read_capacity_data_16 *)rcap;
bdd58e03
MD
2154
2155 scsi_read_capacity_16(&ccb->csio,
2156 /*retries*/ 4,
2157 /*cbfcnp*/ dadone,
2158 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2159 /*lba*/ 0,
2160 /*reladr*/ 0,
2161 /*pmi*/ 0,
2162 rcaplong,
2163 /*sense_len*/ SSD_FULL_SIZE,
2164 /*timeout*/ 60000);
2165 ccb->ccb_h.ccb_bio = NULL;
2166
2167 error = cam_periph_runccb(ccb, daerror,
b05e84c9 2168 /*cam_flags*/CAM_RETRY_SELTO,
bdd58e03
MD
2169 /*sense_flags*/SF_RETRY_UA,
2170 &softc->device_stats);
2171
2172 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2173 cam_release_devq(ccb->ccb_h.path,
2174 /*relsim_flags*/0,
2175 /*reduction*/0,
2176 /*timeout*/0,
2177 /*getcount_only*/0);
2178
2179 if (error == 0) {
2180 block_len = scsi_4btoul(rcaplong->length);
2181 maxsector = scsi_8btou64(rcaplong->addr);
2182 }
2183
2184done:
2185
2186 if (error == 0)
2187 dasetgeom(periph, block_len, maxsector);
2188
2189 xpt_release_ccb(ccb);
2190
1c8b7a9a 2191 kfree(rcap, M_SCSIDA);
bdd58e03
MD
2192
2193 return (error);
2194}
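
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * fallback rule applied here and in the probe path of dadone() is simply
 * "switch to READ CAPACITY(16) when the 10 byte command reports a last
 * LBA of 0xffffffff":
 */
#if 0
static __inline int
da_need_readcap16(uint32_t maxsector)
{
	/* 0xffffffff means the capacity does not fit in the 10 byte reply */
	return (maxsector == 0xffffffff);
}
#endif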
2195
984263bc 2196static void
bdd58e03 2197dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
984263bc
MD
2198{
2199 struct ccb_calc_geometry ccg;
2200 struct da_softc *softc;
2201 struct disk_params *dp;
2202
2203 softc = (struct da_softc *)periph->softc;
2204
2205 dp = &softc->params;
bdd58e03
MD
2206 dp->secsize = block_len;
2207 dp->sectors = maxsector + 1;
984263bc
MD
2208 /*
2209 * Have the controller provide us with a geometry
2210 * for this disk. The only time the geometry
2211 * matters is when we boot and the controller
2212 * is the only one knowledgeable enough to come
2213 * up with something that will make this a bootable
2214 * device.
2215 */
2216 xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
2217 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
2218 ccg.block_size = dp->secsize;
2219 ccg.volume_size = dp->sectors;
2220 ccg.heads = 0;
2221 ccg.secs_per_track = 0;
2222 ccg.cylinders = 0;
2223 xpt_action((union ccb*)&ccg);
eac73adf
PA
2224 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2225 /*
2226		 * We don't know what went wrong here, but just pick
2227 * a geometry so we don't have nasty things like divide
2228 * by zero.
2229 */
2230 dp->heads = 255;
2231 dp->secs_per_track = 255;
2232 dp->cylinders = dp->sectors / (255 * 255);
2233 if (dp->cylinders == 0) {
2234 dp->cylinders = 1;
2235 }
2236 } else {
2237 dp->heads = ccg.heads;
2238 dp->secs_per_track = ccg.secs_per_track;
2239 dp->cylinders = ccg.cylinders;
2240 }
984263bc
MD
2241}
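
/*
 * Worked example of the fallback geometry above (numbers are purely
 * illustrative): with 255 heads and 255 sectors per track, a device
 * reporting 40,000,000 sectors gets
 *
 *	cylinders = 40000000 / (255 * 255) = 40000000 / 65025 = 615
 *
 * which is only a placeholder; the geometry matters mainly for booting.
 */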
2242
984263bc
MD
2243/*
2244 * Step through all DA peripheral drivers, and if the device is still open,
2245 * sync the disk cache to physical media.
2246 */
2247static void
2248dashutdown(void * arg, int howto)
2249{
2250 struct cam_periph *periph;
2251 struct da_softc *softc;
2252
234289a4 2253 TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
984263bc 2254 union ccb ccb;
234289a4 2255
1c8b7a9a 2256 cam_periph_lock(periph);
984263bc
MD
2257 softc = (struct da_softc *)periph->softc;
2258
2259 /*
2260 * We only sync the cache if the drive is still open, and
2261 * if the drive is capable of it..
2262 */
2263 if (((softc->flags & DA_FLAG_OPEN) == 0)
1c8b7a9a
PA
2264 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
2265 cam_periph_unlock(periph);
984263bc 2266 continue;
1c8b7a9a 2267 }
984263bc
MD
2268
2269 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
2270
2271 ccb.ccb_h.ccb_state = DA_CCB_DUMP;
2272 scsi_synchronize_cache(&ccb.csio,
2273 /*retries*/1,
2274 /*cbfcnp*/dadone,
2275 MSG_SIMPLE_Q_TAG,
2276 /*begin_lba*/0, /* whole disk */
2277 /*lb_count*/0,
2278 SSD_FULL_SIZE,
19a136fb 2279 60 * 60 * 1000);
984263bc
MD
2280
2281 xpt_polled_action(&ccb);
2282
2283 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2284 if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
2285 CAM_SCSI_STATUS_ERROR)
2286 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
2287 int error_code, sense_key, asc, ascq;
2288
2289 scsi_extract_sense(&ccb.csio.sense_data,
2290 &error_code, &sense_key,
2291 &asc, &ascq);
2292
2293 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
2294 scsi_sense_print(&ccb.csio);
2295 } else {
1c8b7a9a
PA
2296 xpt_print(periph->path, "Synchronize "
2297 "cache failed, status == 0x%x, scsi status "
2298 "== 0x%x\n", ccb.ccb_h.status,
2299 ccb.csio.scsi_status);
984263bc
MD
2300 }
2301 }
2302
2303 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
2304 cam_release_devq(ccb.ccb_h.path,
2305 /*relsim_flags*/0,
2306 /*reduction*/0,
2307 /*timeout*/0,
2308 /*getcount_only*/0);
2309
1c8b7a9a 2310 cam_periph_unlock(periph);
984263bc
MD
2311 }
2312}
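
/*
 * Registration sketch (hypothetical; the actual hookup is not part of this
 * excerpt): dashutdown() is designed to run from a shutdown eventhandler,
 * along the lines of
 *
 *	EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
 *			      NULL, SHUTDOWN_PRI_DEFAULT);
 *
 * so that any still-open disks receive a SYNCHRONIZE CACHE before power-off.
 */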
2313
2314#else /* !_KERNEL */
2315
2316/*
2317 * XXX This is only left out of the kernel build to silence warnings.  If,
2318 * for some reason, this function is used in the kernel, the ifdefs should
2319 * be moved so it is included both in the kernel and userland.
2320 */
2321void
2322scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
2323 void (*cbfcnp)(struct cam_periph *, union ccb *),
2324 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
2325 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
2326 u_int32_t timeout)
2327{
2328 struct scsi_format_unit *scsi_cmd;
2329
2330 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
2331 scsi_cmd->opcode = FORMAT_UNIT;
2332 scsi_cmd->byte2 = byte2;
2333 scsi_ulto2b(ileave, scsi_cmd->interleave);
2334
2335 cam_fill_csio(csio,
2336 retries,
2337 cbfcnp,
2338 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
2339 tag_action,
2340 data_ptr,
2341 dxfer_len,
2342 sense_len,
2343 sizeof(*scsi_cmd),
2344 timeout);
2345}
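
/*
 * Userland usage sketch (hypothetical, camlib based; device setup, CCB
 * header flags and error handling omitted): scsi_format_unit() only builds
 * the CDB, so a caller obtains and submits the CCB itself, e.g.
 *
 *	ccb = cam_getccb(dev);
 *	scsi_format_unit(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *			 0, 0, NULL, 0, SSD_FULL_SIZE, 60 * 60 * 1000);
 *	cam_send_ccb(dev, ccb);
 */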
2346
2347#endif /* _KERNEL */