/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.68 2008/08/23 17:13:31 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/lock.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/clock.h>
#include <machine/stdarg.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include <sys/kthread.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};
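
/*
 * Illustrative sketch (not part of the original file): a deferred action
 * is queued by wrapping it in an xpt_task.  The handler name (my_task_fn)
 * and the choice of taskqueue are assumptions for the example only; real
 * callers in this layer pick their own:
 *
 *	struct xpt_task *task;
 *
 *	task = kmalloc(sizeof(*task), M_CAMXPT, M_INTWAIT);
 *	TASK_INIT(&task->task, 0, my_task_fn, task);
 *	task->data1 = some_object;
 *	task->data2 = some_value;
 *	taskqueue_enqueue(taskqueue_thread[mycpuid], &task->task);
 */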

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
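
/*
 * Sketch of how a peripheral typically lands on these lists (the handler
 * name mydriver_async() is hypothetical; registration goes through the
 * XPT_SASYNC_CB CCB rather than touching async_node directly):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriver_async;
 *	csa.callback_arg = periph;
 *	xpt_action((union ccb *)&csa);
 */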

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
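
/*
 * Because of the #ifndef above, the ceiling can be raised from the kernel
 * configuration, e.g. "options CAM_MAX_HIGHPOWER=8" (illustrative value).
 */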

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	struct	cam_sim  *sim;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define	CAM_DEV_UNCONFIGURED		0x01
#define	CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define	CAM_DEV_REL_ON_COMPLETE		0x04
#define	CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define	CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define	CAM_DEV_TAG_AFTER_COUNT		0x20
#define	CAM_DEV_INQUIRY_DATA_VALID	0x40
#define	CAM_DEV_IN_DV			0x80
#define	CAM_DEV_DV_HIT_BOTTOM		0x100
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	u_int32_t	 refcount;
	struct callout	 callout;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
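
/*
 * A path is normally built and torn down with the helpers declared later
 * in this file rather than by filling in the struct by hand; a minimal
 * sketch:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id)
 *	    == CAM_REQ_CMP) {
 *		(use path)
 *		xpt_free_path(path);
 *	}
 */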

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};

static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");

#define	CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI_SPARSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
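
/*
 * Operationally, hi-LUN searching on SCSI-3 and newer devices can be
 * enabled at runtime with "sysctl kern.cam.cam_srch_hi=1", or at boot by
 * setting kern.cam.cam_srch_hi=1 in loader.conf (the tunable and sysctl
 * defined above).
 */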

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags		flags;
	u_int32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr)	ccb_scanq;
	int			ccb_scanq_running;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	struct lock		xpt_topo_lock;
	struct lock		xpt_lock;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *	   and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* This does not support other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 * PR: 23536
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by:  William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
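
/*
 * Sketch of how the table is consulted (this mirrors xpt_find_quirk(),
 * declared below and defined later in this file): the device's inquiry
 * data is run through cam_quirkmatch() with scsi_inquiry_match() as the
 * comparison function, and the winning entry is hung off the device:
 *
 *	caddr_t match;
 *
 *	match = cam_quirkmatch((caddr_t)&device->inq_data,
 *			       (caddr_t)xpt_quirk_table,
 *			       xpt_quirk_table_size,
 *			       sizeof(*xpt_quirk_table),
 *			       scsi_inquiry_match);
 *	device->quirk = (struct xpt_quirk_entry *)match;
 */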

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct spinlock cam_simq_spin;

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
	{ "xpt", XPT_CDEV_MAJOR, 0 },
	.d_open = xptopen,
	.d_close = xptclose,
	.d_ioctl = xptioctl
};

static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim;
static struct lock    cam_dead_lock;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
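
/*
 * Example kernel configuration fragment satisfying the constraints checked
 * above (the specific bus/target/lun and flag values are illustrative):
 *
 *	options CAMDEBUG
 *	options CAM_DEBUG_BUS=0
 *	options CAM_DEBUG_TARGET=1
 *	options CAM_DEBUG_LUN=0
 *	options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
 */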

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void	 camisr(void *);
static void	 camisr_runqueue(struct cam_sim *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static int	 proberequestbackoff(struct cam_periph *periph,
				     struct cam_ed *device);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_devise_transport(struct cam_path *path);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init(void)
{
	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	/*
	 * Only allow read-write access.
	 */
	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((ap->a_oflags & O_NONBLOCK) != 0) {
		kprintf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	/* Mark ourselves open */
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	xsoftc.flags |= XPT_FLAG_OPEN;
	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	return(0);
}

static int
xptclose(struct dev_close_args *ap)
{

	/* Mark ourselves closed */
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	xsoftc.flags &= ~XPT_FLAG_OPEN;
	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct dev_ioctl_args *ap)
{
	int error;

	error = 0;

	switch(ap->a_cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)ap->a_data;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL) {
			error = EINVAL;
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			CAM_SIM_UNLOCK(bus->sim);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			CAM_SIM_UNLOCK(bus->sim);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
	 * for the device peripheral driver list to change since we look at
	 * the whole thing in one pass, and we do it with lock protection.
	 */
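	/*
	 * From userland the lookup described above looks roughly like this
	 * (illustrative sketch; the "da" driver and unit 0 are assumptions,
	 * and libcam wraps the same sequence):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
	 *	ccb.cgdl.unit_number = 0;
	 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
	 *	    ccb.ccb_h.status == CAM_REQ_CMP)
	 *		printf("pass device: %s%d\n", ccb.cgdl.periph_name,
	 *		       ccb.cgdl.unit_number);
	 */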
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)ap->a_data;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our lock protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)ap->a_data;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
ptstartover:
		cur_generation = xsoftc.xpt_generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
				lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
				splbreaknum = 100;
				if (cur_generation != xsoftc.xpt_generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				kprintf("xptioctl: pass driver is not in the "
				       "kernel\n");
				kprintf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

/*
 * Thread to handle asynchronous main-context requests.
 *
 * This function is typically used by drivers to perform complex actions
 * such as bus scans and engineering requests in a main context instead
 * of an interrupt context.
 */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
#if 0
	struct cam_sim	*sim;
#endif

	for (;;) {
		xpt_lock_buses();
		xsoftc.ccb_scanq_running = 1;
		while ((ccb = (void *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h,
				     sim_links.tqe);
			xpt_unlock_buses();
#if 0
			sim = ccb->ccb_h.path->bus->sim;
			CAM_SIM_LOCK(sim);
#endif
			xpt_action(ccb);
#if 0
			CAM_SIM_UNLOCK(sim);
			xpt_lock_buses();
#endif
		}
		xsoftc.ccb_scanq_running = 0;
		tsleep_interlock(&xsoftc.ccb_scanq, 0);
		xpt_unlock_buses();
		tsleep(&xsoftc.ccb_scanq, PINTERLOCKED, "ccb_scanq", 0);
	}
}

/*
 * Issue an asynchronous action
 */
void
xpt_action_async(union ccb *ccb)
{
	xpt_lock_buses();
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	if (xsoftc.ccb_scanq_running == 0) {
		xsoftc.ccb_scanq_running = 1;
		wakeup(&xsoftc.ccb_scanq);
	}
	xpt_unlock_buses();
}
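
/*
 * A minimal sketch of a deferred bus rescan using the queue above (the
 * completion hook name "mydone" is hypothetical; the CCB is consumed by
 * the scanner thread):
 *
 *	union ccb *ccb = xpt_alloc_ccb();
 *
 *	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
 *			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
 *	    == CAM_REQ_CMP) {
 *		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
 *		ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *		ccb->ccb_h.cbfcnp = mydone;
 *		ccb->crcn.flags = CAM_FLAG_NONE;
 *		xpt_action_async(ccb);
 *	}
 */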


/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&cam_simq);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	spin_init(&cam_simq_spin);
	lockinit(&xsoftc.xpt_lock, "XPT lock", 0, LK_CANRECURSE);
	lockinit(&xsoftc.xpt_topo_lock, "XPT topology lock", 0, LK_CANRECURSE);

	SLIST_INIT(&cam_dead_sim.ccb_freeq);
	TAILQ_INIT(&cam_dead_sim.sim_doneq);
	spin_init(&cam_dead_sim.sim_spin);
	cam_dead_sim.sim_action = dead_sim_action;
	cam_dead_sim.sim_poll = dead_sim_poll;
	cam_dead_sim.sim_name = "dead_sim";
	cam_dead_sim.lock = &cam_dead_lock;
	lockinit(&cam_dead_lock, "XPT dead_sim lock", 0, LK_CANRECURSE);
	cam_dead_sim.flags |= CAM_SIM_DEREGISTERED;

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*lock*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	cam_simq_release(devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	xpt_sim->max_ccbs = 16;

	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
		kprintf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		kprintf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_free_path(path);

	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
					 M_CAMXPT, M_INTWAIT | M_ZERO);
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	xsoftc.xpt_config_hook->ich_desc = "xpt";
	xsoftc.xpt_config_hook->ich_order = 1000;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		kfree(xsoftc.xpt_config_hook, M_CAMXPT);
		kprintf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* fire up rescan thread */
	if (kthread_create(xpt_scanner_thread, NULL, NULL, "xpt_thrd")) {
		kprintf("xpt_init: failed to create rescan thread\n");
	}
	/* Install our software interrupt handlers */
	register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		kprintf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	sim_lock_assert_owned(periph->sim->lock);

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
	}

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	xsoftc.xpt_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	sim_lock_assert_owned(periph->sim->lock);

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
	}

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	xsoftc.xpt_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
}
b05e84c9
PA
1622void
1623xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1624{
1625 struct ccb_pathinq cpi;
1626 struct ccb_trans_settings cts;
1627 struct cam_path *path;
1628 u_int speed;
1629 u_int freq;
1630 u_int mb;
1631
1c8b7a9a
PA
1632 sim_lock_assert_owned(periph->sim->lock);
1633
b05e84c9 1634 path = periph->path;
a0ee42c5
MD
1635
1636 /* Report basic attachment and inquiry data */
e959ab2f 1637 kprintf("%s%d at %s%d bus %d target %d lun %d\n",
b05e84c9
PA
1638 periph->periph_name, periph->unit_number,
1639 path->bus->sim->sim_name,
1640 path->bus->sim->unit_number,
1641 path->bus->sim->bus_id,
1642 path->target->target_id,
1643 path->device->lun_id);
e959ab2f 1644 kprintf("%s%d: ", periph->periph_name, periph->unit_number);
b05e84c9 1645 scsi_print_inquiry(&path->device->inq_data);
a0ee42c5
MD
1646
1647 /* Report serial number */
1648 if (path->device->serial_num_len > 0) {
b05e84c9 1649 /* Don't wrap the screen - print only the first 60 chars */
e959ab2f 1650 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
b05e84c9
PA
1651 periph->unit_number, path->device->serial_num);
1652 }
a0ee42c5
MD
1653
1654 /* Acquire and report transfer speed */
b05e84c9
PA
1655 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1656 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1657 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1658 xpt_action((union ccb*)&cts);
3be8ef24
PA
1659 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1660 return;
1661 }
b05e84c9
PA
1662
1663 /* Ask the SIM for its base transfer speed */
1664 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1665 cpi.ccb_h.func_code = XPT_PATH_INQ;
1666 xpt_action((union ccb *)&cpi);
1667
1668 speed = cpi.base_transfer_speed;
1669 freq = 0;
8eb7f593 1670 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
b05e84c9
PA
1671 struct ccb_trans_settings_spi *spi;
1672
1673 spi = &cts.xport_specific.spi;
1674 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1675 && spi->sync_offset != 0) {
1676 freq = scsi_calc_syncsrate(spi->sync_period);
1677 speed = freq;
1678 }
1679
1680 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1681 speed *= (0x01 << spi->bus_width);
1682 }
8eb7f593
PA
1683 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1684 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1685 if (fc->valid & CTS_FC_VALID_SPEED) {
1686 speed = fc->bitrate;
1687 }
21015567 1688 }
b05e84c9 1689
2306276c
PA
1690 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1691 struct ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1692 if (sas->valid & CTS_SAS_VALID_SPEED) {
1693 speed = sas->bitrate;
1694 }
1695 }
1696
b05e84c9
PA
1697 mb = speed / 1000;
1698 if (mb > 0)
e959ab2f 1699 kprintf("%s%d: %d.%03dMB/s transfers",
b05e84c9
PA
1700 periph->periph_name, periph->unit_number,
1701 mb, speed % 1000);
1702 else
e959ab2f 1703 kprintf("%s%d: %dKB/s transfers", periph->periph_name,
b05e84c9 1704 periph->unit_number, speed);
a0ee42c5 1705
b05e84c9 1706 /* Report additional information about SPI connections */
8eb7f593 1707 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
b05e84c9
PA
1708 struct ccb_trans_settings_spi *spi;
1709
1710 spi = &cts.xport_specific.spi;
1711 if (freq != 0) {
e959ab2f 1712 kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
b05e84c9
PA
1713 freq % 1000,
1714 (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1715 ? " DT" : "",
1716 spi->sync_offset);
1717 }
1718 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1719 && spi->bus_width > 0) {
1720 if (freq != 0) {
e959ab2f 1721 kprintf(", ");
b05e84c9 1722 } else {
e959ab2f 1723 kprintf(" (");
b05e84c9 1724 }
e959ab2f 1725 kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
b05e84c9 1726 } else if (freq != 0) {
e959ab2f 1727 kprintf(")");
b05e84c9
PA
1728 }
1729 }
8eb7f593 1730 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1731 struct ccb_trans_settings_fc *fc;
1732
1733 fc = &cts.xport_specific.fc;
8eb7f593 1734 if (fc->valid & CTS_FC_VALID_WWNN)
e959ab2f 1735 kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
8eb7f593 1736 if (fc->valid & CTS_FC_VALID_WWPN)
e959ab2f 1737 kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
8eb7f593 1738 if (fc->valid & CTS_FC_VALID_PORT)
e959ab2f 1739 kprintf(" PortID 0x%x", fc->port);
21015567 1740 }
1741
1742 if (path->device->inq_flags & SID_CmdQue
1743 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1c8b7a9a 1744 kprintf("\n%s%d: Command Queueing Enabled",
1745 periph->periph_name, periph->unit_number);
1746 }
e959ab2f 1747 kprintf("\n");
1748
1749 /*
1750 * We only want to print the caller's announce string if they've
1751	 * passed one in.
1752 */
1753 if (announce_string != NULL)
e959ab2f 1754 kprintf("%s%d: %s\n", periph->periph_name,
b05e84c9 1755 periph->unit_number, announce_string);
b05e84c9 1756}
1757
1758static dev_match_ret
b05e84c9 1759xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1760 struct cam_eb *bus)
1761{
1762 dev_match_ret retval;
1763 int i;
1764
1765 retval = DM_RET_NONE;
1766
1767 /*
1768 * If we aren't given something to match against, that's an error.
1769 */
1770 if (bus == NULL)
1771 return(DM_RET_ERROR);
1772
1773 /*
1774 * If there are no match entries, then this bus matches no
1775 * matter what.
1776 */
1777 if ((patterns == NULL) || (num_patterns == 0))
1778 return(DM_RET_DESCEND | DM_RET_COPY);
1779
1780 for (i = 0; i < num_patterns; i++) {
1781 struct bus_match_pattern *cur_pattern;
1782
1783 /*
1784 * If the pattern in question isn't for a bus node, we
1785 * aren't interested. However, we do indicate to the
1786 * calling routine that we should continue descending the
1787 * tree, since the user wants to match against lower-level
1788 * EDT elements.
1789 */
1790 if (patterns[i].type != DEV_MATCH_BUS) {
1791 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1792 retval |= DM_RET_DESCEND;
1793 continue;
1794 }
1795
1796 cur_pattern = &patterns[i].pattern.bus_pattern;
1797
1798 /*
1799 * If they want to match any bus node, we give them any
1800 * device node.
1801 */
1802 if (cur_pattern->flags == BUS_MATCH_ANY) {
1803 /* set the copy flag */
1804 retval |= DM_RET_COPY;
1805
1806 /*
1807 * If we've already decided on an action, go ahead
1808 * and return.
1809 */
1810 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1811 return(retval);
1812 }
1813
1814 /*
1815	 * A BUS_MATCH_NONE pattern can never match anything, so skip it.
1816 */
1817 if (cur_pattern->flags == BUS_MATCH_NONE)
1818 continue;
1819
1820 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1821 && (cur_pattern->path_id != bus->path_id))
1822 continue;
1823
1824 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1825 && (cur_pattern->bus_id != bus->sim->bus_id))
1826 continue;
1827
1828 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1829 && (cur_pattern->unit_number != bus->sim->unit_number))
1830 continue;
1831
1832 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1833 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1834 DEV_IDLEN) != 0))
1835 continue;
1836
1837 /*
1c8b7a9a 1838 * If we get to this point, the user definitely wants
1839 * information on this bus. So tell the caller to copy the
1840 * data out.
1841 */
1842 retval |= DM_RET_COPY;
1843
1844 /*
1845 * If the return action has been set to descend, then we
1846 * know that we've already seen a non-bus matching
1847 * expression, therefore we need to further descend the tree.
1848 * This won't change by continuing around the loop, so we
1849 * go ahead and return. If we haven't seen a non-bus
1850 * matching expression, we keep going around the loop until
1851 * we exhaust the matching expressions. We'll set the stop
1852 * flag once we fall out of the loop.
1853 */
1854 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1855 return(retval);
1856 }
1857
1858 /*
1859 * If the return action hasn't been set to descend yet, that means
1860 * we haven't seen anything other than bus matching patterns. So
1861 * tell the caller to stop descending the tree -- the user doesn't
1862 * want to match against lower level tree elements.
1863 */
1864 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1865 retval |= DM_RET_STOP;
1866
1867 return(retval);
1868}
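
/*
 * Editorial usage sketch (not part of the original file): a consumer such
 * as camcontrol(8) exercises xptbusmatch() by handing the XPT an array of
 * dev_match_pattern structures inside an XPT_DEV_MATCH CCB.  Matching
 * every bus whose SIM is named "ahc" could look like this:
 */
#if 0
	struct dev_match_pattern pattern;

	bzero(&pattern, sizeof(pattern));
	pattern.type = DEV_MATCH_BUS;
	pattern.pattern.bus_pattern.flags = BUS_MATCH_NAME;
	strncpy(pattern.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
#endif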
1869
1870static dev_match_ret
b05e84c9 1871xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1872 struct cam_ed *device)
1873{
1874 dev_match_ret retval;
1875 int i;
1876
1877 retval = DM_RET_NONE;
1878
1879 /*
1880 * If we aren't given something to match against, that's an error.
1881 */
1882 if (device == NULL)
1883 return(DM_RET_ERROR);
1884
1885 /*
1886 * If there are no match entries, then this device matches no
1887 * matter what.
1888 */
d910b20e 1889 if ((patterns == NULL) || (num_patterns == 0))
1890 return(DM_RET_DESCEND | DM_RET_COPY);
1891
1892 for (i = 0; i < num_patterns; i++) {
1893 struct device_match_pattern *cur_pattern;
1894
1895 /*
1896 * If the pattern in question isn't for a device node, we
1897 * aren't interested.
1898 */
1899 if (patterns[i].type != DEV_MATCH_DEVICE) {
1900 if ((patterns[i].type == DEV_MATCH_PERIPH)
1901 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1902 retval |= DM_RET_DESCEND;
1903 continue;
1904 }
1905
1906 cur_pattern = &patterns[i].pattern.device_pattern;
1907
1908 /*
1909 * If they want to match any device node, we give them any
1910 * device node.
1911 */
1912 if (cur_pattern->flags == DEV_MATCH_ANY) {
1913 /* set the copy flag */
1914 retval |= DM_RET_COPY;
1915
1c8b7a9a 1916
1917 /*
1918 * If we've already decided on an action, go ahead
1919 * and return.
1920 */
1921 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1922 return(retval);
1923 }
1924
1925 /*
1926	 * A DEV_MATCH_NONE pattern can never match anything, so skip it.
1927 */
1928 if (cur_pattern->flags == DEV_MATCH_NONE)
1929 continue;
1930
1931 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1932 && (cur_pattern->path_id != device->target->bus->path_id))
1933 continue;
1934
1935 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1936 && (cur_pattern->target_id != device->target->target_id))
1937 continue;
1938
1939 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1940 && (cur_pattern->target_lun != device->lun_id))
1941 continue;
1942
1943 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1944 && (cam_quirkmatch((caddr_t)&device->inq_data,
1945 (caddr_t)&cur_pattern->inq_pat,
1946 1, sizeof(cur_pattern->inq_pat),
1947 scsi_static_inquiry_match) == NULL))
1948 continue;
1949
1950 /*
1c8b7a9a 1951 * If we get to this point, the user definitely wants
1952 * information on this device. So tell the caller to copy
1953 * the data out.
1954 */
1955 retval |= DM_RET_COPY;
1956
1957 /*
1958 * If the return action has been set to descend, then we
1959 * know that we've already seen a peripheral matching
1960 * expression, therefore we need to further descend the tree.
1961 * This won't change by continuing around the loop, so we
1962 * go ahead and return. If we haven't seen a peripheral
1963 * matching expression, we keep going around the loop until
1964 * we exhaust the matching expressions. We'll set the stop
1965 * flag once we fall out of the loop.
1966 */
1967 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1968 return(retval);
1969 }
1970
1971 /*
1972 * If the return action hasn't been set to descend yet, that means
1973 * we haven't seen any peripheral matching patterns. So tell the
1974 * caller to stop descending the tree -- the user doesn't want to
1975 * match against lower level tree elements.
1976 */
1977 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1978 retval |= DM_RET_STOP;
1979
1980 return(retval);
1981}
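
/*
 * Editorial sketch of an inquiry-based device pattern for the code above
 * (values are illustrative only): select every fixed direct-access device
 * from one vendor, with '*' wildcards for the other inquiry fields.
 */
#if 0
	struct dev_match_pattern pattern;
	struct scsi_static_inquiry_pattern *inq;

	bzero(&pattern, sizeof(pattern));
	pattern.type = DEV_MATCH_DEVICE;
	pattern.pattern.device_pattern.flags = DEV_MATCH_INQUIRY;
	inq = &pattern.pattern.device_pattern.inq_pat;
	inq->type = T_DIRECT;
	inq->media_type = SIP_MEDIA_FIXED;
	strcpy(inq->vendor, "SEAGATE");
	strcpy(inq->product, "*");
	strcpy(inq->revision, "*");
#endif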
1982
1983/*
1984 * Match a single peripheral against any number of match patterns.
1985 */
1986static dev_match_ret
b05e84c9 1987xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1988 struct cam_periph *periph)
1989{
1990 dev_match_ret retval;
1991 int i;
1992
1993 /*
1994 * If we aren't given something to match against, that's an error.
1995 */
1996 if (periph == NULL)
1997 return(DM_RET_ERROR);
1998
1999 /*
2000 * If there are no match entries, then this peripheral matches no
2001 * matter what.
2002 */
2003 if ((patterns == NULL) || (num_patterns == 0))
2004 return(DM_RET_STOP | DM_RET_COPY);
2005
2006 /*
2007 * There aren't any nodes below a peripheral node, so there's no
2008 * reason to descend the tree any further.
2009 */
2010 retval = DM_RET_STOP;
2011
2012 for (i = 0; i < num_patterns; i++) {
2013 struct periph_match_pattern *cur_pattern;
2014
2015 /*
2016 * If the pattern in question isn't for a peripheral, we
2017 * aren't interested.
2018 */
2019 if (patterns[i].type != DEV_MATCH_PERIPH)
2020 continue;
2021
2022 cur_pattern = &patterns[i].pattern.periph_pattern;
2023
2024 /*
2025 * If they want to match on anything, then we will do so.
2026 */
2027 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2028 /* set the copy flag */
2029 retval |= DM_RET_COPY;
2030
2031 /*
2032 * We've already set the return action to stop,
2033 * since there are no nodes below peripherals in
2034 * the tree.
2035 */
2036 return(retval);
2037 }
2038
2039 /*
2040	 * A PERIPH_MATCH_NONE pattern can never match anything, so skip it.
2041 */
2042 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2043 continue;
2044
2045 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2046 && (cur_pattern->path_id != periph->path->bus->path_id))
2047 continue;
2048
2049 /*
2050	 * For the target and lun IDs, we have to make sure the
2051 * target and lun pointers aren't NULL. The xpt peripheral
2052 * has a wildcard target and device.
2053 */
2054 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2055 && ((periph->path->target == NULL)
2056 ||(cur_pattern->target_id != periph->path->target->target_id)))
2057 continue;
2058
2059 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2060 && ((periph->path->device == NULL)
2061 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2062 continue;
2063
2064 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2065 && (cur_pattern->unit_number != periph->unit_number))
2066 continue;
2067
2068 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2069 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2070 DEV_IDLEN) != 0))
2071 continue;
2072
2073 /*
1c8b7a9a 2074 * If we get to this point, the user definitely wants
2075 * information on this peripheral. So tell the caller to
2076 * copy the data out.
2077 */
2078 retval |= DM_RET_COPY;
2079
2080 /*
2081 * The return action has already been set to stop, since
2082 * peripherals don't have any nodes below them in the EDT.
2083 */
2084 return(retval);
2085 }
2086
2087 /*
2088 * If we get to this point, the peripheral that was passed in
2089 * doesn't match any of the patterns.
2090 */
2091 return(retval);
2092}
2093
2094static int
2095xptedtbusfunc(struct cam_eb *bus, void *arg)
2096{
2097 struct ccb_dev_match *cdm;
2098 dev_match_ret retval;
2099
2100 cdm = (struct ccb_dev_match *)arg;
2101
2102 /*
2103 * If our position is for something deeper in the tree, that means
2104 * that we've already seen this node. So, we keep going down.
2105 */
2106 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2107 && (cdm->pos.cookie.bus == bus)
2108 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2109 && (cdm->pos.cookie.target != NULL))
2110 retval = DM_RET_DESCEND;
2111 else
2112 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2113
2114 /*
2115 * If we got an error, bail out of the search.
2116 */
2117 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2118 cdm->status = CAM_DEV_MATCH_ERROR;
2119 return(0);
2120 }
2121
2122 /*
2123 * If the copy flag is set, copy this bus out.
2124 */
2125 if (retval & DM_RET_COPY) {
2126 int spaceleft, j;
2127
2128 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2129 sizeof(struct dev_match_result));
2130
2131 /*
2132 * If we don't have enough space to put in another
2133 * match result, save our position and tell the
2134 * user there are more devices to check.
2135 */
2136 if (spaceleft < sizeof(struct dev_match_result)) {
2137 bzero(&cdm->pos, sizeof(cdm->pos));
1c8b7a9a 2138 cdm->pos.position_type =
2139 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2140
2141 cdm->pos.cookie.bus = bus;
2142 cdm->pos.generations[CAM_BUS_GENERATION]=
1c8b7a9a 2143 xsoftc.bus_generation;
2144 cdm->status = CAM_DEV_MATCH_MORE;
2145 return(0);
2146 }
2147 j = cdm->num_matches;
2148 cdm->num_matches++;
2149 cdm->matches[j].type = DEV_MATCH_BUS;
2150 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2151 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2152 cdm->matches[j].result.bus_result.unit_number =
2153 bus->sim->unit_number;
2154 strncpy(cdm->matches[j].result.bus_result.dev_name,
2155 bus->sim->sim_name, DEV_IDLEN);
2156 }
2157
2158 /*
2159 * If the user is only interested in busses, there's no
2160 * reason to descend to the next level in the tree.
2161 */
2162 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2163 return(1);
2164
2165 /*
2166 * If there is a target generation recorded, check it to
2167 * make sure the target list hasn't changed.
2168 */
2169 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2170 && (bus == cdm->pos.cookie.bus)
2171 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2172 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2173 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2174 bus->generation)) {
2175 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2176 return(0);
2177 }
2178
2179 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2180 && (cdm->pos.cookie.bus == bus)
2181 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2182 && (cdm->pos.cookie.target != NULL))
2183 return(xpttargettraverse(bus,
2184 (struct cam_et *)cdm->pos.cookie.target,
2185 xptedttargetfunc, arg));
2186 else
2187 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2188}
2189
2190static int
2191xptedttargetfunc(struct cam_et *target, void *arg)
2192{
2193 struct ccb_dev_match *cdm;
2194
2195 cdm = (struct ccb_dev_match *)arg;
2196
2197 /*
2198 * If there is a device list generation recorded, check it to
2199 * make sure the device list hasn't changed.
2200 */
2201 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2202 && (cdm->pos.cookie.bus == target->bus)
2203 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2204 && (cdm->pos.cookie.target == target)
2205 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2206 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2207 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2208 target->generation)) {
2209 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2210 return(0);
2211 }
2212
2213 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2214 && (cdm->pos.cookie.bus == target->bus)
2215 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2216 && (cdm->pos.cookie.target == target)
2217 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2218 && (cdm->pos.cookie.device != NULL))
2219 return(xptdevicetraverse(target,
2220 (struct cam_ed *)cdm->pos.cookie.device,
2221 xptedtdevicefunc, arg));
2222 else
2223 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2224}
2225
2226static int
2227xptedtdevicefunc(struct cam_ed *device, void *arg)
2228{
2229
2230 struct ccb_dev_match *cdm;
2231 dev_match_ret retval;
2232
2233 cdm = (struct ccb_dev_match *)arg;
2234
2235 /*
2236 * If our position is for something deeper in the tree, that means
2237 * that we've already seen this node. So, we keep going down.
2238 */
2239 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2240 && (cdm->pos.cookie.device == device)
2241 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2242 && (cdm->pos.cookie.periph != NULL))
2243 retval = DM_RET_DESCEND;
2244 else
2245 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2246 device);
2247
2248 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2249 cdm->status = CAM_DEV_MATCH_ERROR;
2250 return(0);
2251 }
2252
2253 /*
2254 * If the copy flag is set, copy this device out.
2255 */
2256 if (retval & DM_RET_COPY) {
2257 int spaceleft, j;
2258
2259 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2260 sizeof(struct dev_match_result));
2261
2262 /*
2263 * If we don't have enough space to put in another
2264 * match result, save our position and tell the
2265 * user there are more devices to check.
2266 */
2267 if (spaceleft < sizeof(struct dev_match_result)) {
2268 bzero(&cdm->pos, sizeof(cdm->pos));
1c8b7a9a 2269 cdm->pos.position_type =
2270 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2271 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2272
2273 cdm->pos.cookie.bus = device->target->bus;
2274 cdm->pos.generations[CAM_BUS_GENERATION]=
1c8b7a9a 2275 xsoftc.bus_generation;
2276 cdm->pos.cookie.target = device->target;
2277 cdm->pos.generations[CAM_TARGET_GENERATION] =
2278 device->target->bus->generation;
2279 cdm->pos.cookie.device = device;
1c8b7a9a 2280 cdm->pos.generations[CAM_DEV_GENERATION] =
2281 device->target->generation;
2282 cdm->status = CAM_DEV_MATCH_MORE;
2283 return(0);
2284 }
2285 j = cdm->num_matches;
2286 cdm->num_matches++;
2287 cdm->matches[j].type = DEV_MATCH_DEVICE;
2288 cdm->matches[j].result.device_result.path_id =
2289 device->target->bus->path_id;
2290 cdm->matches[j].result.device_result.target_id =
2291 device->target->target_id;
2292 cdm->matches[j].result.device_result.target_lun =
2293 device->lun_id;
2294 bcopy(&device->inq_data,
2295 &cdm->matches[j].result.device_result.inq_data,
2296 sizeof(struct scsi_inquiry_data));
2297
2298 /* Let the user know whether this device is unconfigured */
2299 if (device->flags & CAM_DEV_UNCONFIGURED)
2300 cdm->matches[j].result.device_result.flags =
2301 DEV_RESULT_UNCONFIGURED;
2302 else
2303 cdm->matches[j].result.device_result.flags =
2304 DEV_RESULT_NOFLAG;
2305 }
2306
2307 /*
2308 * If the user isn't interested in peripherals, don't descend
2309 * the tree any further.
2310 */
2311 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2312 return(1);
2313
2314 /*
2315 * If there is a peripheral list generation recorded, make sure
2316 * it hasn't changed.
2317 */
2318 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2319 && (device->target->bus == cdm->pos.cookie.bus)
2320 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2321 && (device->target == cdm->pos.cookie.target)
2322 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2323 && (device == cdm->pos.cookie.device)
2324 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2325 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2326 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2327 device->generation)){
2328 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2329 return(0);
2330 }
2331
2332 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2333 && (cdm->pos.cookie.bus == device->target->bus)
2334 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2335 && (cdm->pos.cookie.target == device->target)
2336 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2337 && (cdm->pos.cookie.device == device)
2338 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2339 && (cdm->pos.cookie.periph != NULL))
2340 return(xptperiphtraverse(device,
2341 (struct cam_periph *)cdm->pos.cookie.periph,
2342 xptedtperiphfunc, arg));
2343 else
2344 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2345}
2346
2347static int
2348xptedtperiphfunc(struct cam_periph *periph, void *arg)
2349{
2350 struct ccb_dev_match *cdm;
2351 dev_match_ret retval;
2352
2353 cdm = (struct ccb_dev_match *)arg;
2354
2355 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2356
2357 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2358 cdm->status = CAM_DEV_MATCH_ERROR;
2359 return(0);
2360 }
2361
2362 /*
2363 * If the copy flag is set, copy this peripheral out.
2364 */
2365 if (retval & DM_RET_COPY) {
2366 int spaceleft, j;
2367
2368 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2369 sizeof(struct dev_match_result));
2370
2371 /*
2372 * If we don't have enough space to put in another
2373 * match result, save our position and tell the
2374 * user there are more devices to check.
2375 */
2376 if (spaceleft < sizeof(struct dev_match_result)) {
2377 bzero(&cdm->pos, sizeof(cdm->pos));
1c8b7a9a 2378 cdm->pos.position_type =
2379 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2380 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2381 CAM_DEV_POS_PERIPH;
2382
2383 cdm->pos.cookie.bus = periph->path->bus;
2384 cdm->pos.generations[CAM_BUS_GENERATION]=
1c8b7a9a 2385 xsoftc.bus_generation;
2386 cdm->pos.cookie.target = periph->path->target;
2387 cdm->pos.generations[CAM_TARGET_GENERATION] =
2388 periph->path->bus->generation;
2389 cdm->pos.cookie.device = periph->path->device;
1c8b7a9a 2390 cdm->pos.generations[CAM_DEV_GENERATION] =
2391 periph->path->target->generation;
2392 cdm->pos.cookie.periph = periph;
2393 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2394 periph->path->device->generation;
2395 cdm->status = CAM_DEV_MATCH_MORE;
2396 return(0);
2397 }
2398
2399 j = cdm->num_matches;
2400 cdm->num_matches++;
2401 cdm->matches[j].type = DEV_MATCH_PERIPH;
2402 cdm->matches[j].result.periph_result.path_id =
2403 periph->path->bus->path_id;
2404 cdm->matches[j].result.periph_result.target_id =
2405 periph->path->target->target_id;
2406 cdm->matches[j].result.periph_result.target_lun =
2407 periph->path->device->lun_id;
2408 cdm->matches[j].result.periph_result.unit_number =
2409 periph->unit_number;
2410 strncpy(cdm->matches[j].result.periph_result.periph_name,
2411 periph->periph_name, DEV_IDLEN);
2412 }
2413
2414 return(1);
2415}
2416
2417static int
2418xptedtmatch(struct ccb_dev_match *cdm)
2419{
2420 int ret;
2421
2422 cdm->num_matches = 0;
2423
2424 /*
2425 * Check the bus list generation. If it has changed, the user
2426 * needs to reset everything and start over.
2427 */
2428 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2429 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
1c8b7a9a 2430 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
2431 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2432 return(0);
2433 }
2434
2435 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2436 && (cdm->pos.cookie.bus != NULL))
2437 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2438 xptedtbusfunc, cdm);
2439 else
2440 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2441
2442 /*
2443 * If we get back 0, that means that we had to stop before fully
2444 * traversing the EDT. It also means that one of the subroutines
2445 * has set the status field to the proper value. If we get back 1,
2446 * we've fully traversed the EDT and copied out any matching entries.
2447 */
2448 if (ret == 1)
2449 cdm->status = CAM_DEV_MATCH_LAST;
2450
2451 return(ret);
2452}
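
/*
 * Editorial usage sketch: because xptedtbusfunc() and friends record
 * their position in cdm->pos whenever the result buffer fills, a caller
 * can retrieve an arbitrarily large EDT in fixed-size chunks by reissuing
 * the same CCB (in practice from userland via the xpt(4) device) until
 * the status is no longer CAM_DEV_MATCH_MORE:
 */
#if 0
	do {
		xpt_action((union ccb *)cdm);	/* refills cdm->matches */
		/* ... consume cdm->num_matches results here ... */
	} while (cdm->status == CAM_DEV_MATCH_MORE);
#endif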
2453
2454static int
2455xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2456{
2457 struct ccb_dev_match *cdm;
2458
2459 cdm = (struct ccb_dev_match *)arg;
2460
2461 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2462 && (cdm->pos.cookie.pdrv == pdrv)
2463 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2464 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2465 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2466 (*pdrv)->generation)) {
2467 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2468 return(0);
2469 }
2470
2471 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2472 && (cdm->pos.cookie.pdrv == pdrv)
2473 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2474 && (cdm->pos.cookie.periph != NULL))
2475 return(xptpdperiphtraverse(pdrv,
2476 (struct cam_periph *)cdm->pos.cookie.periph,
2477 xptplistperiphfunc, arg));
2478 else
2479 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2480}
2481
2482static int
2483xptplistperiphfunc(struct cam_periph *periph, void *arg)
2484{
2485 struct ccb_dev_match *cdm;
2486 dev_match_ret retval;
2487
2488 cdm = (struct ccb_dev_match *)arg;
2489
2490 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2491
2492 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2493 cdm->status = CAM_DEV_MATCH_ERROR;
2494 return(0);
2495 }
2496
2497 /*
2498 * If the copy flag is set, copy this peripheral out.
2499 */
2500 if (retval & DM_RET_COPY) {
2501 int spaceleft, j;
2502
2503 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2504 sizeof(struct dev_match_result));
2505
2506 /*
2507 * If we don't have enough space to put in another
2508 * match result, save our position and tell the
2509 * user there are more devices to check.
2510 */
2511 if (spaceleft < sizeof(struct dev_match_result)) {
2512 struct periph_driver **pdrv;
2513
2514 pdrv = NULL;
2515 bzero(&cdm->pos, sizeof(cdm->pos));
1c8b7a9a 2516 cdm->pos.position_type =
2517 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2518 CAM_DEV_POS_PERIPH;
2519
2520 /*
2521			 * This may look a bit nonsensical, but it is
2522 * actually quite logical. There are very few
2523 * peripheral drivers, and bloating every peripheral
2524 * structure with a pointer back to its parent
2525 * peripheral driver linker set entry would cost
2526 * more in the long run than doing this quick lookup.
2527 */
2ad14cb5 2528 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2529 if (strcmp((*pdrv)->driver_name,
2530 periph->periph_name) == 0)
2531 break;
2532 }
2533
beac9491 2534 if (*pdrv == NULL) {
2535 cdm->status = CAM_DEV_MATCH_ERROR;
2536 return(0);
2537 }
2538
2539 cdm->pos.cookie.pdrv = pdrv;
2540 /*
2541 * The periph generation slot does double duty, as
2542 * does the periph pointer slot. They are used for
2543 * both edt and pdrv lookups and positioning.
2544 */
2545 cdm->pos.cookie.periph = periph;
2546 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2547 (*pdrv)->generation;
2548 cdm->status = CAM_DEV_MATCH_MORE;
2549 return(0);
2550 }
2551
2552 j = cdm->num_matches;
2553 cdm->num_matches++;
2554 cdm->matches[j].type = DEV_MATCH_PERIPH;
2555 cdm->matches[j].result.periph_result.path_id =
2556 periph->path->bus->path_id;
2557
2558 /*
2559 * The transport layer peripheral doesn't have a target or
2560 * lun.
2561 */
2562 if (periph->path->target)
2563 cdm->matches[j].result.periph_result.target_id =
2564 periph->path->target->target_id;
2565 else
2566 cdm->matches[j].result.periph_result.target_id = -1;
2567
2568 if (periph->path->device)
2569 cdm->matches[j].result.periph_result.target_lun =
2570 periph->path->device->lun_id;
2571 else
2572 cdm->matches[j].result.periph_result.target_lun = -1;
2573
2574 cdm->matches[j].result.periph_result.unit_number =
2575 periph->unit_number;
2576 strncpy(cdm->matches[j].result.periph_result.periph_name,
2577 periph->periph_name, DEV_IDLEN);
2578 }
2579
2580 return(1);
2581}
2582
2583static int
2584xptperiphlistmatch(struct ccb_dev_match *cdm)
2585{
2586 int ret;
2587
2588 cdm->num_matches = 0;
2589
2590 /*
2591 * At this point in the edt traversal function, we check the bus
2592 * list generation to make sure that no busses have been added or
2593 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2594 * For the peripheral driver list traversal function, however, we
2595 * don't have to worry about new peripheral driver types coming or
2596 * going; they're in a linker set, and therefore can't change
2597 * without a recompile.
2598 */
2599
2600 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2601 && (cdm->pos.cookie.pdrv != NULL))
2602 ret = xptpdrvtraverse(
2603 (struct periph_driver **)cdm->pos.cookie.pdrv,
2604 xptplistpdrvfunc, cdm);
2605 else
2606 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2607
2608 /*
2609 * If we get back 0, that means that we had to stop before fully
2610 * traversing the peripheral driver tree. It also means that one of
2611 * the subroutines has set the status field to the proper value. If
2612	 * we get back 1, we've fully traversed the peripheral driver list
2613	 * and copied out any matching entries.
2614 */
2615 if (ret == 1)
2616 cdm->status = CAM_DEV_MATCH_LAST;
2617
2618 return(ret);
2619}
2620
2621static int
2622xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2623{
2624 struct cam_eb *bus, *next_bus;
2625 int retval;
2626
2627 retval = 1;
2628
2629 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
2630 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2631 bus != NULL;
2632 bus = next_bus) {
2633 next_bus = TAILQ_NEXT(bus, links);
2634
2635 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
2636 CAM_SIM_LOCK(bus->sim);
984263bc 2637 retval = tr_func(bus, arg);
1c8b7a9a 2638 CAM_SIM_UNLOCK(bus->sim);
2639 if (retval == 0)
2640 return(retval);
1c8b7a9a 2641 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
984263bc 2642 }
1c8b7a9a 2643 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
2644
2645 return(retval);
2646}
2647
2648static int
2649xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2650 xpt_targetfunc_t *tr_func, void *arg)
2651{
2652 struct cam_et *target, *next_target;
2653 int retval;
2654
2655 retval = 1;
2656 for (target = (start_target ? start_target :
2657 TAILQ_FIRST(&bus->et_entries));
2658 target != NULL; target = next_target) {
2659
2660 next_target = TAILQ_NEXT(target, links);
2661
2662 retval = tr_func(target, arg);
2663
2664 if (retval == 0)
2665 return(retval);
2666 }
2667
2668 return(retval);
2669}
2670
2671static int
2672xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2673 xpt_devicefunc_t *tr_func, void *arg)
2674{
2675 struct cam_ed *device, *next_device;
2676 int retval;
2677
2678 retval = 1;
2679 for (device = (start_device ? start_device :
2680 TAILQ_FIRST(&target->ed_entries));
2681 device != NULL;
2682 device = next_device) {
2683
2684 next_device = TAILQ_NEXT(device, links);
2685
2686 retval = tr_func(device, arg);
2687
2688 if (retval == 0)
2689 return(retval);
2690 }
2691
2692 return(retval);
2693}
2694
2695static int
2696xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2697 xpt_periphfunc_t *tr_func, void *arg)
2698{
2699 struct cam_periph *periph, *next_periph;
2700 int retval;
2701
2702 retval = 1;
2703
2704 for (periph = (start_periph ? start_periph :
2705 SLIST_FIRST(&device->periphs));
2706 periph != NULL;
2707 periph = next_periph) {
2708
2709 next_periph = SLIST_NEXT(periph, periph_links);
2710
2711 retval = tr_func(periph, arg);
2712 if (retval == 0)
2713 return(retval);
2714 }
2715
2716 return(retval);
2717}
2718
2719static int
2720xptpdrvtraverse(struct periph_driver **start_pdrv,
2721 xpt_pdrvfunc_t *tr_func, void *arg)
2722{
2723 struct periph_driver **pdrv;
2724 int retval;
2725
2726 retval = 1;
2727
2728 /*
2729 * We don't traverse the peripheral driver list like we do the
2730 * other lists, because it is a linker set, and therefore cannot be
2731 * changed during runtime. If the peripheral driver list is ever
2732 * re-done to be something other than a linker set (i.e. it can
2733 * change while the system is running), the list traversal should
2734 * be modified to work like the other traversal functions.
2735 */
2736 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2737 *pdrv != NULL; pdrv++) {
2738 retval = tr_func(pdrv, arg);
2739
2740 if (retval == 0)
2741 return(retval);
984263bc 2742 }
2ad14cb5 2743
2744 return(retval);
2745}
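
/*
 * Editorial note: the periph_drivers array walked above is a linker set
 * populated at link time.  A peripheral driver typically enters it with
 * the PERIPHDRIVER_DECLARE() macro, along these lines (sketch based on
 * the "da" driver's registration; consult cam_periph.h for the macro):
 */
#if 0
	static struct periph_driver dadriver = {
		dainit, "da",
		TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
	};

	PERIPHDRIVER_DECLARE(da, dadriver);
#endif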
2746
2747static int
2748xptpdperiphtraverse(struct periph_driver **pdrv,
2749 struct cam_periph *start_periph,
2750 xpt_periphfunc_t *tr_func, void *arg)
2751{
2752 struct cam_periph *periph, *next_periph;
2753 int retval;
2754
2755 retval = 1;
2756
2757 for (periph = (start_periph ? start_periph :
2758 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2759 periph = next_periph) {
2760
2761 next_periph = TAILQ_NEXT(periph, unit_links);
2762
2763 retval = tr_func(periph, arg);
2764 if (retval == 0)
2765 return(retval);
2766 }
2767 return(retval);
2768}
2769
2770static int
2771xptdefbusfunc(struct cam_eb *bus, void *arg)
2772{
2773 struct xpt_traverse_config *tr_config;
2774
2775 tr_config = (struct xpt_traverse_config *)arg;
2776
2777 if (tr_config->depth == XPT_DEPTH_BUS) {
2778 xpt_busfunc_t *tr_func;
2779
2780 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2781
2782 return(tr_func(bus, tr_config->tr_arg));
2783 } else
2784 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2785}
2786
2787static int
2788xptdeftargetfunc(struct cam_et *target, void *arg)
2789{
2790 struct xpt_traverse_config *tr_config;
2791
2792 tr_config = (struct xpt_traverse_config *)arg;
2793
2794 if (tr_config->depth == XPT_DEPTH_TARGET) {
2795 xpt_targetfunc_t *tr_func;
2796
2797 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2798
2799 return(tr_func(target, tr_config->tr_arg));
2800 } else
2801 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2802}
2803
2804static int
2805xptdefdevicefunc(struct cam_ed *device, void *arg)
2806{
2807 struct xpt_traverse_config *tr_config;
2808
2809 tr_config = (struct xpt_traverse_config *)arg;
2810
2811 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2812 xpt_devicefunc_t *tr_func;
2813
2814 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2815
2816 return(tr_func(device, tr_config->tr_arg));
2817 } else
2818 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2819}
2820
2821static int
2822xptdefperiphfunc(struct cam_periph *periph, void *arg)
2823{
2824 struct xpt_traverse_config *tr_config;
2825 xpt_periphfunc_t *tr_func;
2826
2827 tr_config = (struct xpt_traverse_config *)arg;
2828
2829 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2830
2831 /*
2832 * Unlike the other default functions, we don't check for depth
2833 * here. The peripheral driver level is the last level in the EDT,
2834 * so if we're here, we should execute the function in question.
2835 */
2836 return(tr_func(periph, tr_config->tr_arg));
2837}
2838
2839/*
2840 * Execute the given function for every bus in the EDT.
2841 */
2842static int
2843xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2844{
2845 struct xpt_traverse_config tr_config;
2846
2847 tr_config.depth = XPT_DEPTH_BUS;
2848 tr_config.tr_func = tr_func;
2849 tr_config.tr_arg = arg;
2850
2851 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2852}
2853
2854/*
2855 * Execute the given function for every device in the EDT.
2856 */
2857static int
2858xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2859{
2860 struct xpt_traverse_config tr_config;
2861
2862 tr_config.depth = XPT_DEPTH_DEVICE;
2863 tr_config.tr_func = tr_func;
2864 tr_config.tr_arg = arg;
2865
2866 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2867}
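
/*
 * Editorial usage sketch for the wrappers above: a traversal callback
 * returns nonzero to continue walking and zero to abort.  Counting the
 * configured devices could look like this (hypothetical helper, not
 * part of this file):
 */
#if 0
	static int
	xptcountdevfunc(struct cam_ed *device, void *arg)
	{
		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
			(*(int *)arg)++;
		return (1);		/* keep traversing */
	}

	...
	int count = 0;

	xpt_for_all_devices(xptcountdevfunc, &count);
#endif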
2868
2869static int
2870xptsetasyncfunc(struct cam_ed *device, void *arg)
2871{
2872 struct cam_path path;
2873 struct ccb_getdev cgd;
2874 struct async_node *cur_entry;
2875
2876 cur_entry = (struct async_node *)arg;
2877
2878 /*
2879 * Don't report unconfigured devices (Wildcard devs,
2880 * devices only for target mode, device instances
2881 * that have been invalidated but are waiting for
2882 * their last reference count to be released).
2883 */
2884 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2885 return (1);
2886
2887 xpt_compile_path(&path,
2888 NULL,
2889 device->target->bus->path_id,
2890 device->target->target_id,
2891 device->lun_id);
2892 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2893 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2894 xpt_action((union ccb *)&cgd);
2895 cur_entry->callback(cur_entry->callback_arg,
2896 AC_FOUND_DEVICE,
2897 &path, &cgd);
2898 xpt_release_path(&path);
2899
2900 return(1);
2901}
2902
2903static int
2904xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2905{
2906 struct cam_path path;
2907 struct ccb_pathinq cpi;
2908 struct async_node *cur_entry;
2909
2910 cur_entry = (struct async_node *)arg;
2911
2912 xpt_compile_path(&path, /*periph*/NULL,
2913 bus->sim->path_id,
2914 CAM_TARGET_WILDCARD,
2915 CAM_LUN_WILDCARD);
2916 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2917 cpi.ccb_h.func_code = XPT_PATH_INQ;
2918 xpt_action((union ccb *)&cpi);
2919 cur_entry->callback(cur_entry->callback_arg,
2920 AC_PATH_REGISTERED,
2921 &path, &cpi);
2922 xpt_release_path(&path);
2923
2924 return(1);
2925}
2926
2927static void
2928xpt_action_sasync_cb(void *context, int pending)
2929{
2930 struct async_node *cur_entry;
2931 struct xpt_task *task;
2932 uint32_t added;
2933
2934 task = (struct xpt_task *)context;
2935 cur_entry = (struct async_node *)task->data1;
2936 added = task->data2;
2937
2938 if ((added & AC_FOUND_DEVICE) != 0) {
2939 /*
2940 * Get this peripheral up to date with all
2941 * the currently existing devices.
2942 */
2943 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2944 }
2945 if ((added & AC_PATH_REGISTERED) != 0) {
2946 /*
2947 * Get this peripheral up to date with all
2948 * the currently existing busses.
2949 */
2950 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
e11d2676 2951 }
2952
2953 kfree(task, M_CAMXPT);
2954}
2955
2956void
2957xpt_action(union ccb *start_ccb)
2958{
2959 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2960
2961 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2962
2963 switch (start_ccb->ccb_h.func_code) {
2964 case XPT_SCSI_IO:
2965 {
b05e84c9 2966 struct cam_ed *device;
2967#ifdef CAMDEBUG
2968 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2969 struct cam_path *path;
2970
2971 path = start_ccb->ccb_h.path;
2972#endif
2973
2974 /*
2975 * For the sake of compatibility with SCSI-1
2976 * devices that may not understand the identify
2977 * message, we include lun information in the
2978 * second byte of all commands. SCSI-1 specifies
2979 * that luns are a 3 bit value and reserves only 3
2980 * bits for lun information in the CDB. Later
2981 * revisions of the SCSI spec allow for more than 8
2982 * luns, but have deprecated lun information in the
2983		 * CDB.  So, if the lun won't fit, we must omit it.
2984 *
2985 * Also be aware that during initial probing for devices,
2986 * the inquiry information is unknown but initialized to 0.
2987 * This means that this code will be exercised while probing
2988 * devices with an ANSI revision greater than 2.
2989 */
2990 device = start_ccb->ccb_h.path->device;
2991 if (device->protocol_version <= SCSI_REV_2
2992 && start_ccb->ccb_h.target_lun < 8
2993 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2994
2995 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2996 start_ccb->ccb_h.target_lun << 5;
2997 }
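		/*
		 * Editorial example: with target_lun == 3 the statement
		 * above turns a cdb_bytes[1] of 0x00 into 0x60 (3 << 5),
		 * placing the lun in bits 5-7 as SCSI-1 initiators expect.
		 */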
2998 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2999 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3000 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3001 &path->device->inq_data),
3002 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3003 cdb_str, sizeof(cdb_str))));
3004 /* FALLTHROUGH */
3005 }
3006 case XPT_TARGET_IO:
3007 case XPT_CONT_TARGET_IO:
3008 start_ccb->csio.sense_resid = 0;
3009 start_ccb->csio.resid = 0;
3010 /* FALLTHROUGH */
3011 case XPT_RESET_DEV:
3012 case XPT_ENG_EXEC:
3013 {
3014 struct cam_path *path;
c8f7fab0 3015 struct cam_sim *sim;
3016 int runq;
3017
3018 path = start_ccb->ccb_h.path;
984263bc 3019
c8f7fab0 3020 sim = path->bus->sim;
2d19cdd3 3021 if (sim == &cam_dead_sim) {
3022 /* The SIM has gone; just execute the CCB directly. */
3023 cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3024 (*(sim->sim_action))(sim, start_ccb);
3025 break;
3026 }
3027
3028 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3029 if (path->device->qfrozen_cnt == 0)
3030 runq = xpt_schedule_dev_sendq(path->bus, path->device);
3031 else
3032 runq = 0;
3033 if (runq != 0)
3034 xpt_run_dev_sendq(path->bus);
3035 break;
3036 }
3037 case XPT_SET_TRAN_SETTINGS:
3038 {
3039 xpt_set_transfer_settings(&start_ccb->cts,
3040 start_ccb->ccb_h.path->device,
3041 /*async_update*/FALSE);
3042 break;
3043 }
3044 case XPT_CALC_GEOMETRY:
3045 {
3046 struct cam_sim *sim;
3047
3048 /* Filter out garbage */
3049 if (start_ccb->ccg.block_size == 0
3050 || start_ccb->ccg.volume_size == 0) {
3051 start_ccb->ccg.cylinders = 0;
3052 start_ccb->ccg.heads = 0;
3053 start_ccb->ccg.secs_per_track = 0;
3054 start_ccb->ccb_h.status = CAM_REQ_CMP;
3055 break;
3056 }
3057 sim = start_ccb->ccb_h.path->bus->sim;
3058 (*(sim->sim_action))(sim, start_ccb);
3059 break;
3060 }
3061 case XPT_ABORT:
3062 {
3063 union ccb* abort_ccb;
3064
3065 abort_ccb = start_ccb->cab.abort_ccb;
3066 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3067
3068 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3069 struct cam_ccbq *ccbq;
3070
3071 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3072 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3073 abort_ccb->ccb_h.status =
3074 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3075 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
984263bc 3076 xpt_done(abort_ccb);
3077 start_ccb->ccb_h.status = CAM_REQ_CMP;
3078 break;
3079 }
3080 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3081 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3082 /*
3083 * We've caught this ccb en route to
3084 * the SIM. Flag it for abort and the
3085 * SIM will do so just before starting
3086 * real work on the CCB.
3087 */
3088 abort_ccb->ccb_h.status =
3089 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3090 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3091 start_ccb->ccb_h.status = CAM_REQ_CMP;
3092 break;
3093 }
1c8b7a9a 3094 }
3095 if (XPT_FC_IS_QUEUED(abort_ccb)
3096 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3097 /*
3098 * It's already completed but waiting
3099 * for our SWI to get to it.
3100 */
3101 start_ccb->ccb_h.status = CAM_UA_ABORT;
3102 break;
3103 }
3104 /*
3105 * If we weren't able to take care of the abort request
3106 * in the XPT, pass the request down to the SIM for processing.
3107 */
3108 /* FALLTHROUGH */
3109 }
3110 case XPT_ACCEPT_TARGET_IO:
3111 case XPT_EN_LUN:
3112 case XPT_IMMED_NOTIFY:
3113 case XPT_NOTIFY_ACK:
3114 case XPT_GET_TRAN_SETTINGS:
3115 case XPT_RESET_BUS:
3116 {
3117 struct cam_sim *sim;
3118
3119 sim = start_ccb->ccb_h.path->bus->sim;
3120 (*(sim->sim_action))(sim, start_ccb);
3121 break;
3122 }
3123 case XPT_PATH_INQ:
3124 {
3125 struct cam_sim *sim;
3126
3127 sim = start_ccb->ccb_h.path->bus->sim;
3128 (*(sim->sim_action))(sim, start_ccb);
3129 break;
3130 }
3131 case XPT_PATH_STATS:
3132 start_ccb->cpis.last_reset =
3133 start_ccb->ccb_h.path->bus->last_reset;
3134 start_ccb->ccb_h.status = CAM_REQ_CMP;
3135 break;
3136 case XPT_GDEV_TYPE:
3137 {
3138 struct cam_ed *dev;
3139
3140 dev = start_ccb->ccb_h.path->device;
3141 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3142 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3143 } else {
3144 struct ccb_getdev *cgd;
3145 struct cam_eb *bus;
3146 struct cam_et *tar;
3147
3148 cgd = &start_ccb->cgd;
3149 bus = cgd->ccb_h.path->bus;
3150 tar = cgd->ccb_h.path->target;
3151 cgd->inq_data = dev->inq_data;
3152 cgd->ccb_h.status = CAM_REQ_CMP;
3153 cgd->serial_num_len = dev->serial_num_len;
3154 if ((dev->serial_num_len > 0)
3155 && (dev->serial_num != NULL))
3156 bcopy(dev->serial_num, cgd->serial_num,
3157 dev->serial_num_len);
3158 }
1c8b7a9a 3159 break;
3160 }
3161 case XPT_GDEV_STATS:
3162 {
3163 struct cam_ed *dev;
3164
3165 dev = start_ccb->ccb_h.path->device;
3166 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3167 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3168 } else {
3169 struct ccb_getdevstats *cgds;
3170 struct cam_eb *bus;
3171 struct cam_et *tar;
3172
3173 cgds = &start_ccb->cgds;
3174 bus = cgds->ccb_h.path->bus;
3175 tar = cgds->ccb_h.path->target;
3176 cgds->dev_openings = dev->ccbq.dev_openings;
3177 cgds->dev_active = dev->ccbq.dev_active;
3178 cgds->devq_openings = dev->ccbq.devq_openings;
3179 cgds->devq_queued = dev->ccbq.queue.entries;
3180 cgds->held = dev->ccbq.held;
3181 cgds->last_reset = tar->last_reset;
3182 cgds->maxtags = dev->quirk->maxtags;
3183 cgds->mintags = dev->quirk->mintags;
3184 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3185 cgds->last_reset = bus->last_reset;
3186 cgds->ccb_h.status = CAM_REQ_CMP;
3187 }
3188 break;
3189 }
3190 case XPT_GDEVLIST:
3191 {
3192 struct cam_periph *nperiph;
3193 struct periph_list *periph_head;
3194 struct ccb_getdevlist *cgdl;
b05e84c9 3195 u_int i;
3196 struct cam_ed *device;
3197 int found;
3198
3199
3200 found = 0;
3201
3202 /*
3203 * Don't want anyone mucking with our data.
3204 */
3205 device = start_ccb->ccb_h.path->device;
3206 periph_head = &device->periphs;
3207 cgdl = &start_ccb->cgdl;
3208
3209 /*
3210 * Check and see if the list has changed since the user
3211 * last requested a list member. If so, tell them that the
1c8b7a9a 3212 * list has changed, and therefore they need to start over
3213 * from the beginning.
3214 */
1c8b7a9a 3215 if ((cgdl->index != 0) &&
3216 (cgdl->generation != device->generation)) {
3217 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3218 break;
3219 }
3220
3221 /*
1c8b7a9a 3222 * Traverse the list of peripherals and attempt to find
3223 * the requested peripheral.
3224 */
cbe8f7dc 3225 for (nperiph = SLIST_FIRST(periph_head), i = 0;
984263bc 3226 (nperiph != NULL) && (i <= cgdl->index);
cbe8f7dc 3227 nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3228 if (i == cgdl->index) {
3229 strncpy(cgdl->periph_name,
3230 nperiph->periph_name,
3231 DEV_IDLEN);
3232 cgdl->unit_number = nperiph->unit_number;
3233 found = 1;
3234 }
3235 }
3236 if (found == 0) {
3237 cgdl->status = CAM_GDEVLIST_ERROR;
3238 break;
3239 }
3240
3241 if (nperiph == NULL)
3242 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3243 else
3244 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3245
3246 cgdl->index++;
3247 cgdl->generation = device->generation;
3248
3249 cgdl->ccb_h.status = CAM_REQ_CMP;
3250 break;
3251 }
3252 case XPT_DEV_MATCH:
3253 {
3254 dev_pos_type position_type;
3255 struct ccb_dev_match *cdm;
3256 int ret;
3257
3258 cdm = &start_ccb->cdm;
3259
3260 /*
3261 * There are two ways of getting at information in the EDT.
3262 * The first way is via the primary EDT tree. It starts
3263 * with a list of busses, then a list of targets on a bus,
3264 * then devices/luns on a target, and then peripherals on a
3265 * device/lun. The "other" way is by the peripheral driver
3266 * lists. The peripheral driver lists are organized by
3267		 * peripheral driver (obviously), so it makes sense to
3268 * use the peripheral driver list if the user is looking
3269 * for something like "da1", or all "da" devices. If the
3270 * user is looking for something on a particular bus/target
3271 * or lun, it's generally better to go through the EDT tree.
3272 */
3273
3274 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3275 position_type = cdm->pos.position_type;
3276 else {
b05e84c9 3277 u_int i;
3278
3279 position_type = CAM_DEV_POS_NONE;
3280
3281 for (i = 0; i < cdm->num_patterns; i++) {
3282 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3283 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3284 position_type = CAM_DEV_POS_EDT;
3285 break;
3286 }
3287 }
3288
3289 if (cdm->num_patterns == 0)
3290 position_type = CAM_DEV_POS_EDT;
3291 else if (position_type == CAM_DEV_POS_NONE)
3292 position_type = CAM_DEV_POS_PDRV;
3293 }
3294
3295 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3296 case CAM_DEV_POS_EDT:
3297 ret = xptedtmatch(cdm);
3298 break;
3299 case CAM_DEV_POS_PDRV:
3300 ret = xptperiphlistmatch(cdm);
3301 break;
3302 default:
3303 cdm->status = CAM_DEV_MATCH_ERROR;
3304 break;
3305 }
3306
3307 if (cdm->status == CAM_DEV_MATCH_ERROR)
3308 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3309 else
3310 start_ccb->ccb_h.status = CAM_REQ_CMP;
3311
3312 break;
3313 }
3314 case XPT_SASYNC_CB:
3315 {
3316 struct ccb_setasync *csa;
3317 struct async_node *cur_entry;
3318 struct async_list *async_head;
3319 u_int32_t added;
3320
3321 csa = &start_ccb->csa;
3322 added = csa->event_enable;
3323 async_head = &csa->ccb_h.path->device->asyncs;
3324
3325 /*
3326 * If there is already an entry for us, simply
3327 * update it.
3328 */
3329 cur_entry = SLIST_FIRST(async_head);
3330 while (cur_entry != NULL) {
3331 if ((cur_entry->callback_arg == csa->callback_arg)
3332 && (cur_entry->callback == csa->callback))
3333 break;
3334 cur_entry = SLIST_NEXT(cur_entry, links);
3335 }
3336
3337 if (cur_entry != NULL) {
3338 /*
3339 * If the request has no flags set,
3340 * remove the entry.
3341 */
3342 added &= ~cur_entry->event_enable;
3343 if (csa->event_enable == 0) {
3344 SLIST_REMOVE(async_head, cur_entry,
3345 async_node, links);
3346 csa->ccb_h.path->device->refcount--;
bc6e3c73 3347 kfree(cur_entry, M_CAMXPT);
3348 } else {
3349 cur_entry->event_enable = csa->event_enable;
3350 }
3351 } else {
3352 cur_entry = kmalloc(sizeof(*cur_entry), M_CAMXPT,
3353 M_INTWAIT);
3354 cur_entry->event_enable = csa->event_enable;
3355 cur_entry->callback_arg = csa->callback_arg;
3356 cur_entry->callback = csa->callback;
3357 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3358 csa->ccb_h.path->device->refcount++;
3359 }
3360
3361 /*
3362 * Need to decouple this operation via a taskqueue so that
3363 * the locking doesn't become a mess.
3364 */
3365 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
3366 struct xpt_task *task;
3367
3368 task = kmalloc(sizeof(struct xpt_task), M_CAMXPT,
3369 M_INTWAIT);
3370
3371 TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
3372 task->data1 = cur_entry;
3373 task->data2 = added;
3374 taskqueue_enqueue(taskqueue_thread[mycpuid],
3375 &task->task);
984263bc 3376 }
1c8b7a9a 3377
3378 start_ccb->ccb_h.status = CAM_REQ_CMP;
3379 break;
3380 }
3381 case XPT_REL_SIMQ:
3382 {
3383 struct ccb_relsim *crs;
3384 struct cam_ed *dev;
3385
3386 crs = &start_ccb->crs;
3387 dev = crs->ccb_h.path->device;
3388 if (dev == NULL) {
3389
3390 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3391 break;
3392 }
3393
3394 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3395
9d4fe89a 3396 if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3397 /* Don't ever go below one opening */
3398 if (crs->openings > 0) {
3399 xpt_dev_ccbq_resize(crs->ccb_h.path,
3400 crs->openings);
3401
3402 if (bootverbose) {
3403 xpt_print(crs->ccb_h.path,
3404 "tagged openings now %d\n",
3405 crs->openings);
3406 }
3407 }
3408 }
3409 }
3410
3411 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3412
3413 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3414
3415 /*
3416 * Just extend the old timeout and decrement
3417 * the freeze count so that a single timeout
3418 * is sufficient for releasing the queue.
3419 */
3420 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
1c8b7a9a 3421 callout_stop(&dev->callout);
3422 } else {
3423
3424 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3425 }
3426
1c8b7a9a 3427 callout_reset(&dev->callout,
3428 (crs->release_timeout * hz) / 1000,
3429 xpt_release_devq_timeout, dev);
3430
3431 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3432
3433 }
3434
3435 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3436
3437 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3438 /*
3439 * Decrement the freeze count so that a single
3440 * completion is still sufficient to unfreeze
3441 * the queue.
3442 */
3443 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3444 } else {
1c8b7a9a 3445
3446 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3447 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3448 }
3449 }
3450
3451 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3452
3453 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3454 || (dev->ccbq.dev_active == 0)) {
3455
3456 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3457 } else {
1c8b7a9a 3458
3459 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3460 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3461 }
3462 }
3463
3464 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3465
3466 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3467 /*run_queue*/TRUE);
3468 }
3469 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3470 start_ccb->ccb_h.status = CAM_REQ_CMP;
3471 break;
3472 }
3473 case XPT_SCAN_BUS:
3474 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3475 break;
3476 case XPT_SCAN_LUN:
3477 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3478 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3479 start_ccb);
3480 break;
3481 case XPT_DEBUG: {
3482#ifdef CAMDEBUG
3483#ifdef CAM_DEBUG_DELAY
3484 cam_debug_delay = CAM_DEBUG_DELAY;
3485#endif
3486 cam_dflags = start_ccb->cdbg.flags;
3487 if (cam_dpath != NULL) {
3488 xpt_free_path(cam_dpath);
3489 cam_dpath = NULL;
3490 }
3491
3492 if (cam_dflags != CAM_DEBUG_NONE) {
3493 if (xpt_create_path(&cam_dpath, xpt_periph,
3494 start_ccb->ccb_h.path_id,
3495 start_ccb->ccb_h.target_id,
3496 start_ccb->ccb_h.target_lun) !=
3497 CAM_REQ_CMP) {
3498 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3499 cam_dflags = CAM_DEBUG_NONE;
3500 } else {
3501 start_ccb->ccb_h.status = CAM_REQ_CMP;
1c8b7a9a
PA
3502 xpt_print(cam_dpath, "debugging flags now %x\n",
3503 cam_dflags);
3504 }
3505 } else {
3506 cam_dpath = NULL;
3507 start_ccb->ccb_h.status = CAM_REQ_CMP;
3508 }
3509#else /* !CAMDEBUG */
3510 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3511#endif /* CAMDEBUG */
3512 break;
3513 }
3514 case XPT_NOOP:
3515 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3516 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3517 start_ccb->ccb_h.status = CAM_REQ_CMP;
3518 break;
3519 default:
3520 case XPT_SDEV_TYPE:
3521 case XPT_TERM_IO:
3522 case XPT_ENG_INQ:
3523 /* XXX Implement */
3524 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3525 break;
3526 }
3527}
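
/*
 * Editorial usage sketch for the XPT_SASYNC_CB case above: a peripheral
 * driver registers for async events by filling in a ccb_setasync and
 * pushing it through xpt_action().  Schematically (foo_async is a
 * hypothetical ac_callback_t):
 */
#if 0
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	csa.callback = foo_async;
	csa.callback_arg = NULL;
	xpt_action((union ccb *)&csa);
#endif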
3528
3529void
3530xpt_polled_action(union ccb *start_ccb)
3531{
984263bc 3532 u_int32_t timeout;
1c8b7a9a 3533 struct cam_sim *sim;
3534 struct cam_devq *devq;
3535 struct cam_ed *dev;
3536
3537 timeout = start_ccb->ccb_h.timeout;
3538 sim = start_ccb->ccb_h.path->bus->sim;
3539 devq = sim->devq;
3540 dev = start_ccb->ccb_h.path->device;
3541
1c8b7a9a 3542 sim_lock_assert_owned(sim->lock);
3543
3544 /*
3545 * Steal an opening so that no other queued requests
3546 * can get it before us while we simulate interrupts.
3547 */
3548 dev->ccbq.devq_openings--;
3549 dev->ccbq.dev_openings--;
3550
e8876f9e 3551 while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3552 && (--timeout > 0)) {
3553 DELAY(1000);
3554 (*(sim->sim_poll))(sim);
92cacebe 3555 camisr_runqueue(sim);
984263bc 3556 }
1c8b7a9a 3557
3558 dev->ccbq.devq_openings++;
3559 dev->ccbq.dev_openings++;
1c8b7a9a 3560
3561 if (timeout != 0) {
3562 xpt_action(start_ccb);
3563 while(--timeout > 0) {
3564 (*(sim->sim_poll))(sim);
92cacebe 3565 camisr_runqueue(sim);
3566 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3567 != CAM_REQ_INPROG)
3568 break;
3569 DELAY(1000);
3570 }
3571 if (timeout == 0) {
3572 /*
3573 * XXX Is it worth adding a sim_timeout entry
3574 * point so we can attempt recovery? If
3575 * this is only used for dumps, I don't think
3576 * it is.
3577 */
3578 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3579 }
3580 } else {
3581 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3582 }
984263bc 3583}
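
/*
 * Editorial note on xpt_polled_action(): it is meant for contexts where
 * interrupts are unavailable (e.g. kernel crash dumps).  The caller's
 * timeout is in milliseconds, since each polling pass above DELAYs for
 * 1000us.  Schematically:
 */
#if 0
	ccb->ccb_h.timeout = 60 * 1000;		/* 60 seconds */
	xpt_polled_action(ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		;	/* handle CAM_CMD_TIMEOUT or CAM_RESRC_UNAVAIL */
#endif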
1c8b7a9a 3584
3585/*
3586 * Schedule a peripheral driver to receive a ccb when its
3587 * target device has space for more transactions.
3588 */
3589void
3590xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3591{
3592 struct cam_ed *device;
c8f7fab0 3593 union ccb *work_ccb;
3594 int runq;
3595
3596 sim_lock_assert_owned(perph->sim->lock);
3597
3598 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3599 device = perph->path->device;
3600 if (periph_is_queued(perph)) {
3601 /* Simply reorder based on new priority */
3602 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3603 (" change priority to %d\n", new_priority));
3604 if (new_priority < perph->pinfo.priority) {
3605 camq_change_priority(&device->drvq,
3606 perph->pinfo.index,
3607 new_priority);
3608 }
3609 runq = 0;
2d19cdd3 3610 } else if (perph->path->bus->sim == &cam_dead_sim) {
3611 /* The SIM is gone so just call periph_start directly. */
3612 work_ccb = xpt_get_ccb(perph->path->device);
3613 if (work_ccb == NULL)
3614 return; /* XXX */
3615 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3616 perph->pinfo.priority = new_priority;
3617 perph->periph_start(perph, work_ccb);
3618 return;
984263bc
MD
3619 } else {
3620 /* New entry on the queue */
3621 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3622 (" added periph to queue\n"));
3623 perph->pinfo.priority = new_priority;
3624 perph->pinfo.generation = ++device->drvq.generation;
3625 camq_insert(&device->drvq, &perph->pinfo);
3626 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3627 }
984263bc
MD
3628 if (runq != 0) {
3629 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3630 (" calling xpt_run_devq\n"));
3631 xpt_run_dev_allocq(perph->path->bus);
3632 }
3633}
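
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * peripheral driver's strategy routine typically queues its work and
 * then asks to be scheduled; the softc layout and bio queue here are
 * assumptions of the sketch:
 *
 *	struct xx_softc *softc = periph->softc;
 *
 *	bioq_insert_tail(&softc->bio_queue, bio);
 *	xpt_schedule(periph, 1);
 *
 * The XPT later calls back into the driver's periph_start method with
 * a CCB once the device can accept another transaction.
 */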

/*
 * Schedule a device to run on a given queue.
 * If the device was inserted as a new entry on the queue,
 * return 1 meaning the device queue should be run. If we
 * were already queued, implying someone else has already
 * started the queue, return 0 so the caller doesn't attempt
 * to run the queue.
 */
static int
xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
		 u_int32_t new_priority)
{
	int retval;
	u_int32_t old_priority;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));

	old_priority = pinfo->priority;

	/*
	 * Are we already queued?
	 */
	if (pinfo->index != CAM_UNQUEUED_INDEX) {
		/* Simply reorder based on new priority */
		if (new_priority < old_priority) {
			camq_change_priority(queue, pinfo->index,
					     new_priority);
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("changed priority to %d\n",
					 new_priority));
		}
		retval = 0;
	} else {
		/* New entry on the queue */
		if (new_priority < old_priority)
			pinfo->priority = new_priority;

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("Inserting onto queue\n"));
		pinfo->generation = ++queue->generation;
		camq_insert(queue, pinfo);
		retval = 1;
	}
	return (retval);
}
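
/*
 * Note that a numerically lower priority value is the more urgent one
 * throughout the camq code: an entry is only promoted when the new
 * priority compares less than the one it already holds.
 */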

static void
xpt_run_dev_allocq(struct cam_eb *bus)
{
	struct cam_devq *devq;

	if ((devq = bus->sim->devq) == NULL) {
		CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
		return;
	}
	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
			("   qfrozen_cnt == 0x%x, entries == %d, "
			 "openings == %d, active == %d\n",
			 devq->alloc_queue.qfrozen_cnt,
			 devq->alloc_queue.entries,
			 devq->alloc_openings,
			 devq->alloc_active));

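	/*
	 * Freeze the queue for the duration of the dispatch loop: a
	 * nested call (e.g. from a periph_start routine) then sees
	 * qfrozen_cnt > 1 and falls through, leaving this invocation
	 * as the sole drainer of the queue.
	 */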
	devq->alloc_queue.qfrozen_cnt++;
	while ((devq->alloc_queue.entries > 0)
	    && (devq->alloc_openings > 0)
	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
		struct cam_ed_qinfo *qinfo;
		struct cam_ed *device;
		union ccb *work_ccb;
		struct cam_periph *drv;
		struct camq *drvq;

		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
							   CAMQ_HEAD);
		device = qinfo->device;

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("running device %p\n", device));

		drvq = &device->drvq;

#ifdef CAMDEBUG
		if (drvq->entries <= 0) {
			panic("xpt_run_dev_allocq: "
			      "Device on queue without any work to do");
		}
#endif
		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
			devq->alloc_openings--;
			devq->alloc_active++;
			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
				      drv->pinfo.priority);
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("calling periph start\n"));
			drv->periph_start(drv, work_ccb);
		} else {
			/*
			 * Malloc failure in alloc_ccb
			 */
			/*
			 * XXX add us to a list to be run from free_ccb
			 * if we don't have any ccbs active on this
			 * device queue otherwise we may never get run
			 * again.
			 */
			break;
		}

		if (drvq->entries > 0) {
			/* We have more work.  Attempt to reschedule */
			xpt_schedule_dev_allocq(bus, device);
		}
	}
	devq->alloc_queue.qfrozen_cnt--;
}