Fully synchronize sys/boot from FreeBSD-5.x, but add / to the module path
[dragonfly.git] / sys / bus / cam / cam_xpt.c
1/*
2 * Implementation of the Common Access Method Transport (XPT) layer.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
30 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.7 2003/11/10 06:12:00 dillon Exp $
31 */
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/types.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/time.h>
38#include <sys/conf.h>
39#include <sys/fcntl.h>
40#include <sys/md5.h>
41#include <sys/devicestat.h>
42#include <sys/interrupt.h>
43#include <sys/bus.h>
44
45#ifdef PC98
46#include <pc98/pc98/pc98_machdep.h> /* geometry translation */
47#endif
48
49#include <machine/clock.h>
50#include <machine/ipl.h>
51
52#include "cam.h"
53#include "cam_ccb.h"
54#include "cam_periph.h"
55#include "cam_sim.h"
56#include "cam_xpt.h"
57#include "cam_xpt_sim.h"
58#include "cam_xpt_periph.h"
59#include "cam_debug.h"
60
61#include "scsi/scsi_all.h"
62#include "scsi/scsi_message.h"
63#include "scsi/scsi_pass.h"
64#include "opt_cam.h"
65
66/* Datastructures internal to the xpt layer */
67
68/*
69 * Definition of an async handler callback block. These are used to add
70 * SIMs and peripherals to the async callback lists.
71 */
72struct async_node {
73 SLIST_ENTRY(async_node) links;
74 u_int32_t event_enable; /* Async Event enables */
75 void (*callback)(void *arg, u_int32_t code,
76 struct cam_path *path, void *args);
77 void *callback_arg;
78};
79
80SLIST_HEAD(async_list, async_node);
81SLIST_HEAD(periph_list, cam_periph);
82static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
83
84/*
85 * This is the maximum number of high powered commands (e.g. start unit)
86 * that can be outstanding at a particular time.
87 */
88#ifndef CAM_MAX_HIGHPOWER
89#define CAM_MAX_HIGHPOWER 4
90#endif
91
92/* number of high powered commands that can go through right now */
93static int num_highpower = CAM_MAX_HIGHPOWER;
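/*
 * In outline: dispatching a CCB flagged CAM_HIGH_POWER decrements
 * num_highpower; once the count reaches zero, further high powered
 * CCBs are parked on highpowerq and released one at a time as the
 * earlier ones complete.
 */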
94
95/*
96 * Structure for queueing a device in a run queue.
97 * There is one run queue for allocating new ccbs,
98 * and another for sending ccbs to the controller.
99 */
100struct cam_ed_qinfo {
101 cam_pinfo pinfo;
102 struct cam_ed *device;
103};
104
105/*
106 * The CAM EDT (Existing Device Table) contains the device information for
107 * all devices for all busses in the system. The table contains a
108 * cam_ed structure for each device on the bus.
109 */
110struct cam_ed {
111 TAILQ_ENTRY(cam_ed) links;
112 struct cam_ed_qinfo alloc_ccb_entry;
113 struct cam_ed_qinfo send_ccb_entry;
114 struct cam_et *target;
115 lun_id_t lun_id;
116 struct camq drvq; /*
117 * Queue of type drivers wanting to do
118 * work on this device.
119 */
120 struct cam_ccbq ccbq; /* Queue of pending ccbs */
121 struct async_list asyncs; /* Async callback info for this B/T/L */
122 struct periph_list periphs; /* All attached devices */
123 u_int generation; /* Generation number */
124 struct cam_periph *owner; /* Peripheral driver's ownership tag */
125 struct xpt_quirk_entry *quirk; /* Oddities about this device */
126 /* Storage for the inquiry data */
127 struct scsi_inquiry_data inq_data;
128 u_int8_t inq_flags; /*
129 * Current settings for inquiry flags.
130 * This allows us to override settings
131 * like disconnection and tagged
132 * queuing for a device.
133 */
134 u_int8_t queue_flags; /* Queue flags from the control page */
135 u_int8_t serial_num_len;
136 u_int8_t *serial_num;
137 u_int32_t qfrozen_cnt;
138 u_int32_t flags;
139#define CAM_DEV_UNCONFIGURED 0x01
140#define CAM_DEV_REL_TIMEOUT_PENDING 0x02
141#define CAM_DEV_REL_ON_COMPLETE 0x04
142#define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
143#define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
144#define CAM_DEV_TAG_AFTER_COUNT 0x20
145#define CAM_DEV_INQUIRY_DATA_VALID 0x40
146 u_int32_t tag_delay_count;
147#define CAM_TAG_DELAY_COUNT 5
148 u_int32_t refcount;
149 struct callout_handle c_handle;
150};
151
152/*
153 * Each target is represented by an ET (Existing Target). These
154 * entries are created when a target is successfully probed with an
155 * identify, and removed when a device fails to respond after a number
156 * of retries, or a bus rescan finds the device missing.
157 */
158struct cam_et {
159 TAILQ_HEAD(, cam_ed) ed_entries;
160 TAILQ_ENTRY(cam_et) links;
161 struct cam_eb *bus;
162 target_id_t target_id;
163 u_int32_t refcount;
164 u_int generation;
165 struct timeval last_reset;
166};
167
168/*
169 * Each bus is represented by an EB (Existing Bus). These entries
170 * are created by calls to xpt_bus_register and deleted by calls to
171 * xpt_bus_deregister.
172 */
173struct cam_eb {
174 TAILQ_HEAD(, cam_et) et_entries;
175 TAILQ_ENTRY(cam_eb) links;
176 path_id_t path_id;
177 struct cam_sim *sim;
178 struct timeval last_reset;
179 u_int32_t flags;
180#define CAM_EB_RUNQ_SCHEDULED 0x01
181 u_int32_t refcount;
182 u_int generation;
183};
184
185struct cam_path {
186 struct cam_periph *periph;
187 struct cam_eb *bus;
188 struct cam_et *target;
189 struct cam_ed *device;
190};
191
192struct xpt_quirk_entry {
193 struct scsi_inquiry_pattern inq_pat;
194 u_int8_t quirks;
195#define CAM_QUIRK_NOLUNS 0x01
196#define CAM_QUIRK_NOSERIAL 0x02
197#define CAM_QUIRK_HILUNS 0x04
198 u_int mintags;
199 u_int maxtags;
200};
201#define CAM_SCSI2_MAXLUN 8
202
203typedef enum {
204 XPT_FLAG_OPEN = 0x01
205} xpt_flags;
206
207struct xpt_softc {
208 xpt_flags flags;
209 u_int32_t generation;
210};
211
212static const char quantum[] = "QUANTUM";
213static const char sony[] = "SONY";
214static const char west_digital[] = "WDIGTL";
215static const char samsung[] = "SAMSUNG";
216static const char seagate[] = "SEAGATE";
217static const char microp[] = "MICROP";
218
219static struct xpt_quirk_entry xpt_quirk_table[] =
220{
221 {
222 /* Reports QUEUE FULL for temporary resource shortages */
223 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
224 /*quirks*/0, /*mintags*/24, /*maxtags*/32
225 },
226 {
227 /* Reports QUEUE FULL for temporary resource shortages */
228 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
229 /*quirks*/0, /*mintags*/24, /*maxtags*/32
230 },
231 {
232 /* Reports QUEUE FULL for temporary resource shortages */
233 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
234 /*quirks*/0, /*mintags*/24, /*maxtags*/32
235 },
236 {
237 /* Broken tagged queuing drive */
238 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
239 /*quirks*/0, /*mintags*/0, /*maxtags*/0
240 },
241 {
242 /* Broken tagged queuing drive */
243 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
244 /*quirks*/0, /*mintags*/0, /*maxtags*/0
245 },
246 {
247 /* Broken tagged queuing drive */
248 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
249 /*quirks*/0, /*mintags*/0, /*maxtags*/0
250 },
251 {
252 /*
253 * Unfortunately, the Quantum Atlas III has the same
254 * problem as the Atlas II drives above.
255 * Reported by: "Johan Granlund" <johan@granlund.nu>
256 *
257 * For future reference, the drive with the problem was:
258 * QUANTUM QM39100TD-SW N1B0
259 *
260 * It's possible that Quantum will fix the problem in later
261 * firmware revisions. If that happens, the quirk entry
262 * will need to be made specific to the firmware revisions
263 * with the problem.
264 *
265 */
266 /* Reports QUEUE FULL for temporary resource shortages */
267 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
268 /*quirks*/0, /*mintags*/24, /*maxtags*/32
269 },
270 {
271 /*
272 * 18 Gig Atlas III, same problem as the 9G version.
273 * Reported by: Andre Albsmeier
274 * <andre.albsmeier@mchp.siemens.de>
275 *
276 * For future reference, the drive with the problem was:
277 * QUANTUM QM318000TD-S N491
278 */
279 /* Reports QUEUE FULL for temporary resource shortages */
280 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
281 /*quirks*/0, /*mintags*/24, /*maxtags*/32
282 },
283 {
284 /*
285 * Broken tagged queuing drive
286 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
287 * and: Martin Renters <martin@tdc.on.ca>
288 */
289 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
290 /*quirks*/0, /*mintags*/0, /*maxtags*/0
291 },
292 /*
293 * The Seagate Medalist Pro drives have very poor write
294 * performance with anything more than 2 tags.
295 *
296 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
297 * Drive: <SEAGATE ST36530N 1444>
298 *
299 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
300 * Drive: <SEAGATE ST34520W 1281>
301 *
302 * No one has actually reported that the 9G version
303 * (ST39140*) of the Medalist Pro has the same problem, but
304 * we're assuming that it does because the 4G and 6.5G
305 * versions of the drive are broken.
306 */
307 {
308 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
309 /*quirks*/0, /*mintags*/2, /*maxtags*/2
310 },
311 {
312 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
313 /*quirks*/0, /*mintags*/2, /*maxtags*/2
314 },
315 {
316 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
317 /*quirks*/0, /*mintags*/2, /*maxtags*/2
318 },
319 {
320 /*
321 * Slow when tagged queueing is enabled. Write performance
322 * steadily drops off with more and more concurrent
323 * transactions. Best sequential write performance with
324 * tagged queueing turned off and write caching turned on.
325 *
326 * PR: kern/10398
327 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
328 * Drive: DCAS-34330 w/ "S65A" firmware.
329 *
330 * The drive with the problem had the "S65A" firmware
331 * revision, and has also been reported (by Stephen J.
332 * Roznowski <sjr@home.net>) for a drive with the "S61A"
333 * firmware revision.
334 *
335 * Although no one has reported problems with the 2 gig
336 * version of the DCAS drive, the assumption is that it
337 * has the same problems as the 4 gig version. Therefore
338 * this quirk entry disables tagged queueing for all
339 * DCAS drives.
340 */
341 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
342 /*quirks*/0, /*mintags*/0, /*maxtags*/0
343 },
344 {
345 /* Broken tagged queuing drive */
346 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
347 /*quirks*/0, /*mintags*/0, /*maxtags*/0
348 },
349 {
350 /* Broken tagged queuing drive */
351 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
352 /*quirks*/0, /*mintags*/0, /*maxtags*/0
353 },
354 {
355 /*
356 * Broken tagged queuing drive.
357 * Submitted by:
358 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
359 * in PR kern/9535
360 */
361 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
362 /*quirks*/0, /*mintags*/0, /*maxtags*/0
363 },
364 {
365 /*
366 * Slow when tagged queueing is enabled. (1.5MB/sec versus
367 * 8MB/sec.)
368 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
369 * Best performance with these drives is achieved with
370 * tagged queueing turned off, and write caching turned on.
371 */
372 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
373 /*quirks*/0, /*mintags*/0, /*maxtags*/0
374 },
375 {
376 /*
377 * Slow when tagged queueing is enabled. (1.5MB/sec versus
378 * 8MB/sec.)
379 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
380 * Best performance with these drives is achieved with
381 * tagged queueing turned off, and write caching turned on.
382 */
383 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
384 /*quirks*/0, /*mintags*/0, /*maxtags*/0
385 },
386 {
387 /*
388 * Doesn't handle queue full condition correctly,
389 * so we need to limit maxtags to what the device
390 * can handle instead of determining this automatically.
391 */
392 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
393 /*quirks*/0, /*mintags*/2, /*maxtags*/32
394 },
395 {
396 /* Really only one LUN */
397 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
398 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
399 },
400 {
401 /* I can't believe we need a quirk for DPT volumes. */
402 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
403 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
404 /*mintags*/0, /*maxtags*/255
405 },
406 {
407 /*
408 * Many Sony CDROM drives don't like multi-LUN probing.
409 */
410 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
411 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
412 },
413 {
414 /*
415 * This drive doesn't like multiple LUN probing.
416 * Submitted by: Parag Patel <parag@cgt.com>
417 */
418 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
419 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
420 },
421 {
422 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
423 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
424 },
425 {
426 /*
427 * The 8200 doesn't like multi-lun probing, and probably
428 * doesn't like serial number requests either.
429 */
430 {
431 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
432 "EXB-8200*", "*"
433 },
434 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
435 },
436 {
437 /*
438 * Let's try the same as above, but for a drive that says
439 * it's an IPL-6860 but is actually an EXB 8200.
440 */
441 {
442 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
443 "IPL-6860*", "*"
444 },
445 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
446 },
447 {
448 /*
449 * These Hitachi drives don't like multi-lun probing.
450 * The PR submitter has a DK319H, but says that the Linux
451 * kernel has a similar work-around for the DK312 and DK314,
452 * so all DK31* drives are quirked here.
453 * PR: misc/18793
454 * Submitted by: Paul Haddad <paul@pth.com>
455 */
456 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
457 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
458 },
459 {
460 /*
461 * This old revision of the TDC3600 is also SCSI-1, and
462 * hangs upon serial number probing.
463 */
464 {
465 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
466 " TDC 3600", "U07:"
467 },
468 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
469 },
470 {
471 /*
472 * Would respond to all LUNs if asked for.
473 */
474 {
475 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
476 "CP150", "*"
477 },
478 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
479 },
480 {
481 /*
482 * Would respond to all LUNs if asked for.
483 */
484 {
485 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
486 "96X2*", "*"
487 },
488 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
489 },
490 {
491 /* Submitted by: Matthew Dodd <winter@jurai.net> */
492 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
493 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
494 },
495 {
496 /* Submitted by: Matthew Dodd <winter@jurai.net> */
497 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
498 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
499 },
500 {
501 /* TeraSolutions special settings for TRC-22 RAID */
502 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
503 /*quirks*/0, /*mintags*/55, /*maxtags*/255
504 },
505 {
506 /* Veritas Storage Appliance */
507 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
508 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
509 },
510 {
511 /*
512 * Would respond to all LUNs. Device type and removable
513 * flag are jumper-selectable.
514 */
515 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
516 "Tahiti 1", "*"
517 },
518 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
519 },
520 {
521 /* Default tagged queuing parameters for all devices */
522 {
523 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
524 /*vendor*/"*", /*product*/"*", /*revision*/"*"
525 },
526 /*quirks*/0, /*mintags*/2, /*maxtags*/255
527 },
528};
529
530static const int xpt_quirk_table_size =
531 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
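/*
 * New quirks are added by inserting an entry above the catch-all
 * default at the end of the table.  A hypothetical entry (vendor and
 * product invented for illustration) that disables tagged queueing
 * for every firmware revision of a fixed direct-access device:
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "RR1000*", "*" },
 *		0, 0, 0		(quirks, mintags, maxtags)
 *	},
 */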
532
533typedef enum {
534 DM_RET_COPY = 0x01,
535 DM_RET_FLAG_MASK = 0x0f,
536 DM_RET_NONE = 0x00,
537 DM_RET_STOP = 0x10,
538 DM_RET_DESCEND = 0x20,
539 DM_RET_ERROR = 0x30,
540 DM_RET_ACTION_MASK = 0xf0
541} dev_match_ret;
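/*
 * dev_match_ret packs two fields into one byte: the low nibble holds
 * flags (currently just DM_RET_COPY, "copy this node out to the
 * caller") and the high nibble holds exactly one action.  Callers
 * test the action with, e.g.:
 *
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *		descend to this node's children;
 */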
542
543typedef enum {
544 XPT_DEPTH_BUS,
545 XPT_DEPTH_TARGET,
546 XPT_DEPTH_DEVICE,
547 XPT_DEPTH_PERIPH
548} xpt_traverse_depth;
549
550struct xpt_traverse_config {
551 xpt_traverse_depth depth;
552 void *tr_func;
553 void *tr_arg;
554};
555
556typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
557typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
558typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
559typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
560typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
561
562/* Transport layer configuration information */
563static struct xpt_softc xsoftc;
564
565/* Queues for our software interrupt handler */
566typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
567static cam_isrq_t cam_bioq;
568static cam_isrq_t cam_netq;
569
570/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
571static SLIST_HEAD(,ccb_hdr) ccb_freeq;
572static u_int xpt_max_ccbs; /*
573 * Maximum size of ccb pool. Modified as
574 * devices are added/removed or have their
575 * opening counts changed.
576 */
577static u_int xpt_ccb_count; /* Current count of allocated ccbs */
578
579struct cam_periph *xpt_periph;
580
581static periph_init_t xpt_periph_init;
582
583static periph_init_t probe_periph_init;
584
585static struct periph_driver xpt_driver =
586{
587 xpt_periph_init, "xpt",
588 TAILQ_HEAD_INITIALIZER(xpt_driver.units)
589};
590
591static struct periph_driver probe_driver =
592{
593 probe_periph_init, "probe",
594 TAILQ_HEAD_INITIALIZER(probe_driver.units)
595};
596
597DATA_SET(periphdriver_set, xpt_driver);
598DATA_SET(periphdriver_set, probe_driver);
599
600#define XPT_CDEV_MAJOR 104
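/*
 * 104 is the character device major reserved for the xpt control
 * device, created as "xpt0" by xpt_periph_init() below.
 */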
601
602static d_open_t xptopen;
603static d_close_t xptclose;
604static d_ioctl_t xptioctl;
605
606static struct cdevsw xpt_cdevsw = {
607 /* name */ "xpt",
608 /* maj */ XPT_CDEV_MAJOR,
609 /* flags */ 0,
610 /* port */ NULL,
611 /* autoq */ 0,
612
613 /* open */ xptopen,
614 /* close */ xptclose,
615 /* read */ noread,
616 /* write */ nowrite,
617 /* ioctl */ xptioctl,
618 /* poll */ nopoll,
619 /* mmap */ nommap,
620 /* strategy */ nostrategy,
621	/* dump */	nodump,
622	/* psize */	nopsize
623};
624
625static struct intr_config_hook *xpt_config_hook;
626
627/* Registered busses */
628static TAILQ_HEAD(,cam_eb) xpt_busses;
629static u_int bus_generation;
630
631/* Storage for debugging datastructures */
632#ifdef CAMDEBUG
633struct cam_path *cam_dpath;
634u_int32_t cam_dflags;
635u_int32_t cam_debug_delay;
636#endif
637
638#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
639#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
640#endif
641
642/*
643 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
644 * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS,
645 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
646 */
647#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
648 || defined(CAM_DEBUG_LUN)
649#ifdef CAMDEBUG
650#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
651 || !defined(CAM_DEBUG_LUN)
652#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
653 and CAM_DEBUG_LUN"
654#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
655#else /* !CAMDEBUG */
656#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
657#endif /* CAMDEBUG */
658#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
659
660/* Our boot-time initialization hook */
661static void xpt_init(void *);
662SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);
663
664static cam_status xpt_compile_path(struct cam_path *new_path,
665 struct cam_periph *perph,
666 path_id_t path_id,
667 target_id_t target_id,
668 lun_id_t lun_id);
669
670static void xpt_release_path(struct cam_path *path);
671
672static void xpt_async_bcast(struct async_list *async_head,
673 u_int32_t async_code,
674 struct cam_path *path,
675 void *async_arg);
676static void xpt_dev_async(u_int32_t async_code,
677 struct cam_eb *bus,
678 struct cam_et *target,
679 struct cam_ed *device,
680 void *async_arg);
681static path_id_t xptnextfreepathid(void);
682static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
683static union ccb *xpt_get_ccb(struct cam_ed *device);
684static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
685 u_int32_t new_priority);
686static void xpt_run_dev_allocq(struct cam_eb *bus);
687static void xpt_run_dev_sendq(struct cam_eb *bus);
688static timeout_t xpt_release_devq_timeout;
689static timeout_t xpt_release_simq_timeout;
690static void xpt_release_bus(struct cam_eb *bus);
691static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
692 int run_queue);
693static struct cam_et*
694 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
695static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
696static struct cam_ed*
697 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
698 lun_id_t lun_id);
699static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
700 struct cam_ed *device);
701static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
702static struct cam_eb*
703 xpt_find_bus(path_id_t path_id);
704static struct cam_et*
705 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
706static struct cam_ed*
707 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
708static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
709static void xpt_scan_lun(struct cam_periph *periph,
710 struct cam_path *path, cam_flags flags,
711 union ccb *ccb);
712static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
713static xpt_busfunc_t xptconfigbuscountfunc;
714static xpt_busfunc_t xptconfigfunc;
715static void xpt_config(void *arg);
716static xpt_devicefunc_t xptpassannouncefunc;
717static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
718static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
719static void xptpoll(struct cam_sim *sim);
720static inthand2_t swi_camnet;
721static inthand2_t swi_cambio;
722static void camisr(cam_isrq_t *queue);
723#if 0
724static void xptstart(struct cam_periph *periph, union ccb *work_ccb);
725static void xptasync(struct cam_periph *periph,
726 u_int32_t code, cam_path *path);
727#endif
728static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
729 int num_patterns, struct cam_eb *bus);
730static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
731 int num_patterns, struct cam_ed *device);
732static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
733 int num_patterns,
734 struct cam_periph *periph);
735static xpt_busfunc_t xptedtbusfunc;
736static xpt_targetfunc_t xptedttargetfunc;
737static xpt_devicefunc_t xptedtdevicefunc;
738static xpt_periphfunc_t xptedtperiphfunc;
739static xpt_pdrvfunc_t xptplistpdrvfunc;
740static xpt_periphfunc_t xptplistperiphfunc;
741static int xptedtmatch(struct ccb_dev_match *cdm);
742static int xptperiphlistmatch(struct ccb_dev_match *cdm);
743static int xptbustraverse(struct cam_eb *start_bus,
744 xpt_busfunc_t *tr_func, void *arg);
745static int xpttargettraverse(struct cam_eb *bus,
746 struct cam_et *start_target,
747 xpt_targetfunc_t *tr_func, void *arg);
748static int xptdevicetraverse(struct cam_et *target,
749 struct cam_ed *start_device,
750 xpt_devicefunc_t *tr_func, void *arg);
751static int xptperiphtraverse(struct cam_ed *device,
752 struct cam_periph *start_periph,
753 xpt_periphfunc_t *tr_func, void *arg);
754static int xptpdrvtraverse(struct periph_driver **start_pdrv,
755 xpt_pdrvfunc_t *tr_func, void *arg);
756static int xptpdperiphtraverse(struct periph_driver **pdrv,
757 struct cam_periph *start_periph,
758 xpt_periphfunc_t *tr_func,
759 void *arg);
760static xpt_busfunc_t xptdefbusfunc;
761static xpt_targetfunc_t xptdeftargetfunc;
762static xpt_devicefunc_t xptdefdevicefunc;
763static xpt_periphfunc_t xptdefperiphfunc;
764static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
765#ifdef notusedyet
766static int xpt_for_all_targets(xpt_targetfunc_t *tr_func,
767 void *arg);
768#endif
769static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
770 void *arg);
771#ifdef notusedyet
772static int xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
773 void *arg);
774#endif
775static xpt_devicefunc_t xptsetasyncfunc;
776static xpt_busfunc_t xptsetasyncbusfunc;
777static cam_status xptregister(struct cam_periph *periph,
778 void *arg);
779static cam_status proberegister(struct cam_periph *periph,
780 void *arg);
781static void probeschedule(struct cam_periph *probe_periph);
782static void probestart(struct cam_periph *periph, union ccb *start_ccb);
783static void proberequestdefaultnegotiation(struct cam_periph *periph);
784static void probedone(struct cam_periph *periph, union ccb *done_ccb);
785static void probecleanup(struct cam_periph *periph);
786static void xpt_find_quirk(struct cam_ed *device);
787static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
788 struct cam_ed *device,
789 int async_update);
790static void xpt_toggle_tags(struct cam_path *path);
791static void xpt_start_tags(struct cam_path *path);
792static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
793 struct cam_ed *dev);
794static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
795 struct cam_ed *dev);
796static __inline int periph_is_queued(struct cam_periph *periph);
797static __inline int device_is_alloc_queued(struct cam_ed *device);
798static __inline int device_is_send_queued(struct cam_ed *device);
799static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
800
801static __inline int
802xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
803{
804 int retval;
805
806 if (dev->ccbq.devq_openings > 0) {
807 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
808 cam_ccbq_resize(&dev->ccbq,
809 dev->ccbq.dev_openings
810 + dev->ccbq.dev_active);
811 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
812 }
813 /*
814 * The priority of a device waiting for CCB resources
815 * is that of the highest priority peripheral driver
816 * enqueued.
817 */
818 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
819 &dev->alloc_ccb_entry.pinfo,
820 CAMQ_GET_HEAD(&dev->drvq)->priority);
821 } else {
822 retval = 0;
823 }
824
825 return (retval);
826}
827
828static __inline int
829xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
830{
831 int retval;
832
833 if (dev->ccbq.dev_openings > 0) {
834 /*
835 * The priority of a device waiting for controller
836 * resources is that of the highest priority CCB
837 * enqueued.
838 */
839 retval =
840 xpt_schedule_dev(&bus->sim->devq->send_queue,
841 &dev->send_ccb_entry.pinfo,
842 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
843 } else {
844 retval = 0;
845 }
846 return (retval);
847}
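/*
 * The two schedulers above differ only in the resource they gate on:
 * the alloc queue waits for devq_openings (CCBs available to be
 * allocated) and inherits the priority of the most urgent peripheral
 * on drvq, while the send queue waits for dev_openings (transactions
 * the device itself will accept) and inherits the priority of the
 * head CCB.
 */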
848
849static __inline int
850periph_is_queued(struct cam_periph *periph)
851{
852 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
853}
854
855static __inline int
856device_is_alloc_queued(struct cam_ed *device)
857{
858 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
859}
860
861static __inline int
862device_is_send_queued(struct cam_ed *device)
863{
864 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
865}
866
867static __inline int
868dev_allocq_is_runnable(struct cam_devq *devq)
869{
870 /*
871 * Have work to do.
872 * Have space to do more work.
873 * Allowed to do work.
874 */
875 return ((devq->alloc_queue.qfrozen_cnt == 0)
876 && (devq->alloc_queue.entries > 0)
877 && (devq->alloc_openings > 0));
878}
879
880static void
881xpt_periph_init()
882{
883 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
884}
885
886static void
887probe_periph_init()
888{
889}
890
891
892static void
893xptdone(struct cam_periph *periph, union ccb *done_ccb)
894{
895 /* Caller will release the CCB */
896 wakeup(&done_ccb->ccb_h.cbfcnp);
897}
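/*
 * xptdone() serves as the completion callback for CCBs issued from
 * the ioctl paths below; cam_periph_runccb() sleeps on the address of
 * the cbfcnp field, so this wakeup releases the waiting thread.
 */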
898
899static int
900xptopen(dev_t dev, int flags, int fmt, struct thread *td)
901{
902 int unit;
903
904 unit = minor(dev) & 0xff;
905
906 /*
907 * Only allow read-write access.
908 */
909 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
910 return(EPERM);
911
912 /*
913 * We don't allow nonblocking access.
914 */
915 if ((flags & O_NONBLOCK) != 0) {
916 printf("xpt%d: can't do nonblocking access\n", unit);
917 return(ENODEV);
918 }
919
920 /*
921 * We only have one transport layer right now. If someone accesses
922 * us via something other than minor number 0, point out their
923 * mistake.
924 */
925 if (unit != 0) {
926 printf("xptopen: got invalid xpt unit %d\n", unit);
927 return(ENXIO);
928 }
929
930 /* Mark ourselves open */
931 xsoftc.flags |= XPT_FLAG_OPEN;
932
933 return(0);
934}
935
936static int
937xptclose(dev_t dev, int flag, int fmt, struct thread *td)
938{
939 int unit;
940
941 unit = minor(dev) & 0xff;
942
943 /*
944 * We only have one transport layer right now. If someone accesses
945 * us via something other than minor number 0, point out their
946 * mistake.
947 */
948 if (unit != 0) {
949 printf("xptclose: got invalid xpt unit %d\n", unit);
950 return(ENXIO);
951 }
952
953 /* Mark ourselves closed */
954 xsoftc.flags &= ~XPT_FLAG_OPEN;
955
956 return(0);
957}
958
959static int
960xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
961{
962 int unit, error;
963
964 error = 0;
965 unit = minor(dev) & 0xff;
966
967 /*
968 * We only have one transport layer right now. If someone accesses
969 * us via something other than minor number 0, point out their
970 * mistake.
971 */
972 if (unit != 0) {
973 printf("xptioctl: got invalid xpt unit %d\n", unit);
974 return(ENXIO);
975 }
976
977 switch(cmd) {
978 /*
979 * For the transport layer CAMIOCOMMAND ioctl, we really only want
980 * to accept CCB types that don't quite make sense to send through a
981 * passthrough driver.
982 */
983 case CAMIOCOMMAND: {
984 union ccb *ccb;
985 union ccb *inccb;
986
987 inccb = (union ccb *)addr;
988
989 switch(inccb->ccb_h.func_code) {
990 case XPT_SCAN_BUS:
991 case XPT_RESET_BUS:
992 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
993 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
994 error = EINVAL;
995 break;
996 }
997 /* FALLTHROUGH */
998 case XPT_PATH_INQ:
999 case XPT_ENG_INQ:
1000 case XPT_SCAN_LUN:
1001
1002 ccb = xpt_alloc_ccb();
1003
1004 /*
1005 * Create a path using the bus, target, and lun the
1006 * user passed in.
1007 */
1008 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1009 inccb->ccb_h.path_id,
1010 inccb->ccb_h.target_id,
1011 inccb->ccb_h.target_lun) !=
1012 CAM_REQ_CMP){
1013 error = EINVAL;
1014 xpt_free_ccb(ccb);
1015 break;
1016 }
1017 /* Ensure all of our fields are correct */
1018 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1019 inccb->ccb_h.pinfo.priority);
1020 xpt_merge_ccb(ccb, inccb);
1021 ccb->ccb_h.cbfcnp = xptdone;
1022 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1023 bcopy(ccb, inccb, sizeof(union ccb));
1024 xpt_free_path(ccb->ccb_h.path);
1025 xpt_free_ccb(ccb);
1026 break;
1027
1028 case XPT_DEBUG: {
1029 union ccb ccb;
1030
1031 /*
1032 * This is an immediate CCB, so it's okay to
1033 * allocate it on the stack.
1034 */
1035
1036 /*
1037 * Create a path using the bus, target, and lun the
1038 * user passed in.
1039 */
1040 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1041 inccb->ccb_h.path_id,
1042 inccb->ccb_h.target_id,
1043 inccb->ccb_h.target_lun) !=
1044 CAM_REQ_CMP){
1045 error = EINVAL;
1046 break;
1047 }
1048 /* Ensure all of our fields are correct */
1049 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1050 inccb->ccb_h.pinfo.priority);
1051 xpt_merge_ccb(&ccb, inccb);
1052 ccb.ccb_h.cbfcnp = xptdone;
1053 xpt_action(&ccb);
1054 bcopy(&ccb, inccb, sizeof(union ccb));
1055 xpt_free_path(ccb.ccb_h.path);
1056 break;
1057
1058 }
1059 case XPT_DEV_MATCH: {
1060 struct cam_periph_map_info mapinfo;
1061 struct cam_path *old_path;
1062
1063 /*
1064 * We can't deal with physical addresses for this
1065 * type of transaction.
1066 */
1067 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1068 error = EINVAL;
1069 break;
1070 }
1071
1072 /*
1073 * Save this in case the caller had it set to
1074 * something in particular.
1075 */
1076 old_path = inccb->ccb_h.path;
1077
1078 /*
1079 * We really don't need a path for the matching
1080 * code. The path is needed because of the
1081 * debugging statements in xpt_action(). They
1082 * assume that the CCB has a valid path.
1083 */
1084 inccb->ccb_h.path = xpt_periph->path;
1085
1086 bzero(&mapinfo, sizeof(mapinfo));
1087
1088 /*
1089 * Map the pattern and match buffers into kernel
1090 * virtual address space.
1091 */
1092 error = cam_periph_mapmem(inccb, &mapinfo);
1093
1094 if (error) {
1095 inccb->ccb_h.path = old_path;
1096 break;
1097 }
1098
1099 /*
1100 * This is an immediate CCB, we can send it on directly.
1101 */
1102 xpt_action(inccb);
1103
1104 /*
1105 * Map the buffers back into user space.
1106 */
1107 cam_periph_unmapmem(inccb, &mapinfo);
1108
1109 inccb->ccb_h.path = old_path;
1110
1111 error = 0;
1112 break;
1113 }
1114 default:
1115 error = ENOTSUP;
1116 break;
1117 }
1118 break;
1119 }
1120 /*
1121 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1122 * with the peripheral driver name and unit number filled in. The other
1123 * fields don't really matter as input. The passthrough driver name
1124 * ("pass"), and unit number are passed back in the ccb. The current
1125 * device generation number, and the index into the device peripheral
1126 * driver list, and the status are also passed back. Note that
1127 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1128 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1129 * (or rather should be) impossible for the device peripheral driver
1130 * list to change since we look at the whole thing in one pass, and
1131 * we do it with splcam protection.
1132 *
1133 */
1134 case CAMGETPASSTHRU: {
1135 union ccb *ccb;
1136 struct cam_periph *periph;
1137 struct periph_driver **p_drv;
1138 char *name;
1139 int unit;
1140 int cur_generation;
1141 int base_periph_found;
1142 int splbreaknum;
1143 int s;
1144
1145 ccb = (union ccb *)addr;
1146 unit = ccb->cgdl.unit_number;
1147 name = ccb->cgdl.periph_name;
1148 /*
1149 * Every 100 devices, we want to drop our spl protection to
1150 * give the software interrupt handler a chance to run.
1151 * Most systems won't run into this check, but this should
1152 * avoid starvation in the software interrupt handler in
1153 * large systems.
1154 */
1155 splbreaknum = 100;
1156
1157 ccb = (union ccb *)addr;
1158
1159 base_periph_found = 0;
1160
1161 /*
1162 * Sanity check -- make sure we don't get a null peripheral
1163 * driver name.
1164 */
1165 if (*ccb->cgdl.periph_name == '\0') {
1166 error = EINVAL;
1167 break;
1168 }
1169
1170 /* Keep the list from changing while we traverse it */
1171 s = splcam();
1172ptstartover:
1173 cur_generation = xsoftc.generation;
1174
1175 /* first find our driver in the list of drivers */
1176		SET_FOREACH(p_drv, periphdriver_set) {
1177 if (strcmp((*p_drv)->driver_name, name) == 0)
1178 break;
1179		}
1180
1181 if (*p_drv == NULL) {
1182 splx(s);
1183 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1184 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1185 *ccb->cgdl.periph_name = '\0';
1186 ccb->cgdl.unit_number = 0;
1187 error = ENOENT;
1188 break;
1189 }
1190
1191 /*
1192 * Run through every peripheral instance of this driver
1193 * and check to see whether it matches the unit passed
1194 * in by the user. If it does, get out of the loops and
1195 * find the passthrough driver associated with that
1196 * peripheral driver.
1197 */
1198 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1199 periph = TAILQ_NEXT(periph, unit_links)) {
1200
1201 if (periph->unit_number == unit) {
1202 break;
1203 } else if (--splbreaknum == 0) {
1204 splx(s);
1205 s = splcam();
1206 splbreaknum = 100;
1207 if (cur_generation != xsoftc.generation)
1208 goto ptstartover;
1209 }
1210 }
1211 /*
1212 * If we found the peripheral driver that the user passed
1213 * in, go through all of the peripheral drivers for that
1214 * particular device and look for a passthrough driver.
1215 */
1216 if (periph != NULL) {
1217 struct cam_ed *device;
1218 int i;
1219
1220 base_periph_found = 1;
1221 device = periph->path->device;
1222 for (i = 0, periph = device->periphs.slh_first;
1223 periph != NULL;
1224 periph = periph->periph_links.sle_next, i++) {
1225 /*
1226 * Check to see whether we have a
1227 * passthrough device or not.
1228 */
1229 if (strcmp(periph->periph_name, "pass") == 0) {
1230 /*
1231 * Fill in the getdevlist fields.
1232 */
1233 strcpy(ccb->cgdl.periph_name,
1234 periph->periph_name);
1235 ccb->cgdl.unit_number =
1236 periph->unit_number;
1237 if (periph->periph_links.sle_next)
1238 ccb->cgdl.status =
1239 CAM_GDEVLIST_MORE_DEVS;
1240 else
1241 ccb->cgdl.status =
1242 CAM_GDEVLIST_LAST_DEVICE;
1243 ccb->cgdl.generation =
1244 device->generation;
1245 ccb->cgdl.index = i;
1246 /*
1247 * Fill in some CCB header fields
1248 * that the user may want.
1249 */
1250 ccb->ccb_h.path_id =
1251 periph->path->bus->path_id;
1252 ccb->ccb_h.target_id =
1253 periph->path->target->target_id;
1254 ccb->ccb_h.target_lun =
1255 periph->path->device->lun_id;
1256 ccb->ccb_h.status = CAM_REQ_CMP;
1257 break;
1258 }
1259 }
1260 }
1261
1262 /*
1263 * If the periph is null here, one of two things has
1264 * happened. The first possibility is that we couldn't
1265 * find the unit number of the particular peripheral driver
1266 * that the user is asking about. e.g. the user asks for
1267 * the passthrough driver for "da11". We find the list of
1268 * "da" peripherals all right, but there is no unit 11.
1269 * The other possibility is that we went through the list
1270 * of peripheral drivers attached to the device structure,
1271 * but didn't find one with the name "pass". Either way,
1272 * we return ENOENT, since we couldn't find something.
1273 */
1274 if (periph == NULL) {
1275 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1276 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1277 *ccb->cgdl.periph_name = '\0';
1278 ccb->cgdl.unit_number = 0;
1279 error = ENOENT;
1280 /*
1281 * It is unfortunate that this is even necessary,
1282 * but there are many, many clueless users out there.
1283 * If this is true, the user is looking for the
1284 * passthrough driver, but doesn't have one in his
1285 * kernel.
1286 */
1287 if (base_periph_found == 1) {
1288 printf("xptioctl: pass driver is not in the "
1289 "kernel\n");
1290 printf("xptioctl: put \"device pass0\" in "
1291 "your kernel config file\n");
1292 }
1293 }
1294 splx(s);
1295 break;
1296 }
1297 default:
1298 error = ENOTTY;
1299 break;
1300 }
1301
1302 return(error);
1303}
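/*
 * Usage sketch (illustrative only; the priority value and the error
 * handling are assumptions): a userland tool such as camcontrol
 * drives CAMIOCOMMAND roughly as follows to rescan bus 0.
 *
 *	int fd = open("/dev/xpt0", O_RDWR);
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.pinfo.priority = 5;
 *	if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *		err(1, "CAMIOCOMMAND");
 */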
1304
1305/* Functions accessed by the peripheral drivers */
1306static void
1307xpt_init(dummy)
1308 void *dummy;
1309{
1310 struct cam_sim *xpt_sim;
1311 struct cam_path *path;
1312 struct cam_devq *devq;
1313 cam_status status;
1314
1315 TAILQ_INIT(&xpt_busses);
1316 TAILQ_INIT(&cam_bioq);
1317 TAILQ_INIT(&cam_netq);
1318 SLIST_INIT(&ccb_freeq);
1319 STAILQ_INIT(&highpowerq);
1320
1321 /*
1322	 * The xpt layer is, itself, the equivalent of a SIM.
1323 * Allow 16 ccbs in the ccb pool for it. This should
1324 * give decent parallelism when we probe busses and
1325 * perform other XPT functions.
1326 */
1327 devq = cam_simq_alloc(16);
1328 xpt_sim = cam_sim_alloc(xptaction,
1329 xptpoll,
1330 "xpt",
1331 /*softc*/NULL,
1332 /*unit*/0,
1333 /*max_dev_transactions*/0,
1334 /*max_tagged_dev_transactions*/0,
1335 devq);
1336 xpt_max_ccbs = 16;
1337
1338 xpt_bus_register(xpt_sim, /*bus #*/0);
1339
1340 /*
1341 * Looking at the XPT from the SIM layer, the XPT is
1342	 * the equivalent of a peripheral driver.  Allocate
1343 * a peripheral driver entry for us.
1344 */
1345 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1346 CAM_TARGET_WILDCARD,
1347 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1348 printf("xpt_init: xpt_create_path failed with status %#x,"
1349 " failing attach\n", status);
1350 return;
1351 }
1352
1353 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1354 path, NULL, 0, NULL);
1355 xpt_free_path(path);
1356
1357 xpt_sim->softc = xpt_periph;
1358
1359 /*
1360 * Register a callback for when interrupts are enabled.
1361 */
1362 xpt_config_hook =
1363 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1364 M_TEMP, M_NOWAIT | M_ZERO);
1365 if (xpt_config_hook == NULL) {
1366 printf("xpt_init: Cannot malloc config hook "
1367 "- failing attach\n");
1368 return;
1369 }
1370
1371 xpt_config_hook->ich_func = xpt_config;
1372 if (config_intrhook_establish(xpt_config_hook) != 0) {
1373 free (xpt_config_hook, M_TEMP);
1374 printf("xpt_init: config_intrhook_establish failed "
1375 "- failing attach\n");
1376 }
1377
1378 /* Install our software interrupt handlers */
1379 register_swi(SWI_CAMNET, swi_camnet, NULL, "swi_camnet");
1380 register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio");
1381}
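/*
 * Note the two stage startup: xpt_init() runs from SYSINIT before
 * interrupts are enabled, so the actual bus probe (xpt_config) is
 * deferred through the config_intrhook until interrupts work.
 */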
1382
1383static cam_status
1384xptregister(struct cam_periph *periph, void *arg)
1385{
1386 if (periph == NULL) {
1387 printf("xptregister: periph was NULL!!\n");
1388 return(CAM_REQ_CMP_ERR);
1389 }
1390
1391 periph->softc = NULL;
1392
1393 xpt_periph = periph;
1394
1395 return(CAM_REQ_CMP);
1396}
1397
1398int32_t
1399xpt_add_periph(struct cam_periph *periph)
1400{
1401 struct cam_ed *device;
1402 int32_t status;
1403 struct periph_list *periph_head;
1404
1405 device = periph->path->device;
1406
1407 periph_head = &device->periphs;
1408
1409 status = CAM_REQ_CMP;
1410
1411 if (device != NULL) {
1412 int s;
1413
1414 /*
1415 * Make room for this peripheral
1416 * so it will fit in the queue
1417 * when it's scheduled to run
1418 */
1419 s = splsoftcam();
1420 status = camq_resize(&device->drvq,
1421 device->drvq.array_size + 1);
1422
1423 device->generation++;
1424
1425 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1426
1427 splx(s);
1428 }
1429
1430 xsoftc.generation++;
1431
1432 return (status);
1433}
1434
1435void
1436xpt_remove_periph(struct cam_periph *periph)
1437{
1438 struct cam_ed *device;
1439
1440 device = periph->path->device;
1441
1442 if (device != NULL) {
1443 int s;
1444 struct periph_list *periph_head;
1445
1446 periph_head = &device->periphs;
1447
1448 /* Release the slot for this peripheral */
1449 s = splsoftcam();
1450 camq_resize(&device->drvq, device->drvq.array_size - 1);
1451
1452 device->generation++;
1453
1454 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1455
1456 splx(s);
1457 }
1458
1459 xsoftc.generation++;
1460
1461}
1462
1463void
1464xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1465{
1466 int s;
1467 u_int mb;
1468 struct cam_path *path;
1469 struct ccb_trans_settings cts;
1470
1471 path = periph->path;
1472 /*
1473 * To ensure that this is printed in one piece,
1474 * mask out CAM interrupts.
1475 */
1476 s = splsoftcam();
1477 printf("%s%d at %s%d bus %d target %d lun %d\n",
1478 periph->periph_name, periph->unit_number,
1479 path->bus->sim->sim_name,
1480 path->bus->sim->unit_number,
1481 path->bus->sim->bus_id,
1482 path->target->target_id,
1483 path->device->lun_id);
1484 printf("%s%d: ", periph->periph_name, periph->unit_number);
1485 scsi_print_inquiry(&path->device->inq_data);
1486 if ((bootverbose)
1487 && (path->device->serial_num_len > 0)) {
1488 /* Don't wrap the screen - print only the first 60 chars */
1489 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1490 periph->unit_number, path->device->serial_num);
1491 }
1492 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1493 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1494 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1495 xpt_action((union ccb*)&cts);
1496 if (cts.ccb_h.status == CAM_REQ_CMP) {
1497 u_int speed;
1498 u_int freq;
1499
1500 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1501 && cts.sync_offset != 0) {
1502 freq = scsi_calc_syncsrate(cts.sync_period);
1503 speed = freq;
1504 } else {
1505 struct ccb_pathinq cpi;
1506
1507 /* Ask the SIM for its base transfer speed */
1508 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1509 cpi.ccb_h.func_code = XPT_PATH_INQ;
1510 xpt_action((union ccb *)&cpi);
1511
1512 speed = cpi.base_transfer_speed;
1513 freq = 0;
1514 }
1515 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1516 speed *= (0x01 << cts.bus_width);
1517 mb = speed / 1000;
1518 if (mb > 0)
1519 printf("%s%d: %d.%03dMB/s transfers",
1520 periph->periph_name, periph->unit_number,
1521 mb, speed % 1000);
1522 else
1523 printf("%s%d: %dKB/s transfers", periph->periph_name,
1524 periph->unit_number, speed);
1525 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1526 && cts.sync_offset != 0) {
1527 printf(" (%d.%03dMHz, offset %d", freq / 1000,
1528 freq % 1000, cts.sync_offset);
1529 }
1530 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1531 && cts.bus_width > 0) {
1532 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1533 && cts.sync_offset != 0) {
1534 printf(", ");
1535 } else {
1536 printf(" (");
1537 }
1538 printf("%dbit)", 8 * (0x01 << cts.bus_width));
1539 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1540 && cts.sync_offset != 0) {
1541 printf(")");
1542 }
1543
1544 if (path->device->inq_flags & SID_CmdQue
1545 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1546 printf(", Tagged Queueing Enabled");
1547 }
1548
1549 printf("\n");
1550 } else if (path->device->inq_flags & SID_CmdQue
1551 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1552 printf("%s%d: Tagged Queueing Enabled\n",
1553 periph->periph_name, periph->unit_number);
1554 }
1555
1556 /*
1557 * We only want to print the caller's announce string if they've
1558	 * passed one in.
1559 */
1560 if (announce_string != NULL)
1561 printf("%s%d: %s\n", periph->periph_name,
1562 periph->unit_number, announce_string);
1563 splx(s);
1564}
1565
1566
1567static dev_match_ret
1568xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
1569 struct cam_eb *bus)
1570{
1571 dev_match_ret retval;
1572 int i;
1573
1574 retval = DM_RET_NONE;
1575
1576 /*
1577 * If we aren't given something to match against, that's an error.
1578 */
1579 if (bus == NULL)
1580 return(DM_RET_ERROR);
1581
1582 /*
1583 * If there are no match entries, then this bus matches no
1584 * matter what.
1585 */
1586 if ((patterns == NULL) || (num_patterns == 0))
1587 return(DM_RET_DESCEND | DM_RET_COPY);
1588
1589 for (i = 0; i < num_patterns; i++) {
1590 struct bus_match_pattern *cur_pattern;
1591
1592 /*
1593 * If the pattern in question isn't for a bus node, we
1594 * aren't interested. However, we do indicate to the
1595 * calling routine that we should continue descending the
1596 * tree, since the user wants to match against lower-level
1597 * EDT elements.
1598 */
1599 if (patterns[i].type != DEV_MATCH_BUS) {
1600 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1601 retval |= DM_RET_DESCEND;
1602 continue;
1603 }
1604
1605 cur_pattern = &patterns[i].pattern.bus_pattern;
1606
1607 /*
1608 * If they want to match any bus node, we give them any
1609		 * bus node.
1610 */
1611 if (cur_pattern->flags == BUS_MATCH_ANY) {
1612 /* set the copy flag */
1613 retval |= DM_RET_COPY;
1614
1615 /*
1616 * If we've already decided on an action, go ahead
1617 * and return.
1618 */
1619 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1620 return(retval);
1621 }
1622
1623 /*
1624 * Not sure why someone would do this...
1625 */
1626 if (cur_pattern->flags == BUS_MATCH_NONE)
1627 continue;
1628
1629 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1630 && (cur_pattern->path_id != bus->path_id))
1631 continue;
1632
1633 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1634 && (cur_pattern->bus_id != bus->sim->bus_id))
1635 continue;
1636
1637 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1638 && (cur_pattern->unit_number != bus->sim->unit_number))
1639 continue;
1640
1641 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1642 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1643 DEV_IDLEN) != 0))
1644 continue;
1645
1646 /*
1647 * If we get to this point, the user definitely wants
1648 * information on this bus. So tell the caller to copy the
1649 * data out.
1650 */
1651 retval |= DM_RET_COPY;
1652
1653 /*
1654 * If the return action has been set to descend, then we
1655 * know that we've already seen a non-bus matching
1656 * expression, therefore we need to further descend the tree.
1657 * This won't change by continuing around the loop, so we
1658 * go ahead and return. If we haven't seen a non-bus
1659 * matching expression, we keep going around the loop until
1660 * we exhaust the matching expressions. We'll set the stop
1661 * flag once we fall out of the loop.
1662 */
1663 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1664 return(retval);
1665 }
1666
1667 /*
1668 * If the return action hasn't been set to descend yet, that means
1669 * we haven't seen anything other than bus matching patterns. So
1670 * tell the caller to stop descending the tree -- the user doesn't
1671 * want to match against lower level tree elements.
1672 */
1673 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1674 retval |= DM_RET_STOP;
1675
1676 return(retval);
1677}
1678
1679static dev_match_ret
1680xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
1681 struct cam_ed *device)
1682{
1683 dev_match_ret retval;
1684 int i;
1685
1686 retval = DM_RET_NONE;
1687
1688 /*
1689 * If we aren't given something to match against, that's an error.
1690 */
1691 if (device == NULL)
1692 return(DM_RET_ERROR);
1693
1694 /*
1695 * If there are no match entries, then this device matches no
1696 * matter what.
1697 */
1698	if ((patterns == NULL) || (num_patterns == 0))
1699 return(DM_RET_DESCEND | DM_RET_COPY);
1700
1701 for (i = 0; i < num_patterns; i++) {
1702 struct device_match_pattern *cur_pattern;
1703
1704 /*
1705 * If the pattern in question isn't for a device node, we
1706 * aren't interested.
1707 */
1708 if (patterns[i].type != DEV_MATCH_DEVICE) {
1709 if ((patterns[i].type == DEV_MATCH_PERIPH)
1710 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1711 retval |= DM_RET_DESCEND;
1712 continue;
1713 }
1714
1715 cur_pattern = &patterns[i].pattern.device_pattern;
1716
1717 /*
1718 * If they want to match any device node, we give them any
1719 * device node.
1720 */
1721 if (cur_pattern->flags == DEV_MATCH_ANY) {
1722 /* set the copy flag */
1723 retval |= DM_RET_COPY;
1724
1725
1726 /*
1727 * If we've already decided on an action, go ahead
1728 * and return.
1729 */
1730 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1731 return(retval);
1732 }
1733
1734 /*
1735 * Not sure why someone would do this...
1736 */
1737 if (cur_pattern->flags == DEV_MATCH_NONE)
1738 continue;
1739
1740 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1741 && (cur_pattern->path_id != device->target->bus->path_id))
1742 continue;
1743
1744 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1745 && (cur_pattern->target_id != device->target->target_id))
1746 continue;
1747
1748 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1749 && (cur_pattern->target_lun != device->lun_id))
1750 continue;
1751
1752 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1753 && (cam_quirkmatch((caddr_t)&device->inq_data,
1754 (caddr_t)&cur_pattern->inq_pat,
1755 1, sizeof(cur_pattern->inq_pat),
1756 scsi_static_inquiry_match) == NULL))
1757 continue;
1758
1759 /*
1760 * If we get to this point, the user definitely wants
1761 * information on this device. So tell the caller to copy
1762 * the data out.
1763 */
1764 retval |= DM_RET_COPY;
1765
1766 /*
1767 * If the return action has been set to descend, then we
1768 * know that we've already seen a peripheral matching
1769 * expression, therefore we need to further descend the tree.
1770 * This won't change by continuing around the loop, so we
1771 * go ahead and return. If we haven't seen a peripheral
1772 * matching expression, we keep going around the loop until
1773 * we exhaust the matching expressions. We'll set the stop
1774 * flag once we fall out of the loop.
1775 */
1776 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1777 return(retval);
1778 }
1779
1780 /*
1781 * If the return action hasn't been set to descend yet, that means
1782 * we haven't seen any peripheral matching patterns. So tell the
1783 * caller to stop descending the tree -- the user doesn't want to
1784 * match against lower level tree elements.
1785 */
1786 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1787 retval |= DM_RET_STOP;
1788
1789 return(retval);
1790}
1791
1792/*
1793 * Match a single peripheral against any number of match patterns.
1794 */
1795static dev_match_ret
1796xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1797 struct cam_periph *periph)
1798{
1799 dev_match_ret retval;
1800 int i;
1801
1802 /*
1803 * If we aren't given something to match against, that's an error.
1804 */
1805 if (periph == NULL)
1806 return(DM_RET_ERROR);
1807
1808 /*
1809 * If there are no match entries, then this peripheral matches no
1810 * matter what.
1811 */
1812 if ((patterns == NULL) || (num_patterns == 0))
1813 return(DM_RET_STOP | DM_RET_COPY);
1814
1815 /*
1816 * There aren't any nodes below a peripheral node, so there's no
1817 * reason to descend the tree any further.
1818 */
1819 retval = DM_RET_STOP;
1820
1821 for (i = 0; i < num_patterns; i++) {
1822 struct periph_match_pattern *cur_pattern;
1823
1824 /*
1825 * If the pattern in question isn't for a peripheral, we
1826 * aren't interested.
1827 */
1828 if (patterns[i].type != DEV_MATCH_PERIPH)
1829 continue;
1830
1831 cur_pattern = &patterns[i].pattern.periph_pattern;
1832
1833 /*
1834 * If they want to match on anything, then we will do so.
1835 */
1836 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1837 /* set the copy flag */
1838 retval |= DM_RET_COPY;
1839
1840 /*
1841 * We've already set the return action to stop,
1842 * since there are no nodes below peripherals in
1843 * the tree.
1844 */
1845 return(retval);
1846 }
1847
1848 /*
1849 * Not sure why someone would do this...
1850 */
1851 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1852 continue;
1853
1854 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1855 && (cur_pattern->path_id != periph->path->bus->path_id))
1856 continue;
1857
1858 /*
1859 * For the target and lun id's, we have to make sure the
1860 * target and lun pointers aren't NULL. The xpt peripheral
1861 * has a wildcard target and device.
1862 */
1863 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1864 && ((periph->path->target == NULL)
1865 ||(cur_pattern->target_id != periph->path->target->target_id)))
1866 continue;
1867
1868 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1869 && ((periph->path->device == NULL)
1870 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1871 continue;
1872
1873 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1874 && (cur_pattern->unit_number != periph->unit_number))
1875 continue;
1876
1877 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1878 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1879 DEV_IDLEN) != 0))
1880 continue;
1881
1882 /*
1883 * If we get to this point, the user definitely wants
1884 * information on this peripheral. So tell the caller to
1885 * copy the data out.
1886 */
1887 retval |= DM_RET_COPY;
1888
1889 /*
1890 * The return action has already been set to stop, since
1891 * peripherals don't have any nodes below them in the EDT.
1892 */
1893 return(retval);
1894 }
1895
1896 /*
1897 * If we get to this point, the peripheral that was passed in
1898 * doesn't match any of the patterns.
1899 */
1900 return(retval);
1901}
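
/*
 * The patterns consumed above are normally constructed in userland and
 * handed down in an XPT_DEV_MATCH ccb, in the style of camcontrol(8).
 * A minimal sketch, matching every "da" peripheral (field values
 * illustrative):
 *
 *	struct dev_match_pattern pattern;
 *
 *	bzero(&pattern, sizeof(pattern));
 *	pattern.type = DEV_MATCH_PERIPH;
 *	pattern.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strncpy(pattern.pattern.periph_pattern.periph_name, "da",
 *		DEV_IDLEN);
 *
 * With only PERIPH_MATCH_NAME set, just the strncmp() clause above is
 * consulted; or'ing in more flags (PERIPH_MATCH_UNIT, etc.) ands the
 * corresponding tests together, as the chain of continues shows.
 */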
1902
1903static int
1904xptedtbusfunc(struct cam_eb *bus, void *arg)
1905{
1906 struct ccb_dev_match *cdm;
1907 dev_match_ret retval;
1908
1909 cdm = (struct ccb_dev_match *)arg;
1910
1911 /*
1912 * If our position is for something deeper in the tree, that means
1913 * that we've already seen this node. So, we keep going down.
1914 */
1915 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1916 && (cdm->pos.cookie.bus == bus)
1917 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1918 && (cdm->pos.cookie.target != NULL))
1919 retval = DM_RET_DESCEND;
1920 else
1921 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1922
1923 /*
1924 * If we got an error, bail out of the search.
1925 */
1926 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1927 cdm->status = CAM_DEV_MATCH_ERROR;
1928 return(0);
1929 }
1930
1931 /*
1932 * If the copy flag is set, copy this bus out.
1933 */
1934 if (retval & DM_RET_COPY) {
1935 int spaceleft, j;
1936
1937 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1938 sizeof(struct dev_match_result));
1939
1940 /*
1941 * If we don't have enough space to put in another
1942 * match result, save our position and tell the
1943 * user there are more devices to check.
1944 */
1945 if (spaceleft < sizeof(struct dev_match_result)) {
1946 bzero(&cdm->pos, sizeof(cdm->pos));
1947 cdm->pos.position_type =
1948 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1949
1950 cdm->pos.cookie.bus = bus;
1951 cdm->pos.generations[CAM_BUS_GENERATION]=
1952 bus_generation;
1953 cdm->status = CAM_DEV_MATCH_MORE;
1954 return(0);
1955 }
1956 j = cdm->num_matches;
1957 cdm->num_matches++;
1958 cdm->matches[j].type = DEV_MATCH_BUS;
1959 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1960 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1961 cdm->matches[j].result.bus_result.unit_number =
1962 bus->sim->unit_number;
1963 strncpy(cdm->matches[j].result.bus_result.dev_name,
1964 bus->sim->sim_name, DEV_IDLEN);
1965 }
1966
1967 /*
1968 * If the user is only interested in busses, there's no
1969 * reason to descend to the next level in the tree.
1970 */
1971 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1972 return(1);
1973
1974 /*
1975 * If there is a target generation recorded, check it to
1976 * make sure the target list hasn't changed.
1977 */
1978 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1979 && (bus == cdm->pos.cookie.bus)
1980 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1981 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1982 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1983 bus->generation)) {
1984 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1985 return(0);
1986 }
1987
1988 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1989 && (cdm->pos.cookie.bus == bus)
1990 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1991 && (cdm->pos.cookie.target != NULL))
1992 return(xpttargettraverse(bus,
1993 (struct cam_et *)cdm->pos.cookie.target,
1994 xptedttargetfunc, arg));
1995 else
1996 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1997}
1998
1999static int
2000xptedttargetfunc(struct cam_et *target, void *arg)
2001{
2002 struct ccb_dev_match *cdm;
2003
2004 cdm = (struct ccb_dev_match *)arg;
2005
2006 /*
2007 * If there is a device list generation recorded, check it to
2008 * make sure the device list hasn't changed.
2009 */
2010 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2011 && (cdm->pos.cookie.bus == target->bus)
2012 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2013 && (cdm->pos.cookie.target == target)
2014 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2015 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2016 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2017 target->generation)) {
2018 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2019 return(0);
2020 }
2021
2022 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2023 && (cdm->pos.cookie.bus == target->bus)
2024 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2025 && (cdm->pos.cookie.target == target)
2026 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2027 && (cdm->pos.cookie.device != NULL))
2028 return(xptdevicetraverse(target,
2029 (struct cam_ed *)cdm->pos.cookie.device,
2030 xptedtdevicefunc, arg));
2031 else
2032 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2033}
2034
2035static int
2036xptedtdevicefunc(struct cam_ed *device, void *arg)
2037{
2038
2039 struct ccb_dev_match *cdm;
2040 dev_match_ret retval;
2041
2042 cdm = (struct ccb_dev_match *)arg;
2043
2044 /*
2045 * If our position is for something deeper in the tree, that means
2046 * that we've already seen this node. So, we keep going down.
2047 */
2048 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2049 && (cdm->pos.cookie.device == device)
2050 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2051 && (cdm->pos.cookie.periph != NULL))
2052 retval = DM_RET_DESCEND;
2053 else
2054 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2055 device);
2056
2057 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2058 cdm->status = CAM_DEV_MATCH_ERROR;
2059 return(0);
2060 }
2061
2062 /*
2063 * If the copy flag is set, copy this device out.
2064 */
2065 if (retval & DM_RET_COPY) {
2066 int spaceleft, j;
2067
2068 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2069 sizeof(struct dev_match_result));
2070
2071 /*
2072 * If we don't have enough space to put in another
2073 * match result, save our position and tell the
2074 * user there are more devices to check.
2075 */
2076 if (spaceleft < sizeof(struct dev_match_result)) {
2077 bzero(&cdm->pos, sizeof(cdm->pos));
2078 cdm->pos.position_type =
2079 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2080 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2081
2082 cdm->pos.cookie.bus = device->target->bus;
2083 cdm->pos.generations[CAM_BUS_GENERATION]=
2084 bus_generation;
2085 cdm->pos.cookie.target = device->target;
2086 cdm->pos.generations[CAM_TARGET_GENERATION] =
2087 device->target->bus->generation;
2088 cdm->pos.cookie.device = device;
2089 cdm->pos.generations[CAM_DEV_GENERATION] =
2090 device->target->generation;
2091 cdm->status = CAM_DEV_MATCH_MORE;
2092 return(0);
2093 }
2094 j = cdm->num_matches;
2095 cdm->num_matches++;
2096 cdm->matches[j].type = DEV_MATCH_DEVICE;
2097 cdm->matches[j].result.device_result.path_id =
2098 device->target->bus->path_id;
2099 cdm->matches[j].result.device_result.target_id =
2100 device->target->target_id;
2101 cdm->matches[j].result.device_result.target_lun =
2102 device->lun_id;
2103 bcopy(&device->inq_data,
2104 &cdm->matches[j].result.device_result.inq_data,
2105 sizeof(struct scsi_inquiry_data));
2106
2107 /* Let the user know whether this device is unconfigured */
2108 if (device->flags & CAM_DEV_UNCONFIGURED)
2109 cdm->matches[j].result.device_result.flags =
2110 DEV_RESULT_UNCONFIGURED;
2111 else
2112 cdm->matches[j].result.device_result.flags =
2113 DEV_RESULT_NOFLAG;
2114 }
2115
2116 /*
2117 * If the user isn't interested in peripherals, don't descend
2118 * the tree any further.
2119 */
2120 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2121 return(1);
2122
2123 /*
2124 * If there is a peripheral list generation recorded, make sure
2125 * it hasn't changed.
2126 */
2127 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2128 && (device->target->bus == cdm->pos.cookie.bus)
2129 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2130 && (device->target == cdm->pos.cookie.target)
2131 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2132 && (device == cdm->pos.cookie.device)
2133 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2134 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2135 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2136 device->generation)){
2137 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2138 return(0);
2139 }
2140
2141 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2142 && (cdm->pos.cookie.bus == device->target->bus)
2143 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2144 && (cdm->pos.cookie.target == device->target)
2145 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2146 && (cdm->pos.cookie.device == device)
2147 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2148 && (cdm->pos.cookie.periph != NULL))
2149 return(xptperiphtraverse(device,
2150 (struct cam_periph *)cdm->pos.cookie.periph,
2151 xptedtperiphfunc, arg));
2152 else
2153 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2154}
2155
2156static int
2157xptedtperiphfunc(struct cam_periph *periph, void *arg)
2158{
2159 struct ccb_dev_match *cdm;
2160 dev_match_ret retval;
2161
2162 cdm = (struct ccb_dev_match *)arg;
2163
2164 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2165
2166 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2167 cdm->status = CAM_DEV_MATCH_ERROR;
2168 return(0);
2169 }
2170
2171 /*
2172 * If the copy flag is set, copy this peripheral out.
2173 */
2174 if (retval & DM_RET_COPY) {
2175 int spaceleft, j;
2176
2177 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2178 sizeof(struct dev_match_result));
2179
2180 /*
2181 * If we don't have enough space to put in another
2182 * match result, save our position and tell the
2183 * user there are more devices to check.
2184 */
2185 if (spaceleft < sizeof(struct dev_match_result)) {
2186 bzero(&cdm->pos, sizeof(cdm->pos));
2187 cdm->pos.position_type =
2188 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2189 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2190 CAM_DEV_POS_PERIPH;
2191
2192 cdm->pos.cookie.bus = periph->path->bus;
2193 cdm->pos.generations[CAM_BUS_GENERATION]=
2194 bus_generation;
2195 cdm->pos.cookie.target = periph->path->target;
2196 cdm->pos.generations[CAM_TARGET_GENERATION] =
2197 periph->path->bus->generation;
2198 cdm->pos.cookie.device = periph->path->device;
2199 cdm->pos.generations[CAM_DEV_GENERATION] =
2200 periph->path->target->generation;
2201 cdm->pos.cookie.periph = periph;
2202 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2203 periph->path->device->generation;
2204 cdm->status = CAM_DEV_MATCH_MORE;
2205 return(0);
2206 }
2207
2208 j = cdm->num_matches;
2209 cdm->num_matches++;
2210 cdm->matches[j].type = DEV_MATCH_PERIPH;
2211 cdm->matches[j].result.periph_result.path_id =
2212 periph->path->bus->path_id;
2213 cdm->matches[j].result.periph_result.target_id =
2214 periph->path->target->target_id;
2215 cdm->matches[j].result.periph_result.target_lun =
2216 periph->path->device->lun_id;
2217 cdm->matches[j].result.periph_result.unit_number =
2218 periph->unit_number;
2219 strncpy(cdm->matches[j].result.periph_result.periph_name,
2220 periph->periph_name, DEV_IDLEN);
2221 }
2222
2223 return(1);
2224}
2225
2226static int
2227xptedtmatch(struct ccb_dev_match *cdm)
2228{
2229 int ret;
2230
2231 cdm->num_matches = 0;
2232
2233 /*
2234 * Check the bus list generation. If it has changed, the user
2235 * needs to reset everything and start over.
2236 */
2237 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2238 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2239 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2240 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2241 return(0);
2242 }
2243
2244 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2245 && (cdm->pos.cookie.bus != NULL))
2246 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2247 xptedtbusfunc, cdm);
2248 else
2249 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2250
2251 /*
2252 * If we get back 0, that means that we had to stop before fully
2253 * traversing the EDT. It also means that one of the subroutines
2254 * has set the status field to the proper value. If we get back 1,
2255 * we've fully traversed the EDT and copied out any matching entries.
2256 */
2257 if (ret == 1)
2258 cdm->status = CAM_DEV_MATCH_LAST;
2259
2260 return(ret);
2261}
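
/*
 * A minimal sketch of how a userland caller drives this traversal and
 * resumes it, assuming the CAMIOCOMMAND ioctl on the xpt device as
 * used by camcontrol(8); buffer setup elided:
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	...point ccb.cdm.matches/match_buf_len at a result buffer...
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			err(1, "CAMIOCOMMAND");
 *		...consume ccb.cdm.num_matches entries...
 *	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
 *
 * Because cdm->pos is left intact between calls, each iteration picks
 * up where the last full buffer stopped.  A CAM_DEV_MATCH_LIST_CHANGED
 * status means a generation check failed; the caller must zero cdm.pos
 * and restart from the first bus.
 */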
2262
2263static int
2264xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2265{
2266 struct ccb_dev_match *cdm;
2267
2268 cdm = (struct ccb_dev_match *)arg;
2269
2270 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2271 && (cdm->pos.cookie.pdrv == pdrv)
2272 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2273 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2274 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2275 (*pdrv)->generation)) {
2276 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2277 return(0);
2278 }
2279
2280 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2281 && (cdm->pos.cookie.pdrv == pdrv)
2282 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2283 && (cdm->pos.cookie.periph != NULL))
2284 return(xptpdperiphtraverse(pdrv,
2285 (struct cam_periph *)cdm->pos.cookie.periph,
2286 xptplistperiphfunc, arg));
2287 else
2288 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2289}
2290
2291static int
2292xptplistperiphfunc(struct cam_periph *periph, void *arg)
2293{
2294 struct ccb_dev_match *cdm;
2295 dev_match_ret retval;
2296
2297 cdm = (struct ccb_dev_match *)arg;
2298
2299 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2300
2301 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2302 cdm->status = CAM_DEV_MATCH_ERROR;
2303 return(0);
2304 }
2305
2306 /*
2307 * If the copy flag is set, copy this peripheral out.
2308 */
2309 if (retval & DM_RET_COPY) {
2310 int spaceleft, j;
2311
2312 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2313 sizeof(struct dev_match_result));
2314
2315 /*
2316 * If we don't have enough space to put in another
2317 * match result, save our position and tell the
2318 * user there are more devices to check.
2319 */
2320 if (spaceleft < sizeof(struct dev_match_result)) {
2321 struct periph_driver **pdrv;
2322
2323 pdrv = NULL;
2324 bzero(&cdm->pos, sizeof(cdm->pos));
2325 cdm->pos.position_type =
2326 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2327 CAM_DEV_POS_PERIPH;
2328
2329 /*
 2330 * This may look a bit nonsensical, but it is
2331 * actually quite logical. There are very few
2332 * peripheral drivers, and bloating every peripheral
2333 * structure with a pointer back to its parent
2334 * peripheral driver linker set entry would cost
2335 * more in the long run than doing this quick lookup.
2336 */
 2337 SET_FOREACH(pdrv, periphdriver_set) {
2338 if (strcmp((*pdrv)->driver_name,
2339 periph->periph_name) == 0)
2340 break;
2341 }
2342
2343 if (pdrv == NULL) {
2344 cdm->status = CAM_DEV_MATCH_ERROR;
2345 return(0);
2346 }
2347
2348 cdm->pos.cookie.pdrv = pdrv;
2349 /*
2350 * The periph generation slot does double duty, as
2351 * does the periph pointer slot. They are used for
2352 * both edt and pdrv lookups and positioning.
2353 */
2354 cdm->pos.cookie.periph = periph;
2355 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2356 (*pdrv)->generation;
2357 cdm->status = CAM_DEV_MATCH_MORE;
2358 return(0);
2359 }
2360
2361 j = cdm->num_matches;
2362 cdm->num_matches++;
2363 cdm->matches[j].type = DEV_MATCH_PERIPH;
2364 cdm->matches[j].result.periph_result.path_id =
2365 periph->path->bus->path_id;
2366
2367 /*
2368 * The transport layer peripheral doesn't have a target or
2369 * lun.
2370 */
2371 if (periph->path->target)
2372 cdm->matches[j].result.periph_result.target_id =
2373 periph->path->target->target_id;
2374 else
2375 cdm->matches[j].result.periph_result.target_id = -1;
2376
2377 if (periph->path->device)
2378 cdm->matches[j].result.periph_result.target_lun =
2379 periph->path->device->lun_id;
2380 else
2381 cdm->matches[j].result.periph_result.target_lun = -1;
2382
2383 cdm->matches[j].result.periph_result.unit_number =
2384 periph->unit_number;
2385 strncpy(cdm->matches[j].result.periph_result.periph_name,
2386 periph->periph_name, DEV_IDLEN);
2387 }
2388
2389 return(1);
2390}
2391
2392static int
2393xptperiphlistmatch(struct ccb_dev_match *cdm)
2394{
2395 int ret;
2396
2397 cdm->num_matches = 0;
2398
2399 /*
2400 * At this point in the edt traversal function, we check the bus
2401 * list generation to make sure that no busses have been added or
2402 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2403 * For the peripheral driver list traversal function, however, we
2404 * don't have to worry about new peripheral driver types coming or
2405 * going; they're in a linker set, and therefore can't change
2406 * without a recompile.
2407 */
2408
2409 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2410 && (cdm->pos.cookie.pdrv != NULL))
2411 ret = xptpdrvtraverse(
2412 (struct periph_driver **)cdm->pos.cookie.pdrv,
2413 xptplistpdrvfunc, cdm);
2414 else
2415 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2416
2417 /*
2418 * If we get back 0, that means that we had to stop before fully
2419 * traversing the peripheral driver tree. It also means that one of
2420 * the subroutines has set the status field to the proper value. If
 2421 * we get back 1, we've fully traversed the peripheral driver list
 2422 * and copied out any matching entries.
2423 */
2424 if (ret == 1)
2425 cdm->status = CAM_DEV_MATCH_LAST;
2426
2427 return(ret);
2428}
2429
2430static int
2431xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2432{
2433 struct cam_eb *bus, *next_bus;
2434 int retval;
2435
2436 retval = 1;
2437
2438 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2439 bus != NULL;
2440 bus = next_bus) {
2441 next_bus = TAILQ_NEXT(bus, links);
2442
2443 retval = tr_func(bus, arg);
2444 if (retval == 0)
2445 return(retval);
2446 }
2447
2448 return(retval);
2449}
2450
2451static int
2452xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2453 xpt_targetfunc_t *tr_func, void *arg)
2454{
2455 struct cam_et *target, *next_target;
2456 int retval;
2457
2458 retval = 1;
2459 for (target = (start_target ? start_target :
2460 TAILQ_FIRST(&bus->et_entries));
2461 target != NULL; target = next_target) {
2462
2463 next_target = TAILQ_NEXT(target, links);
2464
2465 retval = tr_func(target, arg);
2466
2467 if (retval == 0)
2468 return(retval);
2469 }
2470
2471 return(retval);
2472}
2473
2474static int
2475xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2476 xpt_devicefunc_t *tr_func, void *arg)
2477{
2478 struct cam_ed *device, *next_device;
2479 int retval;
2480
2481 retval = 1;
2482 for (device = (start_device ? start_device :
2483 TAILQ_FIRST(&target->ed_entries));
2484 device != NULL;
2485 device = next_device) {
2486
2487 next_device = TAILQ_NEXT(device, links);
2488
2489 retval = tr_func(device, arg);
2490
2491 if (retval == 0)
2492 return(retval);
2493 }
2494
2495 return(retval);
2496}
2497
2498static int
2499xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2500 xpt_periphfunc_t *tr_func, void *arg)
2501{
2502 struct cam_periph *periph, *next_periph;
2503 int retval;
2504
2505 retval = 1;
2506
2507 for (periph = (start_periph ? start_periph :
2508 SLIST_FIRST(&device->periphs));
2509 periph != NULL;
2510 periph = next_periph) {
2511
2512 next_periph = SLIST_NEXT(periph, periph_links);
2513
2514 retval = tr_func(periph, arg);
2515 if (retval == 0)
2516 return(retval);
2517 }
2518
2519 return(retval);
2520}
2521
2522static int
2523xptpdrvtraverse(struct periph_driver **start_pdrv,
2524 xpt_pdrvfunc_t *tr_func, void *arg)
2525{
2526 struct periph_driver **pdrv;
2527 int retval;
2528
2529 retval = 1;
2530
2531 /*
2532 * We don't traverse the peripheral driver list like we do the
2533 * other lists, because it is a linker set, and therefore cannot be
2534 * changed during runtime. If the peripheral driver list is ever
2535 * re-done to be something other than a linker set (i.e. it can
2536 * change while the system is running), the list traversal should
2537 * be modified to work like the other traversal functions.
2538 */
2539 if (start_pdrv == NULL) {
 2540 SET_FOREACH(pdrv, periphdriver_set) {
 	retval = tr_func(pdrv, arg);
 	if (retval == 0)
 		return(retval);
 2541 }
2542 } else {
2543 while (*start_pdrv != NULL) {
2544 retval = tr_func(start_pdrv, arg);
2545 if (retval == 0)
2546 return(retval);
2547 ++start_pdrv;
2548 }
2549 }
2550
2551 return(retval);
2552}
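
/*
 * The linker set walked above is assembled at link time.  A minimal
 * sketch of how an entry gets there (driver name hypothetical), using
 * the same linker-set macros this code depends on:
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *	DATA_SET(periphdriver_set, foodriver);
 *
 * Every peripheral driver contributes one such entry, so SET_FOREACH()
 * sees the complete list with no runtime registration, and no
 * generation number needs to be rechecked during this traversal.
 */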
2553
2554static int
2555xptpdperiphtraverse(struct periph_driver **pdrv,
2556 struct cam_periph *start_periph,
2557 xpt_periphfunc_t *tr_func, void *arg)
2558{
2559 struct cam_periph *periph, *next_periph;
2560 int retval;
2561
2562 retval = 1;
2563
2564 for (periph = (start_periph ? start_periph :
2565 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2566 periph = next_periph) {
2567
2568 next_periph = TAILQ_NEXT(periph, unit_links);
2569
2570 retval = tr_func(periph, arg);
2571 if (retval == 0)
2572 return(retval);
2573 }
2574 return(retval);
2575}
2576
2577static int
2578xptdefbusfunc(struct cam_eb *bus, void *arg)
2579{
2580 struct xpt_traverse_config *tr_config;
2581
2582 tr_config = (struct xpt_traverse_config *)arg;
2583
2584 if (tr_config->depth == XPT_DEPTH_BUS) {
2585 xpt_busfunc_t *tr_func;
2586
2587 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2588
2589 return(tr_func(bus, tr_config->tr_arg));
2590 } else
2591 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2592}
2593
2594static int
2595xptdeftargetfunc(struct cam_et *target, void *arg)
2596{
2597 struct xpt_traverse_config *tr_config;
2598
2599 tr_config = (struct xpt_traverse_config *)arg;
2600
2601 if (tr_config->depth == XPT_DEPTH_TARGET) {
2602 xpt_targetfunc_t *tr_func;
2603
2604 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2605
2606 return(tr_func(target, tr_config->tr_arg));
2607 } else
2608 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2609}
2610
2611static int
2612xptdefdevicefunc(struct cam_ed *device, void *arg)
2613{
2614 struct xpt_traverse_config *tr_config;
2615
2616 tr_config = (struct xpt_traverse_config *)arg;
2617
2618 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2619 xpt_devicefunc_t *tr_func;
2620
2621 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2622
2623 return(tr_func(device, tr_config->tr_arg));
2624 } else
2625 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2626}
2627
2628static int
2629xptdefperiphfunc(struct cam_periph *periph, void *arg)
2630{
2631 struct xpt_traverse_config *tr_config;
2632 xpt_periphfunc_t *tr_func;
2633
2634 tr_config = (struct xpt_traverse_config *)arg;
2635
2636 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2637
2638 /*
2639 * Unlike the other default functions, we don't check for depth
2640 * here. The peripheral driver level is the last level in the EDT,
2641 * so if we're here, we should execute the function in question.
2642 */
2643 return(tr_func(periph, tr_config->tr_arg));
2644}
2645
2646/*
2647 * Execute the given function for every bus in the EDT.
2648 */
2649static int
2650xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2651{
2652 struct xpt_traverse_config tr_config;
2653
2654 tr_config.depth = XPT_DEPTH_BUS;
2655 tr_config.tr_func = tr_func;
2656 tr_config.tr_arg = arg;
2657
2658 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2659}
2660
2661#ifdef notusedyet
2662/*
2663 * Execute the given function for every target in the EDT.
2664 */
2665static int
2666xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2667{
2668 struct xpt_traverse_config tr_config;
2669
2670 tr_config.depth = XPT_DEPTH_TARGET;
2671 tr_config.tr_func = tr_func;
2672 tr_config.tr_arg = arg;
2673
2674 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2675}
2676#endif /* notusedyet */
2677
2678/*
2679 * Execute the given function for every device in the EDT.
2680 */
2681static int
2682xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2683{
2684 struct xpt_traverse_config tr_config;
2685
2686 tr_config.depth = XPT_DEPTH_DEVICE;
2687 tr_config.tr_func = tr_func;
2688 tr_config.tr_arg = arg;
2689
2690 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2691}
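
/*
 * Traversal callbacks return 1 to continue and 0 to abort, so a
 * caller-supplied function can walk the whole EDT with very little
 * code.  A minimal sketch (function name hypothetical) that counts
 * configured devices:
 *
 *	static int
 *	xptcountdevfunc(struct cam_ed *device, void *arg)
 *	{
 *		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
 *			(*(int *)arg)++;
 *		return(1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xptcountdevfunc, &count);
 */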
2692
2693#ifdef notusedyet
2694/*
2695 * Execute the given function for every peripheral in the EDT.
2696 */
2697static int
2698xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2699{
2700 struct xpt_traverse_config tr_config;
2701
2702 tr_config.depth = XPT_DEPTH_PERIPH;
2703 tr_config.tr_func = tr_func;
2704 tr_config.tr_arg = arg;
2705
2706 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2707}
2708#endif /* notusedyet */
2709
2710static int
2711xptsetasyncfunc(struct cam_ed *device, void *arg)
2712{
2713 struct cam_path path;
2714 struct ccb_getdev cgd;
2715 struct async_node *cur_entry;
2716
2717 cur_entry = (struct async_node *)arg;
2718
2719 /*
2720 * Don't report unconfigured devices (Wildcard devs,
2721 * devices only for target mode, device instances
2722 * that have been invalidated but are waiting for
2723 * their last reference count to be released).
2724 */
2725 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2726 return (1);
2727
2728 xpt_compile_path(&path,
2729 NULL,
2730 device->target->bus->path_id,
2731 device->target->target_id,
2732 device->lun_id);
2733 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2734 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2735 xpt_action((union ccb *)&cgd);
2736 cur_entry->callback(cur_entry->callback_arg,
2737 AC_FOUND_DEVICE,
2738 &path, &cgd);
2739 xpt_release_path(&path);
2740
2741 return(1);
2742}
2743
2744static int
2745xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2746{
2747 struct cam_path path;
2748 struct ccb_pathinq cpi;
2749 struct async_node *cur_entry;
2750
2751 cur_entry = (struct async_node *)arg;
2752
2753 xpt_compile_path(&path, /*periph*/NULL,
2754 bus->sim->path_id,
2755 CAM_TARGET_WILDCARD,
2756 CAM_LUN_WILDCARD);
2757 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2758 cpi.ccb_h.func_code = XPT_PATH_INQ;
2759 xpt_action((union ccb *)&cpi);
2760 cur_entry->callback(cur_entry->callback_arg,
2761 AC_PATH_REGISTERED,
2762 &path, &cpi);
2763 xpt_release_path(&path);
2764
2765 return(1);
2766}
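
/*
 * The two helpers above replay AC_FOUND_DEVICE and AC_PATH_REGISTERED
 * events to a newly registered async callback.  A minimal sketch of
 * the registration itself (callback name hypothetical), as a
 * peripheral driver would issue it:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = fooasync;
 *	csa.callback_arg = NULL;
 *	xpt_action((union ccb *)&csa);
 *
 * The XPT_SASYNC_CB case in xpt_action() below then uses these helpers
 * so the new subscriber immediately learns about busses and devices
 * that already exist.
 */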
2767
2768void
2769xpt_action(union ccb *start_ccb)
2770{
2771 int iopl;
2772
2773 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2774
2775 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2776
2777 iopl = splsoftcam();
2778 switch (start_ccb->ccb_h.func_code) {
2779 case XPT_SCSI_IO:
2780 {
2781#ifdef CAMDEBUG
2782 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2783 struct cam_path *path;
2784
2785 path = start_ccb->ccb_h.path;
2786#endif
2787
2788 /*
2789 * For the sake of compatibility with SCSI-1
2790 * devices that may not understand the identify
2791 * message, we include lun information in the
2792 * second byte of all commands. SCSI-1 specifies
2793 * that luns are a 3 bit value and reserves only 3
2794 * bits for lun information in the CDB. Later
2795 * revisions of the SCSI spec allow for more than 8
2796 * luns, but have deprecated lun information in the
 2797 * CDB. So, if the lun won't fit, we must omit it.
2798 *
2799 * Also be aware that during initial probing for devices,
2800 * the inquiry information is unknown but initialized to 0.
2801 * This means that this code will be exercised while probing
2802 * devices with an ANSI revision greater than 2.
2803 */
2804 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2805 && start_ccb->ccb_h.target_lun < 8
2806 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2807
2808 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2809 start_ccb->ccb_h.target_lun << 5;
2810 }
2811 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2812 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2813 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2814 &path->device->inq_data),
2815 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2816 cdb_str, sizeof(cdb_str))));
2817 /* FALLTHROUGH */
2818 }
2819 case XPT_TARGET_IO:
2820 case XPT_CONT_TARGET_IO:
2821 start_ccb->csio.sense_resid = 0;
2822 start_ccb->csio.resid = 0;
2823 /* FALLTHROUGH */
2824 case XPT_RESET_DEV:
2825 case XPT_ENG_EXEC:
2826 {
2827 struct cam_path *path;
2828 int s;
2829 int runq;
2830
2831 path = start_ccb->ccb_h.path;
2832 s = splsoftcam();
2833
2834 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2835 if (path->device->qfrozen_cnt == 0)
2836 runq = xpt_schedule_dev_sendq(path->bus, path->device);
2837 else
2838 runq = 0;
2839 splx(s);
2840 if (runq != 0)
2841 xpt_run_dev_sendq(path->bus);
2842 break;
2843 }
2844 case XPT_SET_TRAN_SETTINGS:
2845 {
2846 xpt_set_transfer_settings(&start_ccb->cts,
2847 start_ccb->ccb_h.path->device,
2848 /*async_update*/FALSE);
2849 break;
2850 }
2851 case XPT_CALC_GEOMETRY:
2852 {
2853 struct cam_sim *sim;
2854
2855 /* Filter out garbage */
2856 if (start_ccb->ccg.block_size == 0
2857 || start_ccb->ccg.volume_size == 0) {
2858 start_ccb->ccg.cylinders = 0;
2859 start_ccb->ccg.heads = 0;
2860 start_ccb->ccg.secs_per_track = 0;
2861 start_ccb->ccb_h.status = CAM_REQ_CMP;
2862 break;
2863 }
2864#ifdef PC98
2865 /*
 2866 * In a PC-98 system, geometry translation depends on
2867 * the "real" device geometry obtained from mode page 4.
2868 * SCSI geometry translation is performed in the
2869 * initialization routine of the SCSI BIOS and the result
2870 * stored in host memory. If the translation is available
2871 * in host memory, use it. If not, rely on the default
2872 * translation the device driver performs.
2873 */
2874 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2875 start_ccb->ccb_h.status = CAM_REQ_CMP;
2876 break;
2877 }
2878#endif
2879 sim = start_ccb->ccb_h.path->bus->sim;
2880 (*(sim->sim_action))(sim, start_ccb);
2881 break;
2882 }
2883 case XPT_ABORT:
2884 {
2885 union ccb* abort_ccb;
2886 int s;
2887
2888 abort_ccb = start_ccb->cab.abort_ccb;
2889 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2890
2891 if (abort_ccb->ccb_h.pinfo.index >= 0) {
2892 struct cam_ccbq *ccbq;
2893
2894 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2895 cam_ccbq_remove_ccb(ccbq, abort_ccb);
2896 abort_ccb->ccb_h.status =
2897 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2898 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2899 s = splcam();
2900 xpt_done(abort_ccb);
2901 splx(s);
2902 start_ccb->ccb_h.status = CAM_REQ_CMP;
2903 break;
2904 }
2905 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2906 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2907 /*
2908 * We've caught this ccb en route to
2909 * the SIM. Flag it for abort and the
2910 * SIM will do so just before starting
2911 * real work on the CCB.
2912 */
2913 abort_ccb->ccb_h.status =
2914 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2915 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2916 start_ccb->ccb_h.status = CAM_REQ_CMP;
2917 break;
2918 }
2919 }
2920 if (XPT_FC_IS_QUEUED(abort_ccb)
2921 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2922 /*
2923 * It's already completed but waiting
2924 * for our SWI to get to it.
2925 */
2926 start_ccb->ccb_h.status = CAM_UA_ABORT;
2927 break;
2928 }
2929 /*
2930 * If we weren't able to take care of the abort request
2931 * in the XPT, pass the request down to the SIM for processing.
2932 */
2933 /* FALLTHROUGH */
2934 }
2935 case XPT_ACCEPT_TARGET_IO:
2936 case XPT_EN_LUN:
2937 case XPT_IMMED_NOTIFY:
2938 case XPT_NOTIFY_ACK:
2939 case XPT_GET_TRAN_SETTINGS:
2940 case XPT_RESET_BUS:
2941 {
2942 struct cam_sim *sim;
2943
2944 sim = start_ccb->ccb_h.path->bus->sim;
2945 (*(sim->sim_action))(sim, start_ccb);
2946 break;
2947 }
2948 case XPT_PATH_INQ:
2949 {
2950 struct cam_sim *sim;
2951
2952 sim = start_ccb->ccb_h.path->bus->sim;
2953 (*(sim->sim_action))(sim, start_ccb);
2954 break;
2955 }
2956 case XPT_PATH_STATS:
2957 start_ccb->cpis.last_reset =
2958 start_ccb->ccb_h.path->bus->last_reset;
2959 start_ccb->ccb_h.status = CAM_REQ_CMP;
2960 break;
2961 case XPT_GDEV_TYPE:
2962 {
2963 struct cam_ed *dev;
2964 int s;
2965
2966 dev = start_ccb->ccb_h.path->device;
2967 s = splcam();
2968 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2969 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2970 } else {
2971 struct ccb_getdev *cgd;
2972 struct cam_eb *bus;
2973 struct cam_et *tar;
2974
2975 cgd = &start_ccb->cgd;
2976 bus = cgd->ccb_h.path->bus;
2977 tar = cgd->ccb_h.path->target;
2978 cgd->inq_data = dev->inq_data;
2979 cgd->ccb_h.status = CAM_REQ_CMP;
2980 cgd->serial_num_len = dev->serial_num_len;
2981 if ((dev->serial_num_len > 0)
2982 && (dev->serial_num != NULL))
2983 bcopy(dev->serial_num, cgd->serial_num,
2984 dev->serial_num_len);
2985 }
2986 splx(s);
2987 break;
2988 }
2989 case XPT_GDEV_STATS:
2990 {
2991 struct cam_ed *dev;
2992 int s;
2993
2994 dev = start_ccb->ccb_h.path->device;
2995 s = splcam();
2996 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2997 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2998 } else {
2999 struct ccb_getdevstats *cgds;
3000 struct cam_eb *bus;
3001 struct cam_et *tar;
3002
3003 cgds = &start_ccb->cgds;
3004 bus = cgds->ccb_h.path->bus;
3005 tar = cgds->ccb_h.path->target;
3006 cgds->dev_openings = dev->ccbq.dev_openings;
3007 cgds->dev_active = dev->ccbq.dev_active;
3008 cgds->devq_openings = dev->ccbq.devq_openings;
3009 cgds->devq_queued = dev->ccbq.queue.entries;
3010 cgds->held = dev->ccbq.held;
3011 cgds->last_reset = tar->last_reset;
3012 cgds->maxtags = dev->quirk->maxtags;
3013 cgds->mintags = dev->quirk->mintags;
3014 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3015 cgds->last_reset = bus->last_reset;
3016 cgds->ccb_h.status = CAM_REQ_CMP;
3017 }
3018 splx(s);
3019 break;
3020 }
3021 case XPT_GDEVLIST:
3022 {
3023 struct cam_periph *nperiph;
3024 struct periph_list *periph_head;
3025 struct ccb_getdevlist *cgdl;
3026 int i;
3027 int s;
3028 struct cam_ed *device;
3029 int found;
3030
3031
3032 found = 0;
3033
3034 /*
3035 * Don't want anyone mucking with our data.
3036 */
3037 s = splcam();
3038 device = start_ccb->ccb_h.path->device;
3039 periph_head = &device->periphs;
3040 cgdl = &start_ccb->cgdl;
3041
3042 /*
3043 * Check and see if the list has changed since the user
3044 * last requested a list member. If so, tell them that the
3045 * list has changed, and therefore they need to start over
3046 * from the beginning.
3047 */
3048 if ((cgdl->index != 0) &&
3049 (cgdl->generation != device->generation)) {
3050 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3051 splx(s);
3052 break;
3053 }
3054
3055 /*
3056 * Traverse the list of peripherals and attempt to find
3057 * the requested peripheral.
3058 */
3059 for (nperiph = periph_head->slh_first, i = 0;
3060 (nperiph != NULL) && (i <= cgdl->index);
3061 nperiph = nperiph->periph_links.sle_next, i++) {
3062 if (i == cgdl->index) {
3063 strncpy(cgdl->periph_name,
3064 nperiph->periph_name,
3065 DEV_IDLEN);
3066 cgdl->unit_number = nperiph->unit_number;
3067 found = 1;
3068 }
3069 }
3070 if (found == 0) {
3071 cgdl->status = CAM_GDEVLIST_ERROR;
3072 splx(s);
3073 break;
3074 }
3075
3076 if (nperiph == NULL)
3077 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3078 else
3079 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3080
3081 cgdl->index++;
3082 cgdl->generation = device->generation;
3083
3084 splx(s);
3085 cgdl->ccb_h.status = CAM_REQ_CMP;
3086 break;
3087 }
3088 case XPT_DEV_MATCH:
3089 {
3090 int s;
3091 dev_pos_type position_type;
3092 struct ccb_dev_match *cdm;
3093 int ret;
3094
3095 cdm = &start_ccb->cdm;
3096
3097 /*
3098 * Prevent EDT changes while we traverse it.
3099 */
3100 s = splcam();
3101 /*
3102 * There are two ways of getting at information in the EDT.
3103 * The first way is via the primary EDT tree. It starts
3104 * with a list of busses, then a list of targets on a bus,
3105 * then devices/luns on a target, and then peripherals on a
3106 * device/lun. The "other" way is by the peripheral driver
3107 * lists. The peripheral driver lists are organized by
 3108 * peripheral driver (obviously), so it makes sense to
3109 * use the peripheral driver list if the user is looking
3110 * for something like "da1", or all "da" devices. If the
3111 * user is looking for something on a particular bus/target
3112 * or lun, it's generally better to go through the EDT tree.
3113 */
3114
3115 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3116 position_type = cdm->pos.position_type;
3117 else {
3118 int i;
3119
3120 position_type = CAM_DEV_POS_NONE;
3121
3122 for (i = 0; i < cdm->num_patterns; i++) {
3123 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3124 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3125 position_type = CAM_DEV_POS_EDT;
3126 break;
3127 }
3128 }
3129
3130 if (cdm->num_patterns == 0)
3131 position_type = CAM_DEV_POS_EDT;
3132 else if (position_type == CAM_DEV_POS_NONE)
3133 position_type = CAM_DEV_POS_PDRV;
3134 }
3135
3136 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3137 case CAM_DEV_POS_EDT:
3138 ret = xptedtmatch(cdm);
3139 break;
3140 case CAM_DEV_POS_PDRV:
3141 ret = xptperiphlistmatch(cdm);
3142 break;
3143 default:
3144 cdm->status = CAM_DEV_MATCH_ERROR;
3145 break;
3146 }
3147
3148 splx(s);
3149
3150 if (cdm->status == CAM_DEV_MATCH_ERROR)
3151 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3152 else
3153 start_ccb->ccb_h.status = CAM_REQ_CMP;
3154
3155 break;
3156 }
3157 case XPT_SASYNC_CB:
3158 {
3159 struct ccb_setasync *csa;
3160 struct async_node *cur_entry;
3161 struct async_list *async_head;
3162 u_int32_t added;
3163 int s;
3164
3165 csa = &start_ccb->csa;
3166 added = csa->event_enable;
3167 async_head = &csa->ccb_h.path->device->asyncs;
3168
3169 /*
3170 * If there is already an entry for us, simply
3171 * update it.
3172 */
3173 s = splcam();
3174 cur_entry = SLIST_FIRST(async_head);
3175 while (cur_entry != NULL) {
3176 if ((cur_entry->callback_arg == csa->callback_arg)
3177 && (cur_entry->callback == csa->callback))
3178 break;
3179 cur_entry = SLIST_NEXT(cur_entry, links);
3180 }
3181
3182 if (cur_entry != NULL) {
3183 /*
3184 * If the request has no flags set,
3185 * remove the entry.
3186 */
3187 added &= ~cur_entry->event_enable;
3188 if (csa->event_enable == 0) {
3189 SLIST_REMOVE(async_head, cur_entry,
3190 async_node, links);
3191 csa->ccb_h.path->device->refcount--;
3192 free(cur_entry, M_DEVBUF);
3193 } else {
3194 cur_entry->event_enable = csa->event_enable;
3195 }
3196 } else {
3197 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
3198 M_NOWAIT);
3199 if (cur_entry == NULL) {
3200 splx(s);
3201 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3202 break;
3203 }
3204 cur_entry->event_enable = csa->event_enable;
3205 cur_entry->callback_arg = csa->callback_arg;
3206 cur_entry->callback = csa->callback;
3207 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3208 csa->ccb_h.path->device->refcount++;
3209 }
3210
3211 if ((added & AC_FOUND_DEVICE) != 0) {
3212 /*
3213 * Get this peripheral up to date with all
3214 * the currently existing devices.
3215 */
3216 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3217 }
3218 if ((added & AC_PATH_REGISTERED) != 0) {
3219 /*
3220 * Get this peripheral up to date with all
3221 * the currently existing busses.
3222 */
3223 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3224 }
3225 splx(s);
3226 start_ccb->ccb_h.status = CAM_REQ_CMP;
3227 break;
3228 }
3229 case XPT_REL_SIMQ:
3230 {
3231 struct ccb_relsim *crs;
3232 struct cam_ed *dev;
3233 int s;
3234
3235 crs = &start_ccb->crs;
3236 dev = crs->ccb_h.path->device;
3237 if (dev == NULL) {
3238
3239 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3240 break;
3241 }
3242
3243 s = splcam();
3244
3245 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3246
3247 if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3248
3249 /* Don't ever go below one opening */
3250 if (crs->openings > 0) {
3251 xpt_dev_ccbq_resize(crs->ccb_h.path,
3252 crs->openings);
3253
3254 if (bootverbose) {
3255 xpt_print_path(crs->ccb_h.path);
3256 printf("tagged openings "
3257 "now %d\n",
3258 crs->openings);
3259 }
3260 }
3261 }
3262 }
3263
3264 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3265
3266 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3267
3268 /*
3269 * Just extend the old timeout and decrement
3270 * the freeze count so that a single timeout
3271 * is sufficient for releasing the queue.
3272 */
3273 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3274 untimeout(xpt_release_devq_timeout,
3275 dev, dev->c_handle);
3276 } else {
3277
3278 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3279 }
3280
3281 dev->c_handle =
3282 timeout(xpt_release_devq_timeout,
3283 dev,
3284 (crs->release_timeout * hz) / 1000);
3285
3286 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3287
3288 }
3289
3290 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3291
3292 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3293 /*
3294 * Decrement the freeze count so that a single
3295 * completion is still sufficient to unfreeze
3296 * the queue.
3297 */
3298 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3299 } else {
3300
3301 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3302 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3303 }
3304 }
3305
3306 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3307
3308 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3309 || (dev->ccbq.dev_active == 0)) {
3310
3311 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3312 } else {
3313
3314 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3315 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3316 }
3317 }
3318 splx(s);
3319
3320 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3321
3322 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3323 /*run_queue*/TRUE);
3324 }
3325 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3326 start_ccb->ccb_h.status = CAM_REQ_CMP;
3327 break;
3328 }
3329 case XPT_SCAN_BUS:
3330 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3331 break;
3332 case XPT_SCAN_LUN:
3333 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3334 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3335 start_ccb);
3336 break;
3337 case XPT_DEBUG: {
3338#ifdef CAMDEBUG
3339 int s;
3340
3341 s = splcam();
3342#ifdef CAM_DEBUG_DELAY
3343 cam_debug_delay = CAM_DEBUG_DELAY;
3344#endif
3345 cam_dflags = start_ccb->cdbg.flags;
3346 if (cam_dpath != NULL) {
3347 xpt_free_path(cam_dpath);
3348 cam_dpath = NULL;
3349 }
3350
3351 if (cam_dflags != CAM_DEBUG_NONE) {
3352 if (xpt_create_path(&cam_dpath, xpt_periph,
3353 start_ccb->ccb_h.path_id,
3354 start_ccb->ccb_h.target_id,
3355 start_ccb->ccb_h.target_lun) !=
3356 CAM_REQ_CMP) {
3357 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3358 cam_dflags = CAM_DEBUG_NONE;
3359 } else {
3360 start_ccb->ccb_h.status = CAM_REQ_CMP;
3361 xpt_print_path(cam_dpath);
3362 printf("debugging flags now %x\n", cam_dflags);
3363 }
3364 } else {
3365 cam_dpath = NULL;
3366 start_ccb->ccb_h.status = CAM_REQ_CMP;
3367 }
3368 splx(s);
3369#else /* !CAMDEBUG */
3370 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3371#endif /* CAMDEBUG */
3372 break;
3373 }
3374 case XPT_NOOP:
3375 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3376 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3377 start_ccb->ccb_h.status = CAM_REQ_CMP;
3378 break;
3379 default:
3380 case XPT_SDEV_TYPE:
3381 case XPT_TERM_IO:
3382 case XPT_ENG_INQ:
3383 /* XXX Implement */
3384 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3385 break;
3386 }
3387 splx(iopl);
3388}
3389
3390void
3391xpt_polled_action(union ccb *start_ccb)
3392{
3393 int s;
3394 u_int32_t timeout;
3395 struct cam_sim *sim;
3396 struct cam_devq *devq;
3397 struct cam_ed *dev;
3398
3399 timeout = start_ccb->ccb_h.timeout;
3400 sim = start_ccb->ccb_h.path->bus->sim;
3401 devq = sim->devq;
3402 dev = start_ccb->ccb_h.path->device;
3403
3404 s = splcam();
3405
3406 /*
3407 * Steal an opening so that no other queued requests
3408 * can get it before us while we simulate interrupts.
3409 */
3410 dev->ccbq.devq_openings--;
3411 dev->ccbq.dev_openings--;
3412
3413 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3414 && (--timeout > 0)) {
3415 DELAY(1000);
3416 (*(sim->sim_poll))(sim);
3417 swi_camnet(NULL);
3418 swi_cambio(NULL);
3419 }
3420
3421 dev->ccbq.devq_openings++;
3422 dev->ccbq.dev_openings++;
3423
3424 if (timeout != 0) {
3425 xpt_action(start_ccb);
3426 while(--timeout > 0) {
3427 (*(sim->sim_poll))(sim);
3428 swi_camnet(NULL);
3429 swi_cambio(NULL);
3430 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3431 != CAM_REQ_INPROG)
3432 break;
3433 DELAY(1000);
3434 }
3435 if (timeout == 0) {
3436 /*
3437 * XXX Is it worth adding a sim_timeout entry
3438 * point so we can attempt recovery? If
3439 * this is only used for dumps, I don't think
3440 * it is.
3441 */
3442 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3443 }
3444 } else {
3445 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3446 }
3447 splx(s);
3448}
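
/*
 * Since sim_poll() and the software interrupt handlers are invoked
 * directly, this path works with interrupts disabled, which is what
 * the crash-dump path relies on.  A minimal sketch of a polled TEST
 * UNIT READY (path assumed valid):
 *
 *	union ccb ccb;
 *
 *	xpt_setup_ccb(&ccb.ccb_h, path, 1);
 *	scsi_test_unit_ready(&ccb.csio, 0, NULL, MSG_SIMPLE_Q_TAG,
 *			     SSD_FULL_SIZE, 5000);
 *	xpt_polled_action(&ccb);
 *
 * Note that the 1ms DELAY() per loop iteration means ccb_h.timeout is
 * consumed in roughly millisecond units.
 */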
3449
3450/*
 3451 * Schedule a peripheral driver to receive a ccb when its
3452 * target device has space for more transactions.
3453 */
3454void
3455xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3456{
3457 struct cam_ed *device;
3458 int s;
3459 int runq;
3460
3461 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3462 device = perph->path->device;
3463 s = splsoftcam();
3464 if (periph_is_queued(perph)) {
3465 /* Simply reorder based on new priority */
3466 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3467 (" change priority to %d\n", new_priority));
3468 if (new_priority < perph->pinfo.priority) {
3469 camq_change_priority(&device->drvq,
3470 perph->pinfo.index,
3471 new_priority);
3472 }
3473 runq = 0;
3474 } else {
3475 /* New entry on the queue */
3476 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3477 (" added periph to queue\n"));
3478 perph->pinfo.priority = new_priority;
3479 perph->pinfo.generation = ++device->drvq.generation;
3480 camq_insert(&device->drvq, &perph->pinfo);
3481 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3482 }
3483 splx(s);
3484 if (runq != 0) {
3485 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3486 (" calling xpt_run_devq\n"));
3487 xpt_run_dev_allocq(perph->path->bus);
3488 }
3489}
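
/*
 * A minimal sketch of the peripheral side of this handshake (driver
 * names hypothetical): the driver queues work, calls xpt_schedule(),
 * and is later handed a ccb through its periph_start entry point by
 * xpt_run_dev_allocq() below:
 *
 *	static void
 *	foostrategy(struct buf *bp)
 *	{
 *		...queue bp on the softc...
 *		xpt_schedule(periph, 1);
 *	}
 *
 *	static void
 *	foostart(struct cam_periph *periph, union ccb *start_ccb)
 *	{
 *		...fill in start_ccb from the queued work...
 *		xpt_action(start_ccb);
 *	}
 */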
3490
3491
3492/*
3493 * Schedule a device to run on a given queue.
3494 * If the device was inserted as a new entry on the queue,
3495 * return 1 meaning the device queue should be run. If we
3496 * were already queued, implying someone else has already
3497 * started the queue, return 0 so the caller doesn't attempt
3498 * to run the queue. Must be run at either splsoftcam
 3499 * (or splcam, since that encompasses splsoftcam).
3500 */
3501static int
3502xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3503 u_int32_t new_priority)
3504{
3505 int retval;
3506 u_int32_t old_priority;
3507
3508 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3509
3510 old_priority = pinfo->priority;
3511
3512 /*
3513 * Are we already queued?
3514 */
3515 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3516 /* Simply reorder based on new priority */
3517 if (new_priority < old_priority) {
3518 camq_change_priority(queue, pinfo->index,
3519 new_priority);
3520 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3521 ("changed priority to %d\n",
3522 new_priority));
3523 }
3524 retval = 0;
3525 } else {
3526 /* New entry on the queue */
3527 if (new_priority < old_priority)
3528 pinfo->priority = new_priority;
3529
3530 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3531 ("Inserting onto queue\n"));
3532 pinfo->generation = ++queue->generation;
3533 camq_insert(queue, pinfo);
3534 retval = 1;
3535 }
3536 return (retval);
3537}
3538
3539static void
3540xpt_run_dev_allocq(struct cam_eb *bus)
3541{
3542 struct cam_devq *devq;
3543 int s;
3544
3545 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3546 devq = bus->sim->devq;
3547
3548 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3549 (" qfrozen_cnt == 0x%x, entries == %d, "
3550 "openings == %d, active == %d\n",
3551 devq->alloc_queue.qfrozen_cnt,
3552 devq->alloc_queue.entries,
3553 devq->alloc_openings,
3554 devq->alloc_active));
3555
3556 s = splsoftcam();
3557 devq->alloc_queue.qfrozen_cnt++;
3558 while ((devq->alloc_queue.entries > 0)
3559 && (devq->alloc_openings > 0)
3560 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3561 struct cam_ed_qinfo *qinfo;
3562 struct cam_ed *device;
3563 union ccb *work_ccb;
3564 struct cam_periph *drv;
3565 struct camq *drvq;
3566
3567 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3568 CAMQ_HEAD);
3569 device = qinfo->device;
3570
3571 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3572 ("running device %p\n", device));
3573
3574 drvq = &device->drvq;
3575
3576#ifdef CAMDEBUG
3577 if (drvq->entries <= 0) {
3578 panic("xpt_run_dev_allocq: "
3579 "Device on queue without any work to do");
3580 }
3581#endif
3582 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3583 devq->alloc_openings--;
3584 devq->alloc_active++;
3585 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3586 splx(s);
3587 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3588 drv->pinfo.priority);
3589 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3590 ("calling periph start\n"));
3591 drv->periph_start(drv, work_ccb);
3592 } else {
3593 /*
3594 * Malloc failure in alloc_ccb
3595 */
3596 /*
3597 * XXX add us to a list to be run from free_ccb
3598 * if we don't have any ccbs active on this
3599 * device queue otherwise we may never get run
3600 * again.
3601 */
3602 break;
3603 }
3604
3605 /* Raise IPL for possible insertion and test at top of loop */
3606 s = splsoftcam();
3607
3608 if (drvq->entries > 0) {
3609 /* We have more work. Attempt to reschedule */
3610 xpt_schedule_dev_allocq(bus, device);
3611 }
3612 }
3613 devq->alloc_queue.qfrozen_cnt--;
3614 splx(s);
3615}
3616
3617static void
3618xpt_run_dev_sendq(struct cam_eb *bus)
3619{
3620 struct cam_devq *devq;
3621 int s;
3622
3623 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3624
3625 devq = bus->sim->devq;
3626
3627 s = splcam();
3628 devq->send_queue.qfrozen_cnt++;
3629 splx(s);
3630 s = splsoftcam();
3631 while ((devq->send_queue.entries > 0)
3632 && (devq->send_openings > 0)) {
3633 struct cam_ed_qinfo *qinfo;
3634 struct cam_ed *device;
3635 union ccb *work_ccb;
3636 struct cam_sim *sim;
3637 int ospl;
3638
3639 ospl = splcam();
3640 if (devq->send_queue.qfrozen_cnt > 1) {
3641 splx(ospl);
3642 break;
3643 }
3644
3645 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3646 CAMQ_HEAD);
3647 device = qinfo->device;
3648
3649 /*
3650 * If the device has been "frozen", don't attempt
3651 * to run it.
3652 */
3653 if (device->qfrozen_cnt > 0) {
3654 splx(ospl);
3655 continue;
3656 }
3657
3658 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3659 ("running device %p\n", device));
3660
3661 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3662 if (work_ccb == NULL) {
3663 printf("device on run queue with no ccbs???\n");
3664 splx(ospl);
3665 continue;
3666 }
3667
3668 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3669
3670 if (num_highpower <= 0) {
3671 /*
3672 * We got a high power command, but we
3673 * don't have any available slots. Freeze
3674 * the device queue until we have a slot
3675 * available.
3676 */
3677 device->qfrozen_cnt++;
3678 STAILQ_INSERT_TAIL(&highpowerq,
3679 &work_ccb->ccb_h,
3680 xpt_links.stqe);
3681
3682 splx(ospl);
3683 continue;
3684 } else {
3685 /*
3686 * Consume a high power slot while
3687 * this ccb runs.
3688 */
3689 num_highpower--;
3690 }
3691 }
3692 devq->active_dev = device;
3693 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3694
3695 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3696 splx(ospl);
3697
3698 devq->send_openings--;
3699 devq->send_active++;
3700
3701 if (device->ccbq.queue.entries > 0)
3702 xpt_schedule_dev_sendq(bus, device);
3703
3704 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3705 /*
3706 * The client wants to freeze the queue
3707 * after this CCB is sent.
3708 */
3709 ospl = splcam();
3710 device->qfrozen_cnt++;
3711 splx(ospl);
3712 }
3713
3714 splx(s);
3715
3716 /* In Target mode, the peripheral driver knows best... */
3717 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3718 if ((device->inq_flags & SID_CmdQue) != 0
3719 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3720 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3721 else
3722 /*
3723 * Clear this in case of a retried CCB that
3724 * failed due to a rejected tag.
3725 */
3726 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3727 }
3728
3729 /*
3730 * Device queues can be shared among multiple sim instances
3731 * that reside on different busses. Use the SIM in the queue
3732 * CCB's path, rather than the one in the bus that was passed
3733 * into this function.
3734 */
3735 sim = work_ccb->ccb_h.path->bus->sim;
3736 (*(sim->sim_action))(sim, work_ccb);
3737
3738 ospl = splcam();
3739 devq->active_dev = NULL;
3740 splx(ospl);
3741 /* Raise IPL for possible insertion and test at top of loop */
3742 s = splsoftcam();
3743 }
3744 splx(s);
3745 s = splcam();
3746 devq->send_queue.qfrozen_cnt--;
3747 splx(s);
3748}
3749
3750/*
3751 * This function merges stuff from the slave ccb into the master ccb, while
3752 * keeping important fields in the master ccb constant.
3753 */
3754void
3755xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3756{
3757 /*
3758 * Pull fields that are valid for peripheral drivers to set
3759 * into the master CCB along with the CCB "payload".
3760 */
3761 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3762 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3763 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3764 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3765 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3766 sizeof(union ccb) - sizeof(struct ccb_hdr));
3767}
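
/*
 * The bcopy above starts one ccb_hdr past the top of each ccb: it
 * copies the entire payload while leaving the rest of the master's
 * header (path, pinfo, queue linkage) untouched apart from the four
 * fields explicitly pulled over.
 */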
3768
3769void
3770xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3771{
3772 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3773 ccb_h->pinfo.priority = priority;
3774 ccb_h->path = path;
3775 ccb_h->path_id = path->bus->path_id;
3776 if (path->target)
3777 ccb_h->target_id = path->target->target_id;
3778 else
3779 ccb_h->target_id = CAM_TARGET_WILDCARD;
3780 if (path->device) {
3781 ccb_h->target_lun = path->device->lun_id;
3782 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3783 } else {
3784 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3785 }
3786 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3787 ccb_h->flags = 0;
3788}
3789
3790/* Path manipulation functions */
3791cam_status
3792xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3793 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3794{
3795 struct cam_path *path;
3796 cam_status status;
3797
3798 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3799
3800 if (path == NULL) {
3801 status = CAM_RESRC_UNAVAIL;
3802 return(status);
3803 }
3804 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3805 if (status != CAM_REQ_CMP) {
3806 free(path, M_DEVBUF);
3807 path = NULL;
3808 }
3809 *new_path_ptr = path;
3810 return (status);
3811}
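
/*
 * Callers pair xpt_create_path() with xpt_free_path() (below) and must
 * check the status before touching the path.  A minimal sketch:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id,
 *	    lun_id) != CAM_REQ_CMP) {
 *		...CAM_RESRC_UNAVAIL or CAM_PATH_INVALID...
 *	}
 *	...use path...
 *	xpt_free_path(path);
 */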
3812
3813static cam_status
3814xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3815 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3816{
3817 struct cam_eb *bus;
3818 struct cam_et *target;
3819 struct cam_ed *device;
3820 cam_status status;
3821 int s;
3822
3823 status = CAM_REQ_CMP; /* Completed without error */
3824 target = NULL; /* Wildcarded */
3825 device = NULL; /* Wildcarded */
3826
3827 /*
3828 * We will potentially modify the EDT, so block interrupts
3829 * that may attempt to create cam paths.
3830 */
3831 s = splcam();
3832 bus = xpt_find_bus(path_id);
3833 if (bus == NULL) {
3834 status = CAM_PATH_INVALID;
3835 } else {
3836 target = xpt_find_target(bus, target_id);
3837 if (target == NULL) {
3838 /* Create one */
3839 struct cam_et *new_target;
3840
3841 new_target = xpt_alloc_target(bus, target_id);
3842 if (new_target == NULL) {
3843 status = CAM_RESRC_UNAVAIL;
3844 } else {
3845 target = new_target;
3846 }
3847 }
3848 if (target != NULL) {
3849 device = xpt_find_device(target, lun_id);
3850 if (device == NULL) {
3851 /* Create one */
3852 struct cam_ed *new_device;
3853
3854 new_device = xpt_alloc_device(bus,
3855 target,
3856 lun_id);
3857 if (new_device == NULL) {
3858 status = CAM_RESRC_UNAVAIL;
3859 } else {
3860 device = new_device;
3861 }
3862 }
3863 }
3864 }
3865 splx(s);
3866
3867 /*
3868 * Only touch the user's data if we are successful.
3869 */
3870 if (status == CAM_REQ_CMP) {
3871 new_path->periph = perph;
3872 new_path->bus = bus;
3873 new_path->target = target;
3874 new_path->device = device;
3875 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3876 } else {
3877 if (device != NULL)
3878 xpt_release_device(bus, target, device);
3879 if (target != NULL)
3880 xpt_release_target(bus, target);
3881 if (bus != NULL)
3882 xpt_release_bus(bus);
3883 }
3884 return (status);
3885}
static void
xpt_release_path(struct cam_path *path)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
	if (path->device != NULL) {
		xpt_release_device(path->bus, path->target, path->device);
		path->device = NULL;
	}
	if (path->target != NULL) {
		xpt_release_target(path->bus, path->target);
		path->target = NULL;
	}
	if (path->bus != NULL) {
		xpt_release_bus(path->bus);
		path->bus = NULL;
	}
}

void
xpt_free_path(struct cam_path *path)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
	xpt_release_path(path);
	free(path, M_DEVBUF);
}


/*
 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 * in path1, 2 for match with wildcards in path2.
 */
int
xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
{
	int retval = 0;

	if (path1->bus != path2->bus) {
		if (path1->bus->path_id == CAM_BUS_WILDCARD)
			retval = 1;
		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	if (path1->target != path2->target) {
		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
			if (retval == 0)
				retval = 1;
		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	if (path1->device != path2->device) {
		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
			if (retval == 0)
				retval = 1;
		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	return (retval);
}
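/*
 * Illustrative, non-compiled sketch of the return-value convention just
 * described: comparing a wildcard path (CAM_TARGET_WILDCARD /
 * CAM_LUN_WILDCARD on the same bus) against a fully-specified one
 * yields 1, identical paths yield 0, and disjoint paths yield -1.
 */
#if 0
static int
example_paths_overlap(struct cam_path *wild, struct cam_path *concrete)
{
	switch (xpt_path_comp(wild, concrete)) {
	case 0:		/* exact match */
	case 1:		/* matched via wildcards in 'wild' */
	case 2:		/* matched via wildcards in 'concrete' */
		return (1);
	default:	/* -1: paths do not intersect */
		return (0);
	}
}
#endif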
void
xpt_print_path(struct cam_path *path)
{
	if (path == NULL)
		printf("(nopath): ");
	else {
		if (path->periph != NULL)
			printf("(%s%d:", path->periph->periph_name,
			       path->periph->unit_number);
		else
			printf("(noperiph:");

		if (path->bus != NULL)
			printf("%s%d:%d:", path->bus->sim->sim_name,
			       path->bus->sim->unit_number,
			       path->bus->sim->bus_id);
		else
			printf("nobus:");

		if (path->target != NULL)
			printf("%d:", path->target->target_id);
		else
			printf("X:");

		if (path->device != NULL)
			printf("%d): ", path->device->lun_id);
		else
			printf("X): ");
	}
}
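/*
 * With a fully-specified path the prefix printed above looks like
 * "(da0:ahc0:0:3:0): " for peripheral da0 on SIM ahc0, bus 0, target 3,
 * lun 0; absent components print as "noperiph", "nobus", or "X".
 */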
path_id_t
xpt_path_path_id(struct cam_path *path)
{
	return(path->bus->path_id);
}

target_id_t
xpt_path_target_id(struct cam_path *path)
{
	if (path->target != NULL)
		return (path->target->target_id);
	else
		return (CAM_TARGET_WILDCARD);
}

lun_id_t
xpt_path_lun_id(struct cam_path *path)
{
	if (path->device != NULL)
		return (path->device->lun_id);
	else
		return (CAM_LUN_WILDCARD);
}

struct cam_sim *
xpt_path_sim(struct cam_path *path)
{
	return (path->bus->sim);
}

struct cam_periph*
xpt_path_periph(struct cam_path *path)
{
	return (path->periph);
}
/*
 * Release a CAM control block for the caller.  Remit the cost of the
 * structure to the device referenced by the path.  If this device had
 * no 'credits' and peripheral drivers have registered async callbacks
 * for this notification, call them now.
 */
void
xpt_release_ccb(union ccb *free_ccb)
{
	int s;
	struct cam_path *path;
	struct cam_ed *device;
	struct cam_eb *bus;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
	path = free_ccb->ccb_h.path;
	device = path->device;
	bus = path->bus;
	s = splsoftcam();
	cam_ccbq_release_opening(&device->ccbq);
	if (xpt_ccb_count > xpt_max_ccbs) {
		xpt_free_ccb(free_ccb);
		xpt_ccb_count--;
	} else {
		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
	}
	bus->sim->devq->alloc_openings++;
	bus->sim->devq->alloc_active--;
	/* XXX Turn this into an inline function - xpt_run_device?? */
	if ((device_is_alloc_queued(device) == 0)
	 && (device->drvq.entries > 0)) {
		xpt_schedule_dev_allocq(bus, device);
	}
	splx(s);
	if (dev_allocq_is_runnable(bus->sim->devq))
		xpt_run_dev_allocq(bus);
}
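/*
 * Illustrative, non-compiled sketch: a peripheral driver's completion
 * routine (the name example_done is hypothetical) typically interprets
 * the returned status and then hands the CCB back to the pool with
 * xpt_release_ccb().
 */
#if 0
static void
example_done(struct cam_periph *periph, union ccb *done_ccb)
{
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* ... error recovery ... */
	}
	xpt_release_ccb(done_ccb);
}
#endif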
/* Functions accessed by SIM drivers */

/*
 * A sim structure, listing the SIM entry points and instance
 * identification info, is passed to xpt_bus_register to hook the SIM
 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 * for this new bus, places it in the list of busses, and assigns
 * it a path_id.  The path_id may be influenced by "hard wiring"
 * information specified by the user.  Once interrupt services are
 * available, the bus will be probed.
 */
int32_t
xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
{
	struct cam_eb *new_bus;
	struct cam_eb *old_bus;
	struct ccb_pathinq cpi;
	int s;

	sim->bus_id = bus;
	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
					  M_DEVBUF, M_NOWAIT);
	if (new_bus == NULL) {
		/* Couldn't satisfy request */
		return (CAM_RESRC_UNAVAIL);
	}

	if (strcmp(sim->sim_name, "xpt") != 0) {
		sim->path_id =
		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
	}

	TAILQ_INIT(&new_bus->et_entries);
	new_bus->path_id = sim->path_id;
	new_bus->sim = sim;
	timevalclear(&new_bus->last_reset);
	new_bus->flags = 0;
	new_bus->refcount = 1;	/* Held until a bus_deregister event */
	new_bus->generation = 0;
	s = splcam();
	old_bus = TAILQ_FIRST(&xpt_busses);
	while (old_bus != NULL