Remove spl*() calls from the bus/ infrastructure, replacing them with
critical sections.

[dragonfly.git] / sys / bus / cam / cam_xpt.c
/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.24 2005/06/02 20:40:29 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>
#include <machine/ipl.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};
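
#if 0
/*
 * Illustrative sketch only -- not part of the original file.  This is
 * the shape of handler the callback pointer in an async_node refers
 * to; peripherals and SIMs hang such handlers on the per-device async
 * lists (normally by issuing an XPT_SASYNC_CB CCB).  The handler name
 * and the printf are hypothetical.
 */
static void
example_async_callback(void *arg, u_int32_t code,
		       struct cam_path *path, void *args)
{
	/* Only events enabled in the node's event_enable are delivered. */
	if (code == AC_LOST_DEVICE)
		printf("example: device on this path went away\n");
}
#endif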

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
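
/*
 * Added commentary (a sketch, not from the original source): for one
 * possible configuration, the structures above link together as
 *
 *	xpt_busses
 *	   `-> cam_eb  (one per registered SIM, e.g. path_id 0)
 *	         `-> cam_et  (target_id 0)
 *	               `-> cam_ed  (lun_id 0)
 *	                     `-> periphs: e.g. "da0", "pass0"
 *
 * A cam_path simply names one node in this tree -- a (bus, target,
 * device) triple -- plus the peripheral currently using it.
 */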
189
190struct xpt_quirk_entry {
191 struct scsi_inquiry_pattern inq_pat;
192 u_int8_t quirks;
193#define CAM_QUIRK_NOLUNS 0x01
194#define CAM_QUIRK_NOSERIAL 0x02
195#define CAM_QUIRK_HILUNS 0x04
196 u_int mintags;
197 u_int maxtags;
198};
199#define CAM_SCSI2_MAXLUN 8
200
201typedef enum {
202 XPT_FLAG_OPEN = 0x01
203} xpt_flags;
204
205struct xpt_softc {
206 xpt_flags flags;
207 u_int32_t generation;
208};
209
210static const char quantum[] = "QUANTUM";
211static const char sony[] = "SONY";
212static const char west_digital[] = "WDIGTL";
213static const char samsung[] = "SAMSUNG";
214static const char seagate[] = "SEAGATE";
215static const char microp[] = "MICROP";
216
217static struct xpt_quirk_entry xpt_quirk_table[] =
218{
219 {
220 /* Reports QUEUE FULL for temporary resource shortages */
221 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
222 /*quirks*/0, /*mintags*/24, /*maxtags*/32
223 },
224 {
225 /* Reports QUEUE FULL for temporary resource shortages */
226 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
227 /*quirks*/0, /*mintags*/24, /*maxtags*/32
228 },
229 {
230 /* Reports QUEUE FULL for temporary resource shortages */
231 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
232 /*quirks*/0, /*mintags*/24, /*maxtags*/32
233 },
234 {
235 /* Broken tagged queuing drive */
236 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
237 /*quirks*/0, /*mintags*/0, /*maxtags*/0
238 },
239 {
240 /* Broken tagged queuing drive */
241 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
242 /*quirks*/0, /*mintags*/0, /*maxtags*/0
243 },
244 {
245 /* Broken tagged queuing drive */
246 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
247 /*quirks*/0, /*mintags*/0, /*maxtags*/0
248 },
249 {
250 /*
251 * Unfortunately, the Quantum Atlas III has the same
252 * problem as the Atlas II drives above.
253 * Reported by: "Johan Granlund" <johan@granlund.nu>
254 *
255 * For future reference, the drive with the problem was:
256 * QUANTUM QM39100TD-SW N1B0
257 *
258 * It's possible that Quantum will fix the problem in later
259 * firmware revisions. If that happens, the quirk entry
260 * will need to be made specific to the firmware revisions
261 * with the problem.
262 *
263 */
264 /* Reports QUEUE FULL for temporary resource shortages */
265 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
266 /*quirks*/0, /*mintags*/24, /*maxtags*/32
267 },
268 {
269 /*
270 * 18 Gig Atlas III, same problem as the 9G version.
271 * Reported by: Andre Albsmeier
272 * <andre.albsmeier@mchp.siemens.de>
273 *
274 * For future reference, the drive with the problem was:
275 * QUANTUM QM318000TD-S N491
276 */
277 /* Reports QUEUE FULL for temporary resource shortages */
278 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
279 /*quirks*/0, /*mintags*/24, /*maxtags*/32
280 },
281 {
282 /*
283 * Broken tagged queuing drive
284 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
285 * and: Martin Renters <martin@tdc.on.ca>
286 */
287 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
288 /*quirks*/0, /*mintags*/0, /*maxtags*/0
289 },
290 /*
291 * The Seagate Medalist Pro drives have very poor write
292 * performance with anything more than 2 tags.
293 *
294 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
295 * Drive: <SEAGATE ST36530N 1444>
296 *
297 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
298 * Drive: <SEAGATE ST34520W 1281>
299 *
300 * No one has actually reported that the 9G version
301 * (ST39140*) of the Medalist Pro has the same problem, but
302 * we're assuming that it does because the 4G and 6.5G
303 * versions of the drive are broken.
304 */
305 {
306 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
307 /*quirks*/0, /*mintags*/2, /*maxtags*/2
308 },
309 {
310 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
311 /*quirks*/0, /*mintags*/2, /*maxtags*/2
312 },
313 {
314 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
315 /*quirks*/0, /*mintags*/2, /*maxtags*/2
316 },
317 {
318 /*
319 * Slow when tagged queueing is enabled. Write performance
320 * steadily drops off with more and more concurrent
321 * transactions. Best sequential write performance with
322 * tagged queueing turned off and write caching turned on.
323 *
324 * PR: kern/10398
325 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
326 * Drive: DCAS-34330 w/ "S65A" firmware.
327 *
328 * The drive with the problem had the "S65A" firmware
329 * revision, and has also been reported (by Stephen J.
330 * Roznowski <sjr@home.net>) for a drive with the "S61A"
331 * firmware revision.
332 *
333 * Although no one has reported problems with the 2 gig
334 * version of the DCAS drive, the assumption is that it
335 * has the same problems as the 4 gig version. Therefore
336 * this quirk entries disables tagged queueing for all
337 * DCAS drives.
338 */
339 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
340 /*quirks*/0, /*mintags*/0, /*maxtags*/0
341 },
342 {
343 /* Broken tagged queuing drive */
344 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
345 /*quirks*/0, /*mintags*/0, /*maxtags*/0
346 },
347 {
348 /* Broken tagged queuing drive */
349 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
350 /*quirks*/0, /*mintags*/0, /*maxtags*/0
351 },
352 {
353 /*
354 * Broken tagged queuing drive.
355 * Submitted by:
356 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
357 * in PR kern/9535
358 */
359 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
360 /*quirks*/0, /*mintags*/0, /*maxtags*/0
361 },
362 {
363 /*
364 * Slow when tagged queueing is enabled. (1.5MB/sec versus
365 * 8MB/sec.)
366 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
367 * Best performance with these drives is achieved with
368 * tagged queueing turned off, and write caching turned on.
369 */
370 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
371 /*quirks*/0, /*mintags*/0, /*maxtags*/0
372 },
373 {
374 /*
375 * Slow when tagged queueing is enabled. (1.5MB/sec versus
376 * 8MB/sec.)
377 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
378 * Best performance with these drives is achieved with
379 * tagged queueing turned off, and write caching turned on.
380 */
381 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
382 /*quirks*/0, /*mintags*/0, /*maxtags*/0
383 },
384 {
385 /*
386 * Doesn't handle queue full condition correctly,
387 * so we need to limit maxtags to what the device
388 * can handle instead of determining this automatically.
389 */
390 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
391 /*quirks*/0, /*mintags*/2, /*maxtags*/32
392 },
393 {
394 /* Really only one LUN */
395 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
396 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
397 },
398 {
399 /* I can't believe we need a quirk for DPT volumes. */
400 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
401 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
402 /*mintags*/0, /*maxtags*/255
403 },
404 {
405 /*
406 * Many Sony CDROM drives don't like multi-LUN probing.
407 */
408 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
409 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
410 },
411 {
412 /*
413 * This drive doesn't like multiple LUN probing.
414 * Submitted by: Parag Patel <parag@cgt.com>
415 */
416 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
417 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
418 },
419 {
420 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
421 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
422 },
423 {
424 /*
425 * The 8200 doesn't like multi-lun probing, and probably
426 * don't like serial number requests either.
427 */
428 {
429 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
430 "EXB-8200*", "*"
431 },
432 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
433 },
434 {
435 /*
436 * Let's try the same as above, but for a drive that says
437 * it's an IPL-6860 but is actually an EXB 8200.
438 */
439 {
440 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
441 "IPL-6860*", "*"
442 },
443 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
444 },
445 {
446 /*
447 * These Hitachi drives don't like multi-lun probing.
448 * The PR submitter has a DK319H, but says that the Linux
449 * kernel has a similar work-around for the DK312 and DK314,
450 * so all DK31* drives are quirked here.
451 * PR: misc/18793
452 * Submitted by: Paul Haddad <paul@pth.com>
453 */
454 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
455 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
456 },
457 {
458 /*
459 * This old revision of the TDC3600 is also SCSI-1, and
460 * hangs upon serial number probing.
461 */
462 {
463 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
464 " TDC 3600", "U07:"
465 },
466 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
467 },
468 {
469 /*
470 * Would repond to all LUNs if asked for.
471 */
472 {
473 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
474 "CP150", "*"
475 },
476 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
477 },
478 {
479 /*
480 * Would repond to all LUNs if asked for.
481 */
482 {
483 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
484 "96X2*", "*"
485 },
486 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
487 },
488 {
489 /* Submitted by: Matthew Dodd <winter@jurai.net> */
490 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
491 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
492 },
493 {
494 /* Submitted by: Matthew Dodd <winter@jurai.net> */
495 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
496 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
497 },
498 {
499 /* TeraSolutions special settings for TRC-22 RAID */
500 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
501 /*quirks*/0, /*mintags*/55, /*maxtags*/255
502 },
503 {
504 /* Veritas Storage Appliance */
505 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
506 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
507 },
508 {
509 /*
510 * Would respond to all LUNs. Device type and removable
511 * flag are jumper-selectable.
512 */
513 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
514 "Tahiti 1", "*"
515 },
516 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
517 },
518 {
519 /* Default tagged queuing parameters for all devices */
520 {
521 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
522 /*vendor*/"*", /*product*/"*", /*revision*/"*"
523 },
524 /*quirks*/0, /*mintags*/2, /*maxtags*/255
525 },
526};
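
#if 0
/*
 * Sketch of what an additional quirk entry would look like, for a
 * hypothetical "ACME ROADRUNNER" disk that hangs on multi-LUN probes
 * and cannot do tagged queueing.  A real entry has to be placed before
 * the catch-all default entry at the end of the table above, since the
 * first matching entry wins.
 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "ROADRUNNER*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
#endif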

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	/* name */	"xpt",
	/* maj */	XPT_CDEV_MAJOR,
	/* flags */	0,
	/* port */	NULL,
	/* clone */	NULL,

	/* open */	xptopen,
	/* close */	xptclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	xptioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* dump */	nodump,
	/* psize */	nopsize
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
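
/*
 * Example kernel configuration fragment (illustrative) that satisfies
 * the constraints checked above -- CAMDEBUG plus all three of the
 * BUS/TARGET/LUN selectors, here tracing bus 0, target 1, lun 0.  The
 * CAM_DEBUG_INFO and CAM_DEBUG_CDB flag values come from cam_debug.h:
 *
 *	options	CAMDEBUG
 *	options	CAM_DEBUG_BUS=0
 *	options	CAM_DEBUG_TARGET=1
 *	options	CAM_DEBUG_LUN=0
 *	options	CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
 */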

/* Our boot-time initialization hook */
static void	xpt_init(void *);
SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);

static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_camnet;
static inthand2_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	cdevsw_add(&xpt_cdevsw, 0, 0);
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct thread *td)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct thread *td)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
	 * for the device peripheral driver list to change since we look at
	 * the whole thing in one pass, and we do it within a critical
	 * section.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		int unit;
		int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to call splz() to give the
		 * software interrupt handler a chance to run.
		 *
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		crit_enter();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		SET_FOREACH(p_drv, periphdriver_set) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			crit_exit();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splz();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = device->periphs.slh_first;
			     periph != NULL;
			     periph = periph->periph_links.sle_next, i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (periph->periph_links.sle_next)
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		crit_exit();
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
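
#if 0
/*
 * Illustrative user-space sketch -- not part of this file.  It shows
 * the CAMGETPASSTHRU handler above being used to look up the
 * passthrough device for "da0", following the XPT_GDEVLIST contract
 * described in the comment before the case statement.  Error handling
 * is omitted and the variable names are hypothetical.
 */
	union ccb ccb;
	int fd;

	fd = open("/dev/xpt0", O_RDWR);
	bzero(&ccb, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_GDEVLIST;
	strncpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
	ccb.cgdl.unit_number = 0;
	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
	    ccb.ccb_h.status == CAM_REQ_CMP) {
		printf("passthrough device is %s%d\n",
		       ccb.cgdl.periph_name, ccb.cgdl.unit_number);
	}
#endif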
1301
1302/* Functions accessed by the peripheral drivers */
1303static void
1304xpt_init(dummy)
1305 void *dummy;
1306{
1307 struct cam_sim *xpt_sim;
1308 struct cam_path *path;
1309 struct cam_devq *devq;
1310 cam_status status;
1311
1312 TAILQ_INIT(&xpt_busses);
1313 TAILQ_INIT(&cam_bioq);
1314 TAILQ_INIT(&cam_netq);
1315 SLIST_INIT(&ccb_freeq);
1316 STAILQ_INIT(&highpowerq);
1317
1318 /*
1319 * The xpt layer is, itself, the equivelent of a SIM.
1320 * Allow 16 ccbs in the ccb pool for it. This should
1321 * give decent parallelism when we probe busses and
1322 * perform other XPT functions.
1323 */
1324 devq = cam_simq_alloc(16);
1325 xpt_sim = cam_sim_alloc(xptaction,
1326 xptpoll,
1327 "xpt",
1328 /*softc*/NULL,
1329 /*unit*/0,
1330 /*max_dev_transactions*/0,
1331 /*max_tagged_dev_transactions*/0,
1332 devq);
3aed1355 1333 cam_simq_release(devq);
984263bc
MD
1334 xpt_max_ccbs = 16;
1335
1336 xpt_bus_register(xpt_sim, /*bus #*/0);
1337
1338 /*
1339 * Looking at the XPT from the SIM layer, the XPT is
1340 * the equivelent of a peripheral driver. Allocate
1341 * a peripheral driver entry for us.
1342 */
1343 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1344 CAM_TARGET_WILDCARD,
1345 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1346 printf("xpt_init: xpt_create_path failed with status %#x,"
1347 " failing attach\n", status);
1348 return;
1349 }
1350
1351 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1352 path, NULL, 0, NULL);
1353 xpt_free_path(path);
1354
1355 xpt_sim->softc = xpt_periph;
1356
1357 /*
1358 * Register a callback for when interrupts are enabled.
1359 */
898d961b
MD
1360 xpt_config_hook = malloc(sizeof(struct intr_config_hook),
1361 M_TEMP, M_INTWAIT | M_ZERO);
984263bc 1362 xpt_config_hook->ich_func = xpt_config;
a1e26a0c 1363 xpt_config_hook->ich_desc = "xpt";
984263bc
MD
1364 if (config_intrhook_establish(xpt_config_hook) != 0) {
1365 free (xpt_config_hook, M_TEMP);
1366 printf("xpt_init: config_intrhook_establish failed "
1367 "- failing attach\n");
1368 }
1369
1370 /* Install our software interrupt handlers */
45d76888
MD
1371 register_swi(SWI_CAMNET, swi_camnet, NULL, "swi_camnet", NULL);
1372 register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
984263bc
MD
1373}
1374
1375static cam_status
1376xptregister(struct cam_periph *periph, void *arg)
1377{
1378 if (periph == NULL) {
1379 printf("xptregister: periph was NULL!!\n");
1380 return(CAM_REQ_CMP_ERR);
1381 }
1382
1383 periph->softc = NULL;
1384
1385 xpt_periph = periph;
1386
1387 return(CAM_REQ_CMP);
1388}
1389
1390int32_t
1391xpt_add_periph(struct cam_periph *periph)
1392{
1393 struct cam_ed *device;
1394 int32_t status;
1395 struct periph_list *periph_head;
1396
1397 device = periph->path->device;
1398
1399 periph_head = &device->periphs;
1400
1401 status = CAM_REQ_CMP;
1402
1403 if (device != NULL) {
984263bc
MD
1404 /*
1405 * Make room for this peripheral
1406 * so it will fit in the queue
1407 * when it's scheduled to run
1408 */
4e01b467 1409 crit_enter();
984263bc
MD
1410 status = camq_resize(&device->drvq,
1411 device->drvq.array_size + 1);
1412
1413 device->generation++;
1414
1415 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
4e01b467 1416 crit_exit();
984263bc
MD
1417 }
1418
1419 xsoftc.generation++;
1420
1421 return (status);
1422}
1423
1424void
1425xpt_remove_periph(struct cam_periph *periph)
1426{
1427 struct cam_ed *device;
1428
1429 device = periph->path->device;
1430
1431 if (device != NULL) {
984263bc
MD
1432 struct periph_list *periph_head;
1433
1434 periph_head = &device->periphs;
1435
1436 /* Release the slot for this peripheral */
4e01b467 1437 crit_enter();
984263bc
MD
1438 camq_resize(&device->drvq, device->drvq.array_size - 1);
1439
1440 device->generation++;
1441
1442 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
4e01b467 1443 crit_exit();
984263bc
MD
1444 }
1445
1446 xsoftc.generation++;
1447
1448}
1449
1450void
1451xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1452{
984263bc
MD
1453 u_int mb;
1454 struct cam_path *path;
1455 struct ccb_trans_settings cts;
1456
1457 path = periph->path;
1458 /*
1459 * To ensure that this is printed in one piece,
1460 * mask out CAM interrupts.
1461 */
4e01b467 1462 crit_enter();
984263bc
MD
1463 printf("%s%d at %s%d bus %d target %d lun %d\n",
1464 periph->periph_name, periph->unit_number,
1465 path->bus->sim->sim_name,
1466 path->bus->sim->unit_number,
1467 path->bus->sim->bus_id,
1468 path->target->target_id,
1469 path->device->lun_id);
1470 printf("%s%d: ", periph->periph_name, periph->unit_number);
1471 scsi_print_inquiry(&path->device->inq_data);
1472 if ((bootverbose)
1473 && (path->device->serial_num_len > 0)) {
1474 /* Don't wrap the screen - print only the first 60 chars */
1475 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1476 periph->unit_number, path->device->serial_num);
1477 }
1478 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1479 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1480 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1481 xpt_action((union ccb*)&cts);
1482 if (cts.ccb_h.status == CAM_REQ_CMP) {
1483 u_int speed;
1484 u_int freq;
1485
1486 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1487 && cts.sync_offset != 0) {
1488 freq = scsi_calc_syncsrate(cts.sync_period);
1489 speed = freq;
1490 } else {
1491 struct ccb_pathinq cpi;
1492
1493 /* Ask the SIM for its base transfer speed */
1494 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1495 cpi.ccb_h.func_code = XPT_PATH_INQ;
1496 xpt_action((union ccb *)&cpi);
1497
1498 speed = cpi.base_transfer_speed;
1499 freq = 0;
1500 }
1501 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1502 speed *= (0x01 << cts.bus_width);
1503 mb = speed / 1000;
1504 if (mb > 0)
1505 printf("%s%d: %d.%03dMB/s transfers",
1506 periph->periph_name, periph->unit_number,
1507 mb, speed % 1000);
1508 else
1509 printf("%s%d: %dKB/s transfers", periph->periph_name,
1510 periph->unit_number, speed);
1511 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1512 && cts.sync_offset != 0) {
1513 printf(" (%d.%03dMHz, offset %d", freq / 1000,
1514 freq % 1000, cts.sync_offset);
1515 }
1516 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1517 && cts.bus_width > 0) {
1518 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1519 && cts.sync_offset != 0) {
1520 printf(", ");
1521 } else {
1522 printf(" (");
1523 }
1524 printf("%dbit)", 8 * (0x01 << cts.bus_width));
1525 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1526 && cts.sync_offset != 0) {
1527 printf(")");
1528 }
1529
1530 if (path->device->inq_flags & SID_CmdQue
1531 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1532 printf(", Tagged Queueing Enabled");
1533 }
1534
1535 printf("\n");
1536 } else if (path->device->inq_flags & SID_CmdQue
1537 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1538 printf("%s%d: Tagged Queueing Enabled\n",
1539 periph->periph_name, periph->unit_number);
1540 }
1541
1542 /*
1543 * We only want to print the caller's announce string if they've
1544 * passed one in..
1545 */
1546 if (announce_string != NULL)
1547 printf("%s%d: %s\n", periph->periph_name,
1548 periph->unit_number, announce_string);
4e01b467 1549 crit_exit();
984263bc
MD
1550}
1551
1552
1553static dev_match_ret
1554xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
1555 struct cam_eb *bus)
1556{
1557 dev_match_ret retval;
1558 int i;
1559
1560 retval = DM_RET_NONE;
1561
1562 /*
1563 * If we aren't given something to match against, that's an error.
1564 */
1565 if (bus == NULL)
1566 return(DM_RET_ERROR);
1567
1568 /*
1569 * If there are no match entries, then this bus matches no
1570 * matter what.
1571 */
1572 if ((patterns == NULL) || (num_patterns == 0))
1573 return(DM_RET_DESCEND | DM_RET_COPY);
1574
1575 for (i = 0; i < num_patterns; i++) {
1576 struct bus_match_pattern *cur_pattern;
1577
1578 /*
1579 * If the pattern in question isn't for a bus node, we
1580 * aren't interested. However, we do indicate to the
1581 * calling routine that we should continue descending the
1582 * tree, since the user wants to match against lower-level
1583 * EDT elements.
1584 */
1585 if (patterns[i].type != DEV_MATCH_BUS) {
1586 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1587 retval |= DM_RET_DESCEND;
1588 continue;
1589 }
1590
1591 cur_pattern = &patterns[i].pattern.bus_pattern;
1592
1593 /*
1594 * If they want to match any bus node, we give them any
1595 * device node.
1596 */
1597 if (cur_pattern->flags == BUS_MATCH_ANY) {
1598 /* set the copy flag */
1599 retval |= DM_RET_COPY;
1600
1601 /*
1602 * If we've already decided on an action, go ahead
1603 * and return.
1604 */
1605 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1606 return(retval);
1607 }
1608
1609 /*
1610 * Not sure why someone would do this...
1611 */
1612 if (cur_pattern->flags == BUS_MATCH_NONE)
1613 continue;
1614
1615 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1616 && (cur_pattern->path_id != bus->path_id))
1617 continue;
1618
1619 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1620 && (cur_pattern->bus_id != bus->sim->bus_id))
1621 continue;
1622
1623 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1624 && (cur_pattern->unit_number != bus->sim->unit_number))
1625 continue;
1626
1627 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1628 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1629 DEV_IDLEN) != 0))
1630 continue;
1631
1632 /*
1633 * If we get to this point, the user definitely wants
1634 * information on this bus. So tell the caller to copy the
1635 * data out.
1636 */
1637 retval |= DM_RET_COPY;
1638
1639 /*
1640 * If the return action has been set to descend, then we
1641 * know that we've already seen a non-bus matching
1642 * expression, therefore we need to further descend the tree.
1643 * This won't change by continuing around the loop, so we
1644 * go ahead and return. If we haven't seen a non-bus
1645 * matching expression, we keep going around the loop until
1646 * we exhaust the matching expressions. We'll set the stop
1647 * flag once we fall out of the loop.
1648 */
1649 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1650 return(retval);
1651 }
1652
1653 /*
1654 * If the return action hasn't been set to descend yet, that means
1655 * we haven't seen anything other than bus matching patterns. So
1656 * tell the caller to stop descending the tree -- the user doesn't
1657 * want to match against lower level tree elements.
1658 */
1659 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1660 retval |= DM_RET_STOP;
1661
1662 return(retval);
1663}
1664
1665static dev_match_ret
1666xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
1667 struct cam_ed *device)
1668{
1669 dev_match_ret retval;
1670 int i;
1671
1672 retval = DM_RET_NONE;
1673
1674 /*
1675 * If we aren't given something to match against, that's an error.
1676 */
1677 if (device == NULL)
1678 return(DM_RET_ERROR);
1679
1680 /*
1681 * If there are no match entries, then this device matches no
1682 * matter what.
1683 */
1684 if ((patterns == NULL) || (patterns == 0))
1685 return(DM_RET_DESCEND | DM_RET_COPY);
1686
1687 for (i = 0; i < num_patterns; i++) {
1688 struct device_match_pattern *cur_pattern;
1689
1690 /*
1691 * If the pattern in question isn't for a device node, we
1692 * aren't interested.
1693 */
1694 if (patterns[i].type != DEV_MATCH_DEVICE) {
1695 if ((patterns[i].type == DEV_MATCH_PERIPH)
1696 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1697 retval |= DM_RET_DESCEND;
1698 continue;
1699 }
1700
1701 cur_pattern = &patterns[i].pattern.device_pattern;
1702
1703 /*
1704 * If they want to match any device node, we give them any
1705 * device node.
1706 */
1707 if (cur_pattern->flags == DEV_MATCH_ANY) {
1708 /* set the copy flag */
1709 retval |= DM_RET_COPY;
1710
1711
1712 /*
1713 * If we've already decided on an action, go ahead
1714 * and return.
1715 */
1716 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1717 return(retval);
1718 }
1719
1720 /*
1721 * Not sure why someone would do this...
1722 */
1723 if (cur_pattern->flags == DEV_MATCH_NONE)
1724 continue;
1725
1726 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1727 && (cur_pattern->path_id != device->target->bus->path_id))
1728 continue;
1729
1730 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1731 && (cur_pattern->target_id != device->target->target_id))
1732 continue;
1733
1734 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1735 && (cur_pattern->target_lun != device->lun_id))
1736 continue;
1737
1738 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1739 && (cam_quirkmatch((caddr_t)&device->inq_data,
1740 (caddr_t)&cur_pattern->inq_pat,
1741 1, sizeof(cur_pattern->inq_pat),
1742 scsi_static_inquiry_match) == NULL))
1743 continue;
1744
1745 /*
1746 * If we get to this point, the user definitely wants
1747 * information on this device. So tell the caller to copy
1748 * the data out.
1749 */
1750 retval |= DM_RET_COPY;
1751
1752 /*
1753 * If the return action has been set to descend, then we
1754 * know that we've already seen a peripheral matching
1755 * expression, therefore we need to further descend the tree.
1756 * This won't change by continuing around the loop, so we
1757 * go ahead and return. If we haven't seen a peripheral
1758 * matching expression, we keep going around the loop until
1759 * we exhaust the matching expressions. We'll set the stop
1760 * flag once we fall out of the loop.
1761 */
1762 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1763 return(retval);
1764 }
1765
1766 /*
1767 * If the return action hasn't been set to descend yet, that means
1768 * we haven't seen any peripheral matching patterns. So tell the
1769 * caller to stop descending the tree -- the user doesn't want to
1770 * match against lower level tree elements.
1771 */
1772 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1773 retval |= DM_RET_STOP;
1774
1775 return(retval);
1776}
1777
1778/*
1779 * Match a single peripheral against any number of match patterns.
1780 */
1781static dev_match_ret
1782xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1783 struct cam_periph *periph)
1784{
1785 dev_match_ret retval;
1786 int i;
1787
1788 /*
1789 * If we aren't given something to match against, that's an error.
1790 */
1791 if (periph == NULL)
1792 return(DM_RET_ERROR);
1793
1794 /*
1795 * If there are no match entries, then this peripheral matches no
1796 * matter what.
1797 */
1798 if ((patterns == NULL) || (num_patterns == 0))
1799 return(DM_RET_STOP | DM_RET_COPY);
1800
1801 /*
1802 * There aren't any nodes below a peripheral node, so there's no
1803 * reason to descend the tree any further.
1804 */
1805 retval = DM_RET_STOP;
1806
1807 for (i = 0; i < num_patterns; i++) {
1808 struct periph_match_pattern *cur_pattern;
1809
1810 /*
1811 * If the pattern in question isn't for a peripheral, we
1812 * aren't interested.
1813 */
1814 if (patterns[i].type != DEV_MATCH_PERIPH)
1815 continue;
1816
1817 cur_pattern = &patterns[i].pattern.periph_pattern;
1818
1819 /*
1820 * If they want to match on anything, then we will do so.
1821 */
1822 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1823 /* set the copy flag */
1824 retval |= DM_RET_COPY;
1825
1826 /*
1827 * We've already set the return action to stop,
1828 * since there are no nodes below peripherals in
1829 * the tree.
1830 */
1831 return(retval);
1832 }
1833
1834 /*
1835 * Not sure why someone would do this...
1836 */
1837 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1838 continue;
1839
1840 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1841 && (cur_pattern->path_id != periph->path->bus->path_id))
1842 continue;
1843
1844 /*
1845 * For the target and lun id's, we have to make sure the
1846 * target and lun pointers aren't NULL. The xpt peripheral
1847 * has a wildcard target and device.
1848 */
1849 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1850 && ((periph->path->target == NULL)
1851 ||(cur_pattern->target_id != periph->path->target->target_id)))
1852 continue;
1853
1854 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1855 && ((periph->path->device == NULL)
1856 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1857 continue;
1858
1859 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1860 && (cur_pattern->unit_number != periph->unit_number))
1861 continue;
1862
1863 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1864 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1865 DEV_IDLEN) != 0))
1866 continue;
1867
1868 /*
1869 * If we get to this point, the user definitely wants
1870 * information on this peripheral. So tell the caller to
1871 * copy the data out.
1872 */
1873 retval |= DM_RET_COPY;
1874
1875 /*
1876 * The return action has already been set to stop, since
1877 * peripherals don't have any nodes below them in the EDT.
1878 */
1879 return(retval);
1880 }
1881
1882 /*
1883 * If we get to this point, the peripheral that was passed in
1884 * doesn't match any of the patterns.
1885 */
1886 return(retval);
1887}
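/*
 * A note on the dev_match_ret encoding used by the matching functions
 * above; the snippet is purely illustrative. The action values selected
 * by DM_RET_ACTION_MASK (DM_RET_NONE, DM_RET_STOP, DM_RET_DESCEND,
 * DM_RET_ERROR) are mutually exclusive, while DM_RET_COPY is a flag
 * OR'd on top of whichever action was chosen:
 *
 *	retval = DM_RET_STOP;		   never descend below a periph
 *	retval |= DM_RET_COPY;		   ...but do copy this node out
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *		;			   keep walking the EDT
 */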
1888
1889static int
1890xptedtbusfunc(struct cam_eb *bus, void *arg)
1891{
1892 struct ccb_dev_match *cdm;
1893 dev_match_ret retval;
1894
1895 cdm = (struct ccb_dev_match *)arg;
1896
1897 /*
1898 * If our position is for something deeper in the tree, that means
1899 * that we've already seen this node. So, we keep going down.
1900 */
1901 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1902 && (cdm->pos.cookie.bus == bus)
1903 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1904 && (cdm->pos.cookie.target != NULL))
1905 retval = DM_RET_DESCEND;
1906 else
1907 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1908
1909 /*
1910 * If we got an error, bail out of the search.
1911 */
1912 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1913 cdm->status = CAM_DEV_MATCH_ERROR;
1914 return(0);
1915 }
1916
1917 /*
1918 * If the copy flag is set, copy this bus out.
1919 */
1920 if (retval & DM_RET_COPY) {
1921 int spaceleft, j;
1922
1923 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1924 sizeof(struct dev_match_result));
1925
1926 /*
1927 * If we don't have enough space to put in another
1928 * match result, save our position and tell the
1929 * user there are more devices to check.
1930 */
1931 if (spaceleft < sizeof(struct dev_match_result)) {
1932 bzero(&cdm->pos, sizeof(cdm->pos));
1933 cdm->pos.position_type =
1934 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1935
1936 cdm->pos.cookie.bus = bus;
1937 cdm->pos.generations[CAM_BUS_GENERATION]=
1938 bus_generation;
1939 cdm->status = CAM_DEV_MATCH_MORE;
1940 return(0);
1941 }
1942 j = cdm->num_matches;
1943 cdm->num_matches++;
1944 cdm->matches[j].type = DEV_MATCH_BUS;
1945 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1946 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1947 cdm->matches[j].result.bus_result.unit_number =
1948 bus->sim->unit_number;
1949 strncpy(cdm->matches[j].result.bus_result.dev_name,
1950 bus->sim->sim_name, DEV_IDLEN);
1951 }
1952
1953 /*
1954 * If the user is only interested in busses, there's no
1955 * reason to descend to the next level in the tree.
1956 */
1957 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1958 return(1);
1959
1960 /*
1961 * If there is a target generation recorded, check it to
1962 * make sure the target list hasn't changed.
1963 */
1964 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1965 && (bus == cdm->pos.cookie.bus)
1966 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1967 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1968 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1969 bus->generation)) {
1970 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1971 return(0);
1972 }
1973
1974 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1975 && (cdm->pos.cookie.bus == bus)
1976 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1977 && (cdm->pos.cookie.target != NULL))
1978 return(xpttargettraverse(bus,
1979 (struct cam_et *)cdm->pos.cookie.target,
1980 xptedttargetfunc, arg));
1981 else
1982 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1983}
1984
1985static int
1986xptedttargetfunc(struct cam_et *target, void *arg)
1987{
1988 struct ccb_dev_match *cdm;
1989
1990 cdm = (struct ccb_dev_match *)arg;
1991
1992 /*
1993 * If there is a device list generation recorded, check it to
1994 * make sure the device list hasn't changed.
1995 */
1996 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1997 && (cdm->pos.cookie.bus == target->bus)
1998 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1999 && (cdm->pos.cookie.target == target)
2000 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2001 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2002 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2003 target->generation)) {
2004 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2005 return(0);
2006 }
2007
2008 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2009 && (cdm->pos.cookie.bus == target->bus)
2010 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2011 && (cdm->pos.cookie.target == target)
2012 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2013 && (cdm->pos.cookie.device != NULL))
2014 return(xptdevicetraverse(target,
2015 (struct cam_ed *)cdm->pos.cookie.device,
2016 xptedtdevicefunc, arg));
2017 else
2018 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2019}
2020
2021static int
2022xptedtdevicefunc(struct cam_ed *device, void *arg)
2023{
2024
2025 struct ccb_dev_match *cdm;
2026 dev_match_ret retval;
2027
2028 cdm = (struct ccb_dev_match *)arg;
2029
2030 /*
2031 * If our position is for something deeper in the tree, that means
2032 * that we've already seen this node. So, we keep going down.
2033 */
2034 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2035 && (cdm->pos.cookie.device == device)
2036 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2037 && (cdm->pos.cookie.periph != NULL))
2038 retval = DM_RET_DESCEND;
2039 else
2040 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2041 device);
2042
2043 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2044 cdm->status = CAM_DEV_MATCH_ERROR;
2045 return(0);
2046 }
2047
2048 /*
2049 * If the copy flag is set, copy this device out.
2050 */
2051 if (retval & DM_RET_COPY) {
2052 int spaceleft, j;
2053
2054 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2055 sizeof(struct dev_match_result));
2056
2057 /*
2058 * If we don't have enough space to put in another
2059 * match result, save our position and tell the
2060 * user there are more devices to check.
2061 */
2062 if (spaceleft < sizeof(struct dev_match_result)) {
2063 bzero(&cdm->pos, sizeof(cdm->pos));
2064 cdm->pos.position_type =
2065 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2066 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2067
2068 cdm->pos.cookie.bus = device->target->bus;
2069 cdm->pos.generations[CAM_BUS_GENERATION]=
2070 bus_generation;
2071 cdm->pos.cookie.target = device->target;
2072 cdm->pos.generations[CAM_TARGET_GENERATION] =
2073 device->target->bus->generation;
2074 cdm->pos.cookie.device = device;
2075 cdm->pos.generations[CAM_DEV_GENERATION] =
2076 device->target->generation;
2077 cdm->status = CAM_DEV_MATCH_MORE;
2078 return(0);
2079 }
2080 j = cdm->num_matches;
2081 cdm->num_matches++;
2082 cdm->matches[j].type = DEV_MATCH_DEVICE;
2083 cdm->matches[j].result.device_result.path_id =
2084 device->target->bus->path_id;
2085 cdm->matches[j].result.device_result.target_id =
2086 device->target->target_id;
2087 cdm->matches[j].result.device_result.target_lun =
2088 device->lun_id;
2089 bcopy(&device->inq_data,
2090 &cdm->matches[j].result.device_result.inq_data,
2091 sizeof(struct scsi_inquiry_data));
2092
2093 /* Let the user know whether this device is unconfigured */
2094 if (device->flags & CAM_DEV_UNCONFIGURED)
2095 cdm->matches[j].result.device_result.flags =
2096 DEV_RESULT_UNCONFIGURED;
2097 else
2098 cdm->matches[j].result.device_result.flags =
2099 DEV_RESULT_NOFLAG;
2100 }
2101
2102 /*
2103 * If the user isn't interested in peripherals, don't descend
2104 * the tree any further.
2105 */
2106 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2107 return(1);
2108
2109 /*
2110 * If there is a peripheral list generation recorded, make sure
2111 * it hasn't changed.
2112 */
2113 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2114 && (device->target->bus == cdm->pos.cookie.bus)
2115 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2116 && (device->target == cdm->pos.cookie.target)
2117 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2118 && (device == cdm->pos.cookie.device)
2119 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2120 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2121 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2122 device->generation)){
2123 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2124 return(0);
2125 }
2126
2127 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2128 && (cdm->pos.cookie.bus == device->target->bus)
2129 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2130 && (cdm->pos.cookie.target == device->target)
2131 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2132 && (cdm->pos.cookie.device == device)
2133 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2134 && (cdm->pos.cookie.periph != NULL))
2135 return(xptperiphtraverse(device,
2136 (struct cam_periph *)cdm->pos.cookie.periph,
2137 xptedtperiphfunc, arg));
2138 else
2139 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2140}
2141
2142static int
2143xptedtperiphfunc(struct cam_periph *periph, void *arg)
2144{
2145 struct ccb_dev_match *cdm;
2146 dev_match_ret retval;
2147
2148 cdm = (struct ccb_dev_match *)arg;
2149
2150 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2151
2152 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2153 cdm->status = CAM_DEV_MATCH_ERROR;
2154 return(0);
2155 }
2156
2157 /*
2158 * If the copy flag is set, copy this peripheral out.
2159 */
2160 if (retval & DM_RET_COPY) {
2161 int spaceleft, j;
2162
2163 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2164 sizeof(struct dev_match_result));
2165
2166 /*
2167 * If we don't have enough space to put in another
2168 * match result, save our position and tell the
2169 * user there are more devices to check.
2170 */
2171 if (spaceleft < sizeof(struct dev_match_result)) {
2172 bzero(&cdm->pos, sizeof(cdm->pos));
2173 cdm->pos.position_type =
2174 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2175 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2176 CAM_DEV_POS_PERIPH;
2177
2178 cdm->pos.cookie.bus = periph->path->bus;
2179 cdm->pos.generations[CAM_BUS_GENERATION]=
2180 bus_generation;
2181 cdm->pos.cookie.target = periph->path->target;
2182 cdm->pos.generations[CAM_TARGET_GENERATION] =
2183 periph->path->bus->generation;
2184 cdm->pos.cookie.device = periph->path->device;
2185 cdm->pos.generations[CAM_DEV_GENERATION] =
2186 periph->path->target->generation;
2187 cdm->pos.cookie.periph = periph;
2188 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2189 periph->path->device->generation;
2190 cdm->status = CAM_DEV_MATCH_MORE;
2191 return(0);
2192 }
2193
2194 j = cdm->num_matches;
2195 cdm->num_matches++;
2196 cdm->matches[j].type = DEV_MATCH_PERIPH;
2197 cdm->matches[j].result.periph_result.path_id =
2198 periph->path->bus->path_id;
2199 cdm->matches[j].result.periph_result.target_id =
2200 periph->path->target->target_id;
2201 cdm->matches[j].result.periph_result.target_lun =
2202 periph->path->device->lun_id;
2203 cdm->matches[j].result.periph_result.unit_number =
2204 periph->unit_number;
2205 strncpy(cdm->matches[j].result.periph_result.periph_name,
2206 periph->periph_name, DEV_IDLEN);
2207 }
2208
2209 return(1);
2210}
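/*
 * All of the xptedt*func callbacks above share one suspend/resume
 * pattern: when the result buffer fills, the current bus/target/
 * device/periph cookies and the relevant list generations are saved
 * in cdm->pos and CAM_DEV_MATCH_MORE is returned; a later
 * XPT_DEV_MATCH call restarts the traversal at exactly that node,
 * failing with CAM_DEV_MATCH_LIST_CHANGED if a generation count
 * shows that the list mutated in the meantime.
 */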
2211
2212static int
2213xptedtmatch(struct ccb_dev_match *cdm)
2214{
2215 int ret;
2216
2217 cdm->num_matches = 0;
2218
2219 /*
2220 * Check the bus list generation. If it has changed, the user
2221 * needs to reset everything and start over.
2222 */
2223 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2224 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2225 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2226 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2227 return(0);
2228 }
2229
2230 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2231 && (cdm->pos.cookie.bus != NULL))
2232 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2233 xptedtbusfunc, cdm);
2234 else
2235 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2236
2237 /*
2238 * If we get back 0, that means that we had to stop before fully
2239 * traversing the EDT. It also means that one of the subroutines
2240 * has set the status field to the proper value. If we get back 1,
2241 * we've fully traversed the EDT and copied out any matching entries.
2242 */
2243 if (ret == 1)
2244 cdm->status = CAM_DEV_MATCH_LAST;
2245
2246 return(ret);
2247}
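/*
 * A minimal sketch of how a consumer might drive XPT_DEV_MATCH,
 * resuming while the XPT reports more results; the buffer names are
 * hypothetical and the ccb header/path setup is elided:
 *
 *	struct ccb_dev_match cdm;
 *
 *	bzero(&cdm.pos, sizeof(cdm.pos));	 start at the EDT root
 *	cdm.ccb_h.func_code = XPT_DEV_MATCH;
 *	cdm.num_patterns = 0;			 match every node
 *	cdm.patterns = NULL;
 *	cdm.match_buf_len = bufsize;
 *	cdm.matches = match_buf;
 *	do {
 *		xpt_action((union ccb *)&cdm);
 *		...consume cdm.num_matches entries from match_buf...
 *	} while (cdm.status == CAM_DEV_MATCH_MORE);
 */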
2248
2249static int
2250xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2251{
2252 struct ccb_dev_match *cdm;
2253
2254 cdm = (struct ccb_dev_match *)arg;
2255
2256 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2257 && (cdm->pos.cookie.pdrv == pdrv)
2258 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2259 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2260 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2261 (*pdrv)->generation)) {
2262 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2263 return(0);
2264 }
2265
2266 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2267 && (cdm->pos.cookie.pdrv == pdrv)
2268 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2269 && (cdm->pos.cookie.periph != NULL))
2270 return(xptpdperiphtraverse(pdrv,
2271 (struct cam_periph *)cdm->pos.cookie.periph,
2272 xptplistperiphfunc, arg));
2273 else
2274 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2275}
2276
2277static int
2278xptplistperiphfunc(struct cam_periph *periph, void *arg)
2279{
2280 struct ccb_dev_match *cdm;
2281 dev_match_ret retval;
2282
2283 cdm = (struct ccb_dev_match *)arg;
2284
2285 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2286
2287 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2288 cdm->status = CAM_DEV_MATCH_ERROR;
2289 return(0);
2290 }
2291
2292 /*
2293 * If the copy flag is set, copy this peripheral out.
2294 */
2295 if (retval & DM_RET_COPY) {
2296 int spaceleft, j;
2297
2298 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2299 sizeof(struct dev_match_result));
2300
2301 /*
2302 * If we don't have enough space to put in another
2303 * match result, save our position and tell the
2304 * user there are more devices to check.
2305 */
2306 if (spaceleft < sizeof(struct dev_match_result)) {
2307 struct periph_driver **pdrv;
2308
2309 pdrv = NULL;
2310 bzero(&cdm->pos, sizeof(cdm->pos));
2311 cdm->pos.position_type =
2312 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2313 CAM_DEV_POS_PERIPH;
2314
2315 /*
 2316			 * This may look a bit nonsensical, but it is
2317 * actually quite logical. There are very few
2318 * peripheral drivers, and bloating every peripheral
2319 * structure with a pointer back to its parent
2320 * peripheral driver linker set entry would cost
2321 * more in the long run than doing this quick lookup.
2322 */
 2323			SET_FOREACH(pdrv, periphdriver_set) {
2324 if (strcmp((*pdrv)->driver_name,
2325 periph->periph_name) == 0)
2326 break;
2327 }
2328
 2329			if (*pdrv == NULL) {
2330 cdm->status = CAM_DEV_MATCH_ERROR;
2331 return(0);
2332 }
2333
2334 cdm->pos.cookie.pdrv = pdrv;
2335 /*
2336 * The periph generation slot does double duty, as
2337 * does the periph pointer slot. They are used for
2338 * both edt and pdrv lookups and positioning.
2339 */
2340 cdm->pos.cookie.periph = periph;
2341 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2342 (*pdrv)->generation;
2343 cdm->status = CAM_DEV_MATCH_MORE;
2344 return(0);
2345 }
2346
2347 j = cdm->num_matches;
2348 cdm->num_matches++;
2349 cdm->matches[j].type = DEV_MATCH_PERIPH;
2350 cdm->matches[j].result.periph_result.path_id =
2351 periph->path->bus->path_id;
2352
2353 /*
2354 * The transport layer peripheral doesn't have a target or
2355 * lun.
2356 */
2357 if (periph->path->target)
2358 cdm->matches[j].result.periph_result.target_id =
2359 periph->path->target->target_id;
2360 else
2361 cdm->matches[j].result.periph_result.target_id = -1;
2362
2363 if (periph->path->device)
2364 cdm->matches[j].result.periph_result.target_lun =
2365 periph->path->device->lun_id;
2366 else
2367 cdm->matches[j].result.periph_result.target_lun = -1;
2368
2369 cdm->matches[j].result.periph_result.unit_number =
2370 periph->unit_number;
2371 strncpy(cdm->matches[j].result.periph_result.periph_name,
2372 periph->periph_name, DEV_IDLEN);
2373 }
2374
2375 return(1);
2376}
2377
2378static int
2379xptperiphlistmatch(struct ccb_dev_match *cdm)
2380{
2381 int ret;
2382
2383 cdm->num_matches = 0;
2384
2385 /*
2386 * At this point in the edt traversal function, we check the bus
2387 * list generation to make sure that no busses have been added or
2388 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2389 * For the peripheral driver list traversal function, however, we
2390 * don't have to worry about new peripheral driver types coming or
2391 * going; they're in a linker set, and therefore can't change
2392 * without a recompile.
2393 */
2394
2395 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2396 && (cdm->pos.cookie.pdrv != NULL))
2397 ret = xptpdrvtraverse(
2398 (struct periph_driver **)cdm->pos.cookie.pdrv,
2399 xptplistpdrvfunc, cdm);
2400 else
2401 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2402
2403 /*
2404 * If we get back 0, that means that we had to stop before fully
2405 * traversing the peripheral driver tree. It also means that one of
2406 * the subroutines has set the status field to the proper value. If
2407 * we get back 1, we've fully traversed the EDT and copied out any
2408 * matching entries.
2409 */
2410 if (ret == 1)
2411 cdm->status = CAM_DEV_MATCH_LAST;
2412
2413 return(ret);
2414}
2415
2416static int
2417xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2418{
2419 struct cam_eb *bus, *next_bus;
2420 int retval;
2421
2422 retval = 1;
2423
2424 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2425 bus != NULL;
2426 bus = next_bus) {
2427 next_bus = TAILQ_NEXT(bus, links);
2428
2429 retval = tr_func(bus, arg);
2430 if (retval == 0)
2431 return(retval);
2432 }
2433
2434 return(retval);
2435}
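/*
 * The contract for this and the other *traverse() helpers below is
 * uniform: tr_func returns non-zero to continue the walk and 0 to
 * halt it, and that result propagates back to the caller. A
 * hypothetical callback that merely counts busses would look like:
 *
 *	static int
 *	xptcountbusfunc(struct cam_eb *bus, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);		 keep going
 *	}
 *
 * invoked as xptbustraverse(NULL, xptcountbusfunc, &count).
 */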
2436
2437static int
2438xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2439 xpt_targetfunc_t *tr_func, void *arg)
2440{
2441 struct cam_et *target, *next_target;
2442 int retval;
2443
2444 retval = 1;
2445 for (target = (start_target ? start_target :
2446 TAILQ_FIRST(&bus->et_entries));
2447 target != NULL; target = next_target) {
2448
2449 next_target = TAILQ_NEXT(target, links);
2450
2451 retval = tr_func(target, arg);
2452
2453 if (retval == 0)
2454 return(retval);
2455 }
2456
2457 return(retval);
2458}
2459
2460static int
2461xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2462 xpt_devicefunc_t *tr_func, void *arg)
2463{
2464 struct cam_ed *device, *next_device;
2465 int retval;
2466
2467 retval = 1;
2468 for (device = (start_device ? start_device :
2469 TAILQ_FIRST(&target->ed_entries));
2470 device != NULL;
2471 device = next_device) {
2472
2473 next_device = TAILQ_NEXT(device, links);
2474
2475 retval = tr_func(device, arg);
2476
2477 if (retval == 0)
2478 return(retval);
2479 }
2480
2481 return(retval);
2482}
2483
2484static int
2485xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2486 xpt_periphfunc_t *tr_func, void *arg)
2487{
2488 struct cam_periph *periph, *next_periph;
2489 int retval;
2490
2491 retval = 1;
2492
2493 for (periph = (start_periph ? start_periph :
2494 SLIST_FIRST(&device->periphs));
2495 periph != NULL;
2496 periph = next_periph) {
2497
2498 next_periph = SLIST_NEXT(periph, periph_links);
2499
2500 retval = tr_func(periph, arg);
2501 if (retval == 0)
2502 return(retval);
2503 }
2504
2505 return(retval);
2506}
2507
2508static int
2509xptpdrvtraverse(struct periph_driver **start_pdrv,
2510 xpt_pdrvfunc_t *tr_func, void *arg)
2511{
2512 struct periph_driver **pdrv;
2513 int retval;
2514
2515 retval = 1;
2516
2517 /*
2518 * We don't traverse the peripheral driver list like we do the
2519 * other lists, because it is a linker set, and therefore cannot be
2520 * changed during runtime. If the peripheral driver list is ever
2521 * re-done to be something other than a linker set (i.e. it can
2522 * change while the system is running), the list traversal should
2523 * be modified to work like the other traversal functions.
2524 */
2525 SET_FOREACH(pdrv, periphdriver_set) {
2526 if (start_pdrv == NULL || start_pdrv == pdrv) {
2527 retval = tr_func(pdrv, arg);
2528 if (retval == 0)
2529 return(retval);
 2530			start_pdrv = NULL; /* traverse remainder */
 2531		}
 2532	}
2533 return(retval);
2534}
2535
2536static int
2537xptpdperiphtraverse(struct periph_driver **pdrv,
2538 struct cam_periph *start_periph,
2539 xpt_periphfunc_t *tr_func, void *arg)
2540{
2541 struct cam_periph *periph, *next_periph;
2542 int retval;
2543
2544 retval = 1;
2545
2546 for (periph = (start_periph ? start_periph :
2547 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2548 periph = next_periph) {
2549
2550 next_periph = TAILQ_NEXT(periph, unit_links);
2551
2552 retval = tr_func(periph, arg);
2553 if (retval == 0)
2554 return(retval);
2555 }
2556 return(retval);
2557}
2558
2559static int
2560xptdefbusfunc(struct cam_eb *bus, void *arg)
2561{
2562 struct xpt_traverse_config *tr_config;
2563
2564 tr_config = (struct xpt_traverse_config *)arg;
2565
2566 if (tr_config->depth == XPT_DEPTH_BUS) {
2567 xpt_busfunc_t *tr_func;
2568
2569 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2570
2571 return(tr_func(bus, tr_config->tr_arg));
2572 } else
2573 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2574}
2575
2576static int
2577xptdeftargetfunc(struct cam_et *target, void *arg)
2578{
2579 struct xpt_traverse_config *tr_config;
2580
2581 tr_config = (struct xpt_traverse_config *)arg;
2582
2583 if (tr_config->depth == XPT_DEPTH_TARGET) {
2584 xpt_targetfunc_t *tr_func;
2585
2586 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2587
2588 return(tr_func(target, tr_config->tr_arg));
2589 } else
2590 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2591}
2592
2593static int
2594xptdefdevicefunc(struct cam_ed *device, void *arg)
2595{
2596 struct xpt_traverse_config *tr_config;
2597
2598 tr_config = (struct xpt_traverse_config *)arg;
2599
2600 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2601 xpt_devicefunc_t *tr_func;
2602
2603 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2604
2605 return(tr_func(device, tr_config->tr_arg));
2606 } else
2607 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2608}
2609
2610static int
2611xptdefperiphfunc(struct cam_periph *periph, void *arg)
2612{
2613 struct xpt_traverse_config *tr_config;
2614 xpt_periphfunc_t *tr_func;
2615
2616 tr_config = (struct xpt_traverse_config *)arg;
2617
2618 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2619
2620 /*
2621 * Unlike the other default functions, we don't check for depth
2622 * here. The peripheral driver level is the last level in the EDT,
2623 * so if we're here, we should execute the function in question.
2624 */
2625 return(tr_func(periph, tr_config->tr_arg));
2626}
2627
2628/*
2629 * Execute the given function for every bus in the EDT.
2630 */
2631static int
2632xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2633{
2634 struct xpt_traverse_config tr_config;
2635
2636 tr_config.depth = XPT_DEPTH_BUS;
2637 tr_config.tr_func = tr_func;
2638 tr_config.tr_arg = arg;
2639
2640 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2641}
2642
2643#ifdef notusedyet
2644/*
2645 * Execute the given function for every target in the EDT.
2646 */
2647static int
2648xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2649{
2650 struct xpt_traverse_config tr_config;
2651
2652 tr_config.depth = XPT_DEPTH_TARGET;
2653 tr_config.tr_func = tr_func;
2654 tr_config.tr_arg = arg;
2655
2656 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2657}
2658#endif /* notusedyet */
2659
2660/*
2661 * Execute the given function for every device in the EDT.
2662 */
2663static int
2664xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2665{
2666 struct xpt_traverse_config tr_config;
2667
2668 tr_config.depth = XPT_DEPTH_DEVICE;
2669 tr_config.tr_func = tr_func;
2670 tr_config.tr_arg = arg;
2671
2672 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2673}
2674
2675#ifdef notusedyet
2676/*
2677 * Execute the given function for every peripheral in the EDT.
2678 */
2679static int
2680xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2681{
2682 struct xpt_traverse_config tr_config;
2683
2684 tr_config.depth = XPT_DEPTH_PERIPH;
2685 tr_config.tr_func = tr_func;
2686 tr_config.tr_arg = arg;
2687
2688 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2689}
2690#endif /* notusedyet */
2691
2692static int
2693xptsetasyncfunc(struct cam_ed *device, void *arg)
2694{
2695 struct cam_path path;
2696 struct ccb_getdev cgd;
2697 struct async_node *cur_entry;
2698
2699 cur_entry = (struct async_node *)arg;
2700
2701 /*
2702 * Don't report unconfigured devices (Wildcard devs,
2703 * devices only for target mode, device instances
2704 * that have been invalidated but are waiting for
2705 * their last reference count to be released).
2706 */
2707 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2708 return (1);
2709
2710 xpt_compile_path(&path,
2711 NULL,
2712 device->target->bus->path_id,
2713 device->target->target_id,
2714 device->lun_id);
2715 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2716 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2717 xpt_action((union ccb *)&cgd);
2718 cur_entry->callback(cur_entry->callback_arg,
2719 AC_FOUND_DEVICE,
2720 &path, &cgd);
2721 xpt_release_path(&path);
2722
2723 return(1);
2724}
2725
2726static int
2727xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2728{
2729 struct cam_path path;
2730 struct ccb_pathinq cpi;
2731 struct async_node *cur_entry;
2732
2733 cur_entry = (struct async_node *)arg;
2734
2735 xpt_compile_path(&path, /*periph*/NULL,
2736 bus->sim->path_id,
2737 CAM_TARGET_WILDCARD,
2738 CAM_LUN_WILDCARD);
2739 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2740 cpi.ccb_h.func_code = XPT_PATH_INQ;
2741 xpt_action((union ccb *)&cpi);
2742 cur_entry->callback(cur_entry->callback_arg,
2743 AC_PATH_REGISTERED,
2744 &path, &cpi);
2745 xpt_release_path(&path);
2746
2747 return(1);
2748}
2749
2750void
2751xpt_action(union ccb *start_ccb)
2752{
2753 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2754
2755 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2756
2757 crit_enter();
2758
2759 switch (start_ccb->ccb_h.func_code) {
2760 case XPT_SCSI_IO:
2761 {
2762#ifdef CAMDEBUG
2763 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2764 struct cam_path *path;
2765
2766 path = start_ccb->ccb_h.path;
2767#endif
2768
2769 /*
2770 * For the sake of compatibility with SCSI-1
2771 * devices that may not understand the identify
2772 * message, we include lun information in the
2773 * second byte of all commands. SCSI-1 specifies
2774 * that luns are a 3 bit value and reserves only 3
2775 * bits for lun information in the CDB. Later
2776 * revisions of the SCSI spec allow for more than 8
2777 * luns, but have deprecated lun information in the
 2778		 * CDB. So, if the lun won't fit, we must omit it.
2779 *
2780 * Also be aware that during initial probing for devices,
2781 * the inquiry information is unknown but initialized to 0.
2782 * This means that this code will be exercised while probing
2783 * devices with an ANSI revision greater than 2.
2784 */
2785 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2786 && start_ccb->ccb_h.target_lun < 8
2787 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2788
2789 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2790 start_ccb->ccb_h.target_lun << 5;
2791 }
2792 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2793 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2794 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2795 &path->device->inq_data),
2796 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2797 cdb_str, sizeof(cdb_str))));
2798 /* FALLTHROUGH */
2799 }
2800 case XPT_TARGET_IO:
2801 case XPT_CONT_TARGET_IO:
2802 start_ccb->csio.sense_resid = 0;
2803 start_ccb->csio.resid = 0;
2804 /* FALLTHROUGH */
2805 case XPT_RESET_DEV:
2806 case XPT_ENG_EXEC:
2807 {
2808 struct cam_path *path;
2809 int runq;
2810
2811 path = start_ccb->ccb_h.path;
2812
2813 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2814 if (path->device->qfrozen_cnt == 0)
2815 runq = xpt_schedule_dev_sendq(path->bus, path->device);
2816 else
2817 runq = 0;
2818 if (runq != 0)
2819 xpt_run_dev_sendq(path->bus);
2820 break;
2821 }
2822 case XPT_SET_TRAN_SETTINGS:
2823 {
2824 xpt_set_transfer_settings(&start_ccb->cts,
2825 start_ccb->ccb_h.path->device,
2826 /*async_update*/FALSE);
2827 break;
2828 }
2829 case XPT_CALC_GEOMETRY:
2830 {
2831 struct cam_sim *sim;
2832
2833 /* Filter out garbage */
2834 if (start_ccb->ccg.block_size == 0
2835 || start_ccb->ccg.volume_size == 0) {
2836 start_ccb->ccg.cylinders = 0;
2837 start_ccb->ccg.heads = 0;
2838 start_ccb->ccg.secs_per_track = 0;
2839 start_ccb->ccb_h.status = CAM_REQ_CMP;
2840 break;
2841 }
2842 sim = start_ccb->ccb_h.path->bus->sim;
2843 (*(sim->sim_action))(sim, start_ccb);
2844 break;
2845 }
2846 case XPT_ABORT:
2847 {
2848 union ccb* abort_ccb;
2849
2850 abort_ccb = start_ccb->cab.abort_ccb;
2851 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2852
2853 if (abort_ccb->ccb_h.pinfo.index >= 0) {
2854 struct cam_ccbq *ccbq;
2855
2856 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2857 cam_ccbq_remove_ccb(ccbq, abort_ccb);
2858 abort_ccb->ccb_h.status =
2859 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2860 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2861				xpt_done(abort_ccb);
2862 start_ccb->ccb_h.status = CAM_REQ_CMP;
2863 break;
2864 }
2865 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2866 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2867 /*
2868 * We've caught this ccb en route to
2869 * the SIM. Flag it for abort and the
2870 * SIM will do so just before starting
2871 * real work on the CCB.
2872 */
2873 abort_ccb->ccb_h.status =
2874 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2875 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2876 start_ccb->ccb_h.status = CAM_REQ_CMP;
2877 break;
2878 }
2879 }
2880 if (XPT_FC_IS_QUEUED(abort_ccb)
2881 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2882 /*
2883 * It's already completed but waiting
2884 * for our SWI to get to it.
2885 */
2886 start_ccb->ccb_h.status = CAM_UA_ABORT;
2887 break;
2888 }
2889 /*
2890 * If we weren't able to take care of the abort request
2891 * in the XPT, pass the request down to the SIM for processing.
2892 */
2893 /* FALLTHROUGH */
2894 }
2895 case XPT_ACCEPT_TARGET_IO:
2896 case XPT_EN_LUN:
2897 case XPT_IMMED_NOTIFY:
2898 case XPT_NOTIFY_ACK:
2899 case XPT_GET_TRAN_SETTINGS:
2900 case XPT_RESET_BUS:
2901 {
2902 struct cam_sim *sim;
2903
2904 sim = start_ccb->ccb_h.path->bus->sim;
2905 (*(sim->sim_action))(sim, start_ccb);
2906 break;
2907 }
2908 case XPT_PATH_INQ:
2909 {
2910 struct cam_sim *sim;
2911
2912 sim = start_ccb->ccb_h.path->bus->sim;
2913 (*(sim->sim_action))(sim, start_ccb);
2914 break;
2915 }
2916 case XPT_PATH_STATS:
2917 start_ccb->cpis.last_reset =
2918 start_ccb->ccb_h.path->bus->last_reset;
2919 start_ccb->ccb_h.status = CAM_REQ_CMP;
2920 break;
2921 case XPT_GDEV_TYPE:
2922 {
2923 struct cam_ed *dev;
2924
2925 dev = start_ccb->ccb_h.path->device;
2926 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2927 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2928 } else {
2929 struct ccb_getdev *cgd;
2930 struct cam_eb *bus;
2931 struct cam_et *tar;
2932
2933 cgd = &start_ccb->cgd;
2934 bus = cgd->ccb_h.path->bus;
2935 tar = cgd->ccb_h.path->target;
2936 cgd->inq_data = dev->inq_data;
2937 cgd->ccb_h.status = CAM_REQ_CMP;
2938 cgd->serial_num_len = dev->serial_num_len;
2939 if ((dev->serial_num_len > 0)
2940 && (dev->serial_num != NULL))
2941 bcopy(dev->serial_num, cgd->serial_num,
2942 dev->serial_num_len);
2943 }
2944 break;
2945 }
2946 case XPT_GDEV_STATS:
2947 {
2948 struct cam_ed *dev;
2949
2950 dev = start_ccb->ccb_h.path->device;
2951 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2952 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2953 } else {
2954 struct ccb_getdevstats *cgds;
2955 struct cam_eb *bus;
2956 struct cam_et *tar;
2957
2958 cgds = &start_ccb->cgds;
2959 bus = cgds->ccb_h.path->bus;
2960 tar = cgds->ccb_h.path->target;
2961 cgds->dev_openings = dev->ccbq.dev_openings;
2962 cgds->dev_active = dev->ccbq.dev_active;
2963 cgds->devq_openings = dev->ccbq.devq_openings;
2964 cgds->devq_queued = dev->ccbq.queue.entries;
2965 cgds->held = dev->ccbq.held;
2966 cgds->last_reset = tar->last_reset;
2967 cgds->maxtags = dev->quirk->maxtags;
2968 cgds->mintags = dev->quirk->mintags;
2969 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2970 cgds->last_reset = bus->last_reset;
2971 cgds->ccb_h.status = CAM_REQ_CMP;
2972 }
2973 break;
2974 }
2975 case XPT_GDEVLIST:
2976 {
2977 struct cam_periph *nperiph;
2978 struct periph_list *periph_head;
2979 struct ccb_getdevlist *cgdl;
2980 int i;
2981 struct cam_ed *device;
2982 int found;
2983
2984
2985 found = 0;
2986
2987 /*
2988 * Don't want anyone mucking with our data.
2989 */
2990 device = start_ccb->ccb_h.path->device;
2991 periph_head = &device->periphs;
2992 cgdl = &start_ccb->cgdl;
2993
2994 /*
2995 * Check and see if the list has changed since the user
2996 * last requested a list member. If so, tell them that the
2997 * list has changed, and therefore they need to start over
2998 * from the beginning.
2999 */
3000 if ((cgdl->index != 0) &&
3001 (cgdl->generation != device->generation)) {
3002 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3003 break;
3004 }
3005
3006 /*
3007 * Traverse the list of peripherals and attempt to find
3008 * the requested peripheral.
3009 */
3010 for (nperiph = periph_head->slh_first, i = 0;
3011 (nperiph != NULL) && (i <= cgdl->index);
3012 nperiph = nperiph->periph_links.sle_next, i++) {
3013 if (i == cgdl->index) {
3014 strncpy(cgdl->periph_name,
3015 nperiph->periph_name,
3016 DEV_IDLEN);
3017 cgdl->unit_number = nperiph->unit_number;
3018 found = 1;
3019 }
3020 }
3021 if (found == 0) {
3022 cgdl->status = CAM_GDEVLIST_ERROR;
3023 break;
3024 }
3025
3026 if (nperiph == NULL)
3027 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3028 else
3029 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3030
3031 cgdl->index++;
3032 cgdl->generation = device->generation;
3033
3034 cgdl->ccb_h.status = CAM_REQ_CMP;
3035 break;
3036 }
3037 case XPT_DEV_MATCH:
3038 {
3039 dev_pos_type position_type;
3040 struct ccb_dev_match *cdm;
3041 int ret;
3042
3043 cdm = &start_ccb->cdm;
3044
3045 /*
3046 * Prevent EDT changes while we traverse it.
3047 */
3048 /*
3049 * There are two ways of getting at information in the EDT.
3050 * The first way is via the primary EDT tree. It starts
3051 * with a list of busses, then a list of targets on a bus,
3052 * then devices/luns on a target, and then peripherals on a
3053 * device/lun. The "other" way is by the peripheral driver
3054 * lists. The peripheral driver lists are organized by
3055 * peripheral driver. (obviously) So it makes sense to
3056 * use the peripheral driver list if the user is looking
3057 * for something like "da1", or all "da" devices. If the
3058 * user is looking for something on a particular bus/target
3059 * or lun, it's generally better to go through the EDT tree.
3060 */
3061
3062 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3063 position_type = cdm->pos.position_type;
3064 else {
3065 int i;
3066
3067 position_type = CAM_DEV_POS_NONE;
3068
3069 for (i = 0; i < cdm->num_patterns; i++) {
3070 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3071 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3072 position_type = CAM_DEV_POS_EDT;
3073 break;
3074 }
3075 }
3076
3077 if (cdm->num_patterns == 0)
3078 position_type = CAM_DEV_POS_EDT;
3079 else if (position_type == CAM_DEV_POS_NONE)
3080 position_type = CAM_DEV_POS_PDRV;
3081 }
3082
3083 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3084 case CAM_DEV_POS_EDT:
3085 ret = xptedtmatch(cdm);
3086 break;
3087 case CAM_DEV_POS_PDRV:
3088 ret = xptperiphlistmatch(cdm);
3089 break;
3090 default:
3091 cdm->status = CAM_DEV_MATCH_ERROR;
3092 break;
3093 }
3094
3095 if (cdm->status == CAM_DEV_MATCH_ERROR)
3096 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3097 else
3098 start_ccb->ccb_h.status = CAM_REQ_CMP;
3099
3100 break;
3101 }
3102 case XPT_SASYNC_CB:
3103 {
3104 struct ccb_setasync *csa;
3105 struct async_node *cur_entry;
3106 struct async_list *async_head;
3107 u_int32_t added;
3108
3109 csa = &start_ccb->csa;
3110 added = csa->event_enable;
3111 async_head = &csa->ccb_h.path->device->asyncs;
3112
3113 /*
3114 * If there is already an entry for us, simply
3115 * update it.
3116 */
3117 cur_entry = SLIST_FIRST(async_head);
3118 while (cur_entry != NULL) {
3119 if ((cur_entry->callback_arg == csa->callback_arg)
3120 && (cur_entry->callback == csa->callback))
3121 break;
3122 cur_entry = SLIST_NEXT(cur_entry, links);
3123 }
3124
3125 if (cur_entry != NULL) {
3126 /*
3127 * If the request has no flags set,
3128 * remove the entry.
3129 */
3130 added &= ~cur_entry->event_enable;
3131 if (csa->event_enable == 0) {
3132 SLIST_REMOVE(async_head, cur_entry,
3133 async_node, links);
3134 csa->ccb_h.path->device->refcount--;
3135 free(cur_entry, M_DEVBUF);
3136 } else {
3137 cur_entry->event_enable = csa->event_enable;
3138 }
3139 } else {
3140 cur_entry = malloc(sizeof(*cur_entry),
3141 M_DEVBUF, M_INTWAIT);
3142 cur_entry->event_enable = csa->event_enable;
3143 cur_entry->callback_arg = csa->callback_arg;
3144 cur_entry->callback = csa->callback;
3145 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3146 csa->ccb_h.path->device->refcount++;
3147 }
3148
3149 if ((added & AC_FOUND_DEVICE) != 0) {
3150 /*
3151 * Get this peripheral up to date with all
3152 * the currently existing devices.
3153 */
3154 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3155 }
3156 if ((added & AC_PATH_REGISTERED) != 0) {
3157 /*
3158 * Get this peripheral up to date with all
3159 * the currently existing busses.
3160 */
3161 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3162 }
3163 start_ccb->ccb_h.status = CAM_REQ_CMP;
3164 break;
3165 }
3166 case XPT_REL_SIMQ:
3167 {
3168 struct ccb_relsim *crs;
3169 struct cam_ed *dev;
3170
3171 crs = &start_ccb->crs;
3172 dev = crs->ccb_h.path->device;
3173 if (dev == NULL) {
3174
3175 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3176 break;
3177 }
3178
3179 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3180
3181 if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3182
3183 /* Don't ever go below one opening */
3184 if (crs->openings > 0) {
3185 xpt_dev_ccbq_resize(crs->ccb_h.path,
3186 crs->openings);
3187
3188 if (bootverbose) {
3189 xpt_print_path(crs->ccb_h.path);
3190 printf("tagged openings "
3191 "now %d\n",
3192 crs->openings);
3193 }
3194 }
3195 }
3196 }
3197
3198 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3199
3200 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3201
3202 /*
3203 * Just extend the old timeout and decrement
3204 * the freeze count so that a single timeout
3205 * is sufficient for releasing the queue.
3206 */
3207 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3208				callout_stop(&dev->c_handle);
3209 } else {
3210
3211 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3212 }
3213
3214 callout_reset(&dev->c_handle,
3215 (crs->release_timeout * hz) / 1000,
3216 xpt_release_devq_timeout, dev);
3217
3218 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3219
3220 }
3221
3222 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3223
3224 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3225 /*
3226 * Decrement the freeze count so that a single
3227 * completion is still sufficient to unfreeze
3228 * the queue.
3229 */
3230 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3231 } else {
3232
3233 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3234 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3235 }
3236 }
3237
3238 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3239
3240 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3241 || (dev->ccbq.dev_active == 0)) {
3242
3243 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3244 } else {
3245
3246 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3247 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3248 }
3249 }
3250
3251 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3252
3253 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3254 /*run_queue*/TRUE);
3255 }
3256 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3257 start_ccb->ccb_h.status = CAM_REQ_CMP;
3258 break;
3259 }
3260 case XPT_SCAN_BUS:
3261 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3262 break;
3263 case XPT_SCAN_LUN:
3264 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3265 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3266 start_ccb);
3267 break;
3268 case XPT_DEBUG: {
3269#ifdef CAMDEBUG
3270#ifdef CAM_DEBUG_DELAY
3271 cam_debug_delay = CAM_DEBUG_DELAY;
3272#endif
3273 cam_dflags = start_ccb->cdbg.flags;
3274 if (cam_dpath != NULL) {
3275 xpt_free_path(cam_dpath);
3276 cam_dpath = NULL;
3277 }
3278
3279 if (cam_dflags != CAM_DEBUG_NONE) {
3280 if (xpt_create_path(&cam_dpath, xpt_periph,
3281 start_ccb->ccb_h.path_id,
3282 start_ccb->ccb_h.target_id,
3283 start_ccb->ccb_h.target_lun) !=
3284 CAM_REQ_CMP) {
3285 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3286 cam_dflags = CAM_DEBUG_NONE;
3287 } else {
3288 start_ccb->ccb_h.status = CAM_REQ_CMP;
3289 xpt_print_path(cam_dpath);
3290 printf("debugging flags now %x\n", cam_dflags);
3291 }
3292 } else {
3293 cam_dpath = NULL;
3294 start_ccb->ccb_h.status = CAM_REQ_CMP;
3295 }
3296#else /* !CAMDEBUG */
3297 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3298#endif /* CAMDEBUG */
3299 break;
3300 }
3301 case XPT_NOOP:
3302 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3303 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3304 start_ccb->ccb_h.status = CAM_REQ_CMP;
3305 break;
3306 default:
3307 case XPT_SDEV_TYPE:
3308 case XPT_TERM_IO:
3309 case XPT_ENG_INQ:
3310 /* XXX Implement */
3311 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3312 break;
3313 }
 3314	crit_exit();
3315}
3316
3317void
3318xpt_polled_action(union ccb *start_ccb)
3319{
3320 u_int32_t timeout;
3321 struct cam_sim *sim;
3322 struct cam_devq *devq;
3323 struct cam_ed *dev;
3324
3325 timeout = start_ccb->ccb_h.timeout;
3326 sim = start_ccb->ccb_h.path->bus->sim;
3327 devq = sim->devq;
3328 dev = start_ccb->ccb_h.path->device;
3329
 3330	crit_enter();
3331
3332 /*
3333 * Steal an opening so that no other queued requests
3334 * can get it before us while we simulate interrupts.
3335 */
3336 dev->ccbq.devq_openings--;
3337 dev->ccbq.dev_openings--;
3338
3339 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3340 && (--timeout > 0)) {
3341 DELAY(1000);
3342 (*(sim->sim_poll))(sim);
3343 swi_camnet(NULL);
3344 swi_cambio(NULL);
3345 }
3346
3347 dev->ccbq.devq_openings++;
3348 dev->ccbq.dev_openings++;
3349
3350 if (timeout != 0) {
3351 xpt_action(start_ccb);
3352 while(--timeout > 0) {
3353 (*(sim->sim_poll))(sim);
3354 swi_camnet(NULL);
3355 swi_cambio(NULL);
3356 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3357 != CAM_REQ_INPROG)
3358 break;
3359 DELAY(1000);
3360 }
3361 if (timeout == 0) {
3362 /*
3363 * XXX Is it worth adding a sim_timeout entry
3364 * point so we can attempt recovery? If
3365 * this is only used for dumps, I don't think
3366 * it is.
3367 */
3368 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3369 }
3370 } else {
3371 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3372 }
 3373	crit_exit();
3374}
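/*
 * Note that the polled loops above interpret ccb_h.timeout in
 * milliseconds: each iteration burns one DELAY(1000), roughly a
 * millisecond, so a CCB submitted with a 5000ms timeout is polled
 * on the order of 5000 times before CAM_CMD_TIMEOUT is reported.
 */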
3375
3376/*
 3377 * Schedule a peripheral driver to receive a ccb when its
3378 * target device has space for more transactions.
3379 */
3380void
3381xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3382{
3383 struct cam_ed *device;
3384 int runq;
3385
3386 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3387 device = perph->path->device;
 3388	crit_enter();
3389 if (periph_is_queued(perph)) {
3390 /* Simply reorder based on new priority */
3391 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3392 (" change priority to %d\n", new_priority));
3393 if (new_priority < perph->pinfo.priority) {
3394 camq_change_priority(&device->drvq,
3395 perph->pinfo.index,
3396 new_priority);
3397 }
3398 runq = 0;
3399 } else {
3400 /* New entry on the queue */
3401 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3402 (" added periph to queue\n"));
3403 perph->pinfo.priority = new_priority;
3404 perph->pinfo.generation = ++device->drvq.generation;
3405 camq_insert(&device->drvq, &perph->pinfo);
3406 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3407 }
 3408	crit_exit();
3409 if (runq != 0) {
3410 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3411 (" calling xpt_run_devq\n"));
3412 xpt_run_dev_allocq(perph->path->bus);
3413 }
3414}
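/*
 * An illustrative sketch of the expected caller: a peripheral driver
 * asks to be scheduled when it has work, and is later handed a CCB
 * through its periph_start entry point (the "foo" names are
 * hypothetical):
 *
 *	xpt_schedule(periph, 1);	 request a CCB at priority 1
 *
 *	static void
 *	foostart(struct cam_periph *periph, union ccb *start_ccb)
 *	{
 *		...fill in start_ccb and submit it via xpt_action()...
 *	}
 */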
3415
3416
3417/*
3418 * Schedule a device to run on a given queue.
3419 * If the device was inserted as a new entry on the queue,
3420 * return 1 meaning the device queue should be run. If we
3421 * were already queued, implying someone else has already
3422 * started the queue, return 0 so the caller doesn't attempt
 3423 * to run the queue. Must be run in a critical section.
3424 */
3425static int
3426xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3427 u_int32_t new_priority)
3428{
3429 int retval;
3430 u_int32_t old_priority;
3431
3432 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3433
3434 old_priority = pinfo->priority;
3435
3436 /*
3437 * Are we already queued?
3438 */
3439 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3440 /* Simply reorder based on new priority */
3441 if (new_priority < old_priority) {
3442 camq_change_priority(queue, pinfo->index,
3443 new_priority);
3444 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3445 ("changed priority to %d\n",
3446 new_priority));
3447 }
3448 retval = 0;
3449 } else {
3450 /* New entry on the queue */
3451 if (new_priority < old_priority)
3452 pinfo->priority = new_priority;
3453
3454 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3455 ("Inserting onto queue\n"));
3456 pinfo->generation = ++queue->generation;
3457 camq_insert(queue, pinfo);
3458 retval = 1;
3459 }
3460 return (retval);
3461}
3462
3463static void
3464xpt_run_dev_allocq(struct cam_eb *bus)
3465{
3466 struct cam_devq *devq;
984263bc
MD
3467
3468 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3469 devq = bus->sim->devq;
3470
3471 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3472 (" qfrozen_cnt == 0x%x, entries == %d, "
3473 "openings == %d, active == %d\n",
3474 devq->alloc_queue.qfrozen_cnt,
3475 devq->alloc_queue.entries,
3476 devq->alloc_openings,
3477 devq->alloc_active));
3478
 3479	crit_enter();
3480 devq->alloc_queue.qfrozen_cnt++;
3481 while ((devq->alloc_queue.entries > 0)
3482 && (devq->alloc_openings > 0)
3483 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3484 struct cam_ed_qinfo *qinfo;
3485 struct cam_ed *device;
3486 union ccb *work_ccb;
3487 struct cam_periph *drv;
3488 struct camq *drvq;
3489
3490 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3491 CAMQ_HEAD);
3492 device = qinfo->device;
3493
3494 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3495 ("running device %p\n", device));
3496
3497 drvq = &device->drvq;
3498
3499#ifdef CAMDEBUG
3500 if (drvq->entries <= 0) {
3501 panic("xpt_run_dev_allocq: "
3502 "Device on queue without any work to do");
3503 }
3504#endif
3505 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3506 devq->alloc_openings--;
3507 devq->alloc_active++;
3508 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3509			crit_exit();
3510 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3511 drv->pinfo.priority);
3512 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3513 ("calling periph start\n"));
3514 drv->periph_start(drv, work_ccb);
3515 } else {
3516 /*
3517 * Malloc failure in alloc_ccb
3518 */
3519 /*
3520 * XXX add us to a list to be run from free_ccb
3521 * if we don't have any ccbs active on this
3522 * device queue otherwise we may never get run
3523 * again.
3524 */
3525 break;
3526 }
3527
 3528		/* Re-enter critical section for possible insertion and test at top of loop */
 3529		crit_enter();
3530
3531 if (drvq->entries > 0) {
3532 /* We have more work. Attempt to reschedule */
3533 xpt_schedule_dev_allocq(bus, device);
3534 }
3535 }
3536 devq->alloc_queue.qfrozen_cnt--;
 3537	crit_exit();
3538}
3539
3540static void
3541xpt_run_dev_sendq(struct cam_eb *bus)
3542{
3543 struct cam_devq *devq;
984263bc
MD
3544
3545 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3546
3547 devq = bus->sim->devq;
3548
 3549	crit_enter();
 3550	devq->send_queue.qfrozen_cnt++;
3551 while ((devq->send_queue.entries > 0)
3552 && (devq->send_openings > 0)) {
3553 struct cam_ed_qinfo *qinfo;
3554 struct cam_ed *device;
3555 union ccb *work_ccb;
3556 struct cam_sim *sim;
 3557
 3558		if (devq->send_queue.qfrozen_cnt > 1) {
3559 break;
3560 }
3561
3562 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3563 CAMQ_HEAD);
3564 device = qinfo->device;
3565
3566 /*
3567 * If the device has been "frozen", don't attempt
3568 * to run it.
3569 */
3570 if (device->qfrozen_cnt > 0) {
3571 continue;
3572 }
3573
3574 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3575 ("running device %p\n", device));
3576
3577 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3578 if (work_ccb == NULL) {
3579 printf("device on run queue with no ccbs???\n");
3580 continue;
3581 }
3582
3583 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3584
3585 if (num_highpower <= 0) {
3586 /*
3587 * We got a high power command, but we
3588 * don't have any available slots. Freeze
3589 * the device queue until we have a slot
3590 * available.
3591 */
3592 device->qfrozen_cnt++;
3593 STAILQ_INSERT_TAIL(&highpowerq,
3594 &work_ccb->ccb_h,
3595 xpt_links.stqe);
3596
3597 continue;
3598 } else {
3599 /*
3600 * Consume a high power slot while
3601 * this ccb runs.
3602 */
3603 num_highpower--;
3604 }
3605 }
3606 devq->active_dev = device;
3607 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3608
3609 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3610
3611 devq->send_openings--;
3612 devq->send_active++;
3613
3614 if (device->ccbq.queue.entries > 0)
3615 xpt_schedule_dev_sendq(bus, device);
3616
3617 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3618 /*
3619 * The client wants to freeze the queue
3620 * after this CCB is sent.
3621 */
 3622			device->qfrozen_cnt++;
 3623		}
3624
3625 /* In Target mode, the peripheral driver knows best... */
3626 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3627 if ((device->inq_flags & SID_CmdQue) != 0
3628 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3629 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3630 else
3631 /*
3632 * Clear this in case of a retried CCB that
3633 * failed due to a rejected tag.
3634 */
3635 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3636 }
3637
3638 /*
3639 * Device queues can be shared among multiple sim instances
3640 * that reside on different busses. Use the SIM in the queue
3641 * CCB's path, rather than the one in the bus that was passed
3642 * into this function.
3643 */
3644 sim = work_ccb->ccb_h.path->bus->sim;
3645 (*(sim->sim_action))(sim, work_ccb);
3646
 3647		devq->active_dev = NULL;
 3648		/* Still in the critical section for the test at top of loop */
 3649	}
 3650	devq->send_queue.qfrozen_cnt--;
 3651	crit_exit();
3652}
3653
3654/*
3655 * This function merges stuff from the slave ccb into the master ccb, while
3656 * keeping important fields in the master ccb constant.
3657 */
3658void
3659xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3660{
3661 /*
3662 * Pull fields that are valid for peripheral drivers to set
3663 * into the master CCB along with the CCB "payload".
3664 */
3665 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3666 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3667 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3668 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3669 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3670 sizeof(union ccb) - sizeof(struct ccb_hdr));
3671}
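/*
 * The bcopy above relies on &(&ccb->ccb_h)[1] being the first byte
 * past the header inside the union, so exactly
 * sizeof(union ccb) - sizeof(struct ccb_hdr) payload bytes are
 * copied while the master's remaining header fields (path, pinfo,
 * and so on) are preserved.
 */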
3672
3673void
3674xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3675{
3676 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3677	callout_init(&ccb_h->timeout_ch);
3678 ccb_h->pinfo.priority = priority;
3679 ccb_h->path = path;
3680 ccb_h->path_id = path->bus->path_id;
3681 if (path->target)
3682 ccb_h->target_id = path->target->target_id;
3683 else
3684 ccb_h->target_id = CAM_TARGET_WILDCARD;
3685 if (path->device) {
3686 ccb_h->target_lun = path->device->lun_id;
3687 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3688 } else {
3689 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3690 }
3691 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3692 ccb_h->flags = 0;
3693}
3694
3695/* Path manipulation functions */
3696cam_status
3697xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3698 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3699{
3700 struct cam_path *path;
3701 cam_status status;
3702
 3703	path = malloc(sizeof(*path), M_DEVBUF, M_INTWAIT);
3704 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3705 if (status != CAM_REQ_CMP) {
3706 free(path, M_DEVBUF);
3707 path = NULL;
3708 }
3709 *new_path_ptr = path;
3710 return (status);
3711}
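/*
 * A minimal usage sketch (illustrative only) pairing xpt_create_path()
 * with xpt_free_path():
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id,
 *			    target_id, lun_id) == CAM_REQ_CMP) {
 *		...use the path, e.g. with xpt_setup_ccb()...
 *		xpt_free_path(path);
 *	}
 */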
3712
3713static cam_status
3714xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3715 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3716{
3717 struct cam_eb *bus;
3718 struct cam_et *target;
3719 struct cam_ed *device;
3720 cam_status status;
3721
3722 status = CAM_REQ_CMP; /* Completed without error */
3723 target = NULL; /* Wildcarded */
3724 device = NULL; /* Wildcarded */
3725
3726 /*
3727 * We will potentially modify the EDT, so block interrupts
3728 * that may attempt to create cam paths.
3729 */
 3730	crit_enter();
3731 bus = xpt_find_bus(path_id);
3732 if (bus == NULL) {
3733 status = CAM_PATH_INVALID;
3734 } else {
3735 target = xpt_find_target(bus, target_id);
3736 if (target == NULL) {
3737 /* Create one */
3738 struct cam_et *new_target;
3739
3740 new_target = xpt_alloc_target(bus, target_id);
3741 if (new_target == NULL) {
3742 status = CAM_RESRC_UNAVAIL;
3743 } else {
3744 target = new_target;
3745 }
3746 }
3747 if (target != NULL) {
3748 device = xpt_find_device(target, lun_id);
3749 if (device == NULL) {
3750 /* Create one */
3751 struct cam_ed *new_device;
3752
3753 new_device = xpt_alloc_device(bus,
3754 target,
3755 lun_id);
3756 if (new_device == NULL) {
3757 status = CAM_RESRC_UNAVAIL;
3758 } else {
3759 device = new_device;
3760 }
3761 }
3762 }
3763 }
 3764	crit_exit();
3765
3766 /*
3767 * Only touch the user's data if we are successful.
3768 */
3769 if (status == CAM_REQ_CMP) {
3770 new_path->periph = perph;
3771 new_path->bus = bus;
3772 new_path->target = target;
3773 new_path->device = device;
3774 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3775 } else {
3776 if (device != NULL)
3777 xpt_release_device(bus, target, device);
3778 if (target != NULL)
3779 xpt_release_target(bus, target);
3780 if (bus != NULL)
3781 xpt_release_bus(bus);
3782 }
3783 return (status);
3784}
3785
3786static void
3787xpt_release_path(struct cam_path *path)
3788{
3789 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3790 if (path->device != NULL) {
3791 xpt_release_device(path->bus, path->target, path->device);
3792 path->device = NULL;
3793 }
3794 if (path->target != NULL) {
3795 xpt_release_target(path->bus, path->target);
3796 path->target = NULL;
3797 }
3798 if (path->bus != NULL) {
3799 xpt_release_bus(path->bus);
3800 path->bus = NULL;
3801 }
3802}
3803
3804void
3805xpt_free_path(struct cam_path *path)
3806{
3807 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3808 xpt_release_path(path);
3809 free(path, M_DEVBUF);
3810}
3811
3812
3813/*
3814 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3815 * in path1, 2 for match with wildcards in path2.
3816 */
3817int
3818xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3819{
3820 int retval = 0;
3821
3822 if (path1->bus != path2->bus) {
3823 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3824 retval = 1;
3825 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3826 retval = 2;
3827 else
3828 return (-1);
3829 }
3830 if (path1->target != path2->target) {
3831 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3832 if (retval == 0)
3833 retval = 1;
3834 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3835 retval = 2;
3836 else
3837 return (-1);
3838 }
3839 if (path1->device != path2->device) {
3840 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3841 if (retval == 0)
3842 retval = 1;
3843 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3844 retval = 2;
3845 else
3846 return (-1);
3847 }
3848 return (retval);
3849}
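/*
 * Usage sketch (editorial illustration): most callers only care whether
 * two paths overlap at all, so they fold the three "match" results
 * together and simply test for a non-negative return.
 */
#ifdef CAM_XPT_USAGE_EXAMPLES
static int
paths_overlap(struct cam_path *p1, struct cam_path *p2)
{
	/* -1 = disjoint; 0 = exact; 1/2 = match via wildcards. */
	return (xpt_path_comp(p1, p2) >= 0);
}
#endif /* CAM_XPT_USAGE_EXAMPLES */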
3850
3851void
3852xpt_print_path(struct cam_path *path)
3853{
3854 if (path == NULL)
3855 printf("(nopath): ");
3856 else {
3857 if (path->periph != NULL)
3858 printf("(%s%d:", path->periph->periph_name,
3859 path->periph->unit_number);
3860 else
3861 printf("(noperiph:");
3862
3863 if (path->bus != NULL)
3864 printf("%s%d:%d:", path->bus->sim->sim_name,
3865 path->bus->sim->unit_number,
3866 path->bus->sim->bus_id);
3867 else
3868 printf("nobus:");
3869
3870 if (path->target != NULL)
3871 printf("%d:", path->target->target_id);
3872 else
3873 printf("X:");
3874
3875 if (path->device != NULL)
3876 printf("%d): ", path->device->lun_id);
3877 else
3878 printf("X): ");
3879 }
3880}
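/*
 * Usage sketch (editorial illustration): xpt_print_path() emits only a
 * prefix, so callers follow it with a plain printf().  For a hypothetical
 * "da0" peripheral on "ahc0", bus 0, target 3, lun 0, the combined output
 * would read "(da0:ahc0:0:3:0): invalidating pack".
 */
#ifdef CAM_XPT_USAGE_EXAMPLES
static void
xpt_print_path_example(struct cam_periph *periph)
{
	xpt_print_path(periph->path);
	printf("invalidating pack\n");
}
#endif /* CAM_XPT_USAGE_EXAMPLES */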
3881
3882path_id_t
3883xpt_path_path_id(struct cam_path *path)
3884{
3885 return(path->bus->path_id);
3886}
3887
3888target_id_t
3889xpt_path_target_id(struct cam_path *path)
3890{
3891 if (path->target != NULL)
3892 return (path->target->target_id);
3893 else
3894 return (CAM_TARGET_WILDCARD);
3895}
3896
3897lun_id_t
3898xpt_path_lun_id(struct cam_path *path)
3899{
3900 if (path->device != NULL)
3901 return (path->device->lun_id);
3902 else
3903 return (CAM_LUN_WILDCARD);
3904}
3905
3906struct cam_sim *
3907xpt_path_sim(struct cam_path *path)
3908{
3909 return (path->bus->sim);
3910}
3911
3912struct cam_periph*
3913xpt_path_periph(struct cam_path *path)
3914{
3915 return (path->periph);
3916}
3917
3918/*
3919 * Release a CAM control block for the caller. Remit the cost of the structure
3920 * to the device referenced by the path. If this device had no 'credits'
3921 * and peripheral drivers have registered async callbacks for this
3922 * notification, call them now.
3923 */
3924void
3925xpt_release_ccb(union ccb *free_ccb)
3926{
3927 struct cam_path *path;
3928 struct cam_ed *device;
3929 struct cam_eb *bus;
3930
3931 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3932 path = free_ccb->ccb_h.path;
3933 device = path->device;
3934 bus = path->bus;
3935	crit_enter();
3936 cam_ccbq_release_opening(&device->ccbq);
3937 if (xpt_ccb_count > xpt_max_ccbs) {
3938 xpt_free_ccb(free_ccb);
3939 xpt_ccb_count--;
3940 } else {
3941 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
3942 }
3943 bus->sim->devq->alloc_openings++;
3944 bus->sim->devq->alloc_active--;
3945 /* XXX Turn this into an inline function - xpt_run_device?? */
3946 if ((device_is_alloc_queued(device) == 0)
3947 && (device->drvq.entries > 0)) {
3948 xpt_schedule_dev_allocq(bus, device);
3949 }
3950	crit_exit();
3951 if (dev_allocq_is_runnable(bus->sim->devq))
3952 xpt_run_dev_allocq(bus);
3953}
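/*
 * Usage sketch (editorial illustration): a peripheral driver's completion
 * callback normally harvests the results and then returns the CCB with
 * xpt_release_ccb(), which may wake the allocation queue as described
 * above.  "xxdone" is a hypothetical driver routine, not part of this file.
 */
#ifdef CAM_XPT_USAGE_EXAMPLES
static void
xxdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* ... interpret done_ccb->ccb_h.status here ... */

	xpt_release_ccb(done_ccb);
}
#endif /* CAM_XPT_USAGE_EXAMPLES */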
3954
3955/* Functions accessed by SIM drivers */
3956
3957/*
3958 * A sim structure, listing the SIM entry points and instance
3959 * identification info, is passed to xpt_bus_register to hook the SIM
3960 * into the CAM framework. xpt_bus_register creates a cam_eb entry
3961 * for this new bus and places it in the array of busses and assigns
3962 * it a path_id. The path_id may be influenced by "hard wiring"
3963 * information specified by the user. Once interrupt services are
3964 * available, the bus will be probed.
3965 */
3966int32_t
3967xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
3968{
3969 struct cam_eb *new_bus;
3970 struct cam_eb *old_bus;
3971 struct ccb_pathinq cpi;
3972
3973 sim->bus_id = bus;
3974	new_bus = malloc(sizeof(*new_bus), M_DEVBUF, M_INTWAIT);
3975
3976 if (strcmp(sim->sim_name, "xpt") != 0) {
3977 sim->path_id =
3978 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3979 }
3980
3981 TAILQ_INIT(&new_bus->et_entries);
3982 new_bus->path_id =