/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.11 2004/03/15 01:10:30 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t event_enable;         /* Async Event enables */
        void (*callback)(void *arg, u_int32_t code,
                         struct cam_path *path, void *args);
        void *callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
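
/*
 * Since the default above is wrapped in #ifndef, it can presumably be
 * overridden at kernel build time from the config file (this file pulls
 * in "opt_cam.h"), e.g.:
 *
 *      options         CAM_MAX_HIGHPOWER=8
 *
 * The value 8 is purely illustrative.
 */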

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct cam_ed_qinfo alloc_ccb_entry;
        struct cam_ed_qinfo send_ccb_entry;
        struct cam_et *target;
        lun_id_t lun_id;
        struct camq drvq;               /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct cam_ccbq ccbq;           /* Queue of pending ccbs */
        struct async_list asyncs;       /* Async callback info for this B/T/L */
        struct periph_list periphs;     /* All attached devices */
        u_int generation;               /* Generation number */
        struct cam_periph *owner;       /* Peripheral driver's ownership tag */
        struct xpt_quirk_entry *quirk;  /* Oddities about this device */
        /* Storage for the inquiry data */
        struct scsi_inquiry_data inq_data;
        u_int8_t inq_flags;             /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t queue_flags;           /* Queue flags from the control page */
        u_int8_t serial_num_len;
        u_int8_t *serial_num;
        u_int32_t qfrozen_cnt;
        u_int32_t flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
        u_int32_t tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t refcount;
        struct callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct cam_eb *bus;
        target_id_t target_id;
        u_int32_t refcount;
        u_int generation;
        struct timeval last_reset;      /* uptime of last reset */
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb) links;
        path_id_t path_id;
        struct cam_sim *sim;
        struct timeval last_reset;      /* uptime of last reset */
        u_int32_t flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t refcount;
        u_int generation;
};

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb *bus;
        struct cam_et *target;
        struct cam_ed *device;
};

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
        u_int mintags;
        u_int maxtags;
};
#define CAM_SCSI2_MAXLUN        8

typedef enum {
        XPT_FLAG_OPEN = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags flags;
        u_int32_t generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        /*
         * The Seagate Medalist Pro drives have very poor write
         * performance with anything more than 2 tags.
         *
         * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
         * Drive:  <SEAGATE ST36530N 1444>
         *
         * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
         * Drive:  <SEAGATE ST34520W 1281>
         *
         * No one has actually reported that the 9G version
         * (ST39140*) of the Medalist Pro has the same problem, but
         * we're assuming that it does because the 4G and 6.5G
         * versions of the drive are broken.
         */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR:  kern/10398
                 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive:  DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 *      NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by:  Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR:            misc/18793
                 * Submitted by:  Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
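
/*
 * xpt_find_quirk() presumably walks this table in order (matching the
 * device's inquiry data via cam_quirkmatch(), as xptdevicematch() below
 * does for user-supplied patterns), so more specific entries must come
 * before the catch-all "default tagged queuing parameters" entry at the
 * end: the first match wins.
 */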

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;
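
/*
 * A dev_match_ret packs a flag nibble and an action nibble.  The match
 * routines below build and decode values roughly like this (sketch for
 * illustration only):
 *
 *      dev_match_ret ret = DM_RET_DESCEND | DM_RET_COPY;
 *
 *      if ((ret & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *              ... keep walking down the EDT ...
 *      if ((ret & DM_RET_FLAG_MASK) & DM_RET_COPY)
 *              ... copy this node out to the user ...
 */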

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth depth;
        void *tr_func;
        void *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
        /* name */      "xpt",
        /* maj */       XPT_CDEV_MAJOR,
        /* flags */     0,
        /* port */      NULL,
        /* autoq */     0,

        /* open */      xptopen,
        /* close */     xptclose,
        /* read */      noread,
        /* write */     nowrite,
        /* ioctl */     xptioctl,
        /* poll */      nopoll,
        /* mmap */      nommap,
        /* strategy */  nostrategy,
        /* dump */      nodump,
        /* psize */     nopsize
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static void xpt_init(void *);
SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);

static cam_status xpt_compile_path(struct cam_path *new_path,
                                   struct cam_periph *perph,
                                   path_id_t path_id,
                                   target_id_t target_id,
                                   lun_id_t lun_id);

static void xpt_release_path(struct cam_path *path);

static void xpt_async_bcast(struct async_list *async_head,
                            u_int32_t async_code,
                            struct cam_path *path,
                            void *async_arg);
static void xpt_dev_async(u_int32_t async_code,
                          struct cam_eb *bus,
                          struct cam_et *target,
                          struct cam_ed *device,
                          void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                            u_int32_t new_priority);
static void xpt_run_dev_allocq(struct cam_eb *bus);
static void xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void xpt_release_bus(struct cam_eb *bus);
static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                    int run_queue);
static struct cam_et*
            xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
            xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                             lun_id_t lun_id);
static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                               struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
            xpt_find_bus(path_id_t path_id);
static struct cam_et*
            xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
            xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void xpt_scan_lun(struct cam_periph *periph,
                         struct cam_path *path, cam_flags flags,
                         union ccb *ccb);
static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t xptconfigbuscountfunc;
static xpt_busfunc_t xptconfigfunc;
static void xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void xptpoll(struct cam_sim *sim);
static inthand2_t swi_camnet;
static inthand2_t swi_cambio;
static void camisr(cam_isrq_t *queue);
#if 0
static void xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void xptasync(struct cam_periph *periph,
                     u_int32_t code, cam_path *path);
#endif
static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
                                 int num_patterns, struct cam_eb *bus);
static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
                                    int num_patterns, struct cam_ed *device);
static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
                                    int num_patterns,
                                    struct cam_periph *periph);
static xpt_busfunc_t xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int xptedtmatch(struct ccb_dev_match *cdm);
static int xptperiphlistmatch(struct ccb_dev_match *cdm);
static int xptbustraverse(struct cam_eb *start_bus,
                          xpt_busfunc_t *tr_func, void *arg);
static int xpttargettraverse(struct cam_eb *bus,
                             struct cam_et *start_target,
                             xpt_targetfunc_t *tr_func, void *arg);
static int xptdevicetraverse(struct cam_et *target,
                             struct cam_ed *start_device,
                             xpt_devicefunc_t *tr_func, void *arg);
static int xptperiphtraverse(struct cam_ed *device,
                             struct cam_periph *start_periph,
                             xpt_periphfunc_t *tr_func, void *arg);
static int xptpdrvtraverse(struct periph_driver **start_pdrv,
                           xpt_pdrvfunc_t *tr_func, void *arg);
static int xptpdperiphtraverse(struct periph_driver **pdrv,
                               struct cam_periph *start_periph,
                               xpt_periphfunc_t *tr_func,
                               void *arg);
static xpt_busfunc_t xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                               void *arg);
#endif
static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                               void *arg);
#ifdef notusedyet
static int xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                               void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t xptsetasyncbusfunc;
static cam_status xptregister(struct cam_periph *periph,
                              void *arg);
static cam_status proberegister(struct cam_periph *periph,
                                void *arg);
static void probeschedule(struct cam_periph *probe_periph);
static void probestart(struct cam_periph *periph, union ccb *start_ccb);
static void proberequestdefaultnegotiation(struct cam_periph *periph);
static void probedone(struct cam_periph *periph, union ccb *done_ccb);
static void probecleanup(struct cam_periph *periph);
static void xpt_find_quirk(struct cam_ed *device);
static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                      struct cam_ed *device,
                                      int async_update);
static void xpt_toggle_tags(struct cam_path *path);
static void xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}
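
/*
 * The predicates above are used by the run queue machinery
 * (xpt_run_dev_allocq()/xpt_run_dev_sendq(), declared earlier) to avoid
 * double-queueing: an entry whose pinfo.index is CAM_UNQUEUED_INDEX is
 * not currently on any camq.
 */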

static void
xpt_periph_init(void)
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than unit 0, point out their mistake.
         */
        if (unit != 0) {
                printf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than unit 0, point out their mistake.
         */
        if (unit != 0) {
                printf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than unit 0, point out their mistake.
         */
        if (unit != 0) {
                printf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)addr;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;
                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
        /*
         * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
         * with the peripheral driver name and unit name filled in.  The other
         * fields don't really matter as input.  The passthrough driver name
         * ("pass"), and unit number are passed back in the ccb.  The current
         * device generation number, and the index into the device peripheral
         * driver list, and the status are also passed back.  Note that
         * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
         * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it with splcam protection.
         */
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char *name;
                int unit;
                int cur_generation;
                int base_periph_found;
                int splbreaknum;
                int s;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                /*
                 * Every 100 devices, we want to drop our spl protection to
                 * give the software interrupt handler a chance to run.
                 * Most systems won't run into this check, but this should
                 * avoid starvation in the software interrupt handler in
                 * large systems.
                 */
                splbreaknum = 100;

                ccb = (union ccb *)addr;

                base_periph_found = 0;

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                s = splcam();
ptstartover:
                cur_generation = xsoftc.generation;

                /* first find our driver in the list of drivers */
                SET_FOREACH(p_drv, periphdriver_set) {
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;
                }

                if (*p_drv == NULL) {
                        splx(s);
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {

                        if (periph->unit_number == unit) {
                                break;
                        } else if (--splbreaknum == 0) {
                                splx(s);
                                s = splcam();
                                splbreaknum = 100;
                                if (cur_generation != xsoftc.generation)
                                        goto ptstartover;
                        }
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = 1;
                        device = periph->path->device;
                        for (i = 0, periph = device->periphs.slh_first;
                             periph != NULL;
                             periph = periph->periph_links.sle_next, i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strcpy(ccb->cgdl.periph_name,
                                               periph->periph_name);
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (periph->periph_links.sle_next)
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  e.g. the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        /*
                         * It is unfortunate that this is even necessary,
                         * but there are many, many clueless users out there.
                         * If this is true, the user is looking for the
                         * passthrough driver, but doesn't have one in his
                         * kernel.
                         */
                        if (base_periph_found == 1) {
                                printf("xptioctl: pass driver is not in the "
                                       "kernel\n");
                                printf("xptioctl: put \"device pass0\" in "
                                       "your kernel config file\n");
                        }
                }
                splx(s);
                break;
        }
        default:
                error = ENOTTY;
                break;
        }

        return(error);
}
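
/*
 * For illustration, userland reaches the ioctls above through the
 * /dev/xpt0 node created in xpt_periph_init().  A minimal, hypothetical
 * CAMGETPASSTHRU lookup might read (error handling omitted, the usual
 * CAM headers assumed):
 *
 *      union ccb ccb;
 *      int fd = open("/dev/xpt0", O_RDWR);
 *
 *      bzero(&ccb, sizeof(ccb));
 *      strcpy(ccb.cgdl.periph_name, "da");     // find da0's pass device
 *      ccb.cgdl.unit_number = 0;
 *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0
 *       && ccb.ccb_h.status == CAM_REQ_CMP)
 *              printf("%s%d\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
 */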

/* Functions accessed by the peripheral drivers */
static void
xpt_init(void *dummy)
{
        struct cam_sim *xpt_sim;
        struct cam_path *path;
        struct cam_devq *devq;
        cam_status status;

        TAILQ_INIT(&xpt_busses);
        TAILQ_INIT(&cam_bioq);
        TAILQ_INIT(&cam_netq);
        SLIST_INIT(&ccb_freeq);
        STAILQ_INIT(&highpowerq);

        /*
         * The xpt layer is, itself, the equivalent of a SIM.
         * Allow 16 ccbs in the ccb pool for it.  This should
         * give decent parallelism when we probe busses and
         * perform other XPT functions.
         */
        devq = cam_simq_alloc(16);
        xpt_sim = cam_sim_alloc(xptaction,
                                xptpoll,
                                "xpt",
                                /*softc*/NULL,
                                /*unit*/0,
                                /*max_dev_transactions*/0,
                                /*max_tagged_dev_transactions*/0,
                                devq);
        cam_simq_release(devq);
        xpt_max_ccbs = 16;

        xpt_bus_register(xpt_sim, /*bus #*/0);

        /*
         * Looking at the XPT from the SIM layer, the XPT is
         * the equivalent of a peripheral driver.  Allocate
         * a peripheral driver entry for us.
         */
        if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                      CAM_TARGET_WILDCARD,
                                      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                printf("xpt_init: xpt_create_path failed with status %#x,"
                       " failing attach\n", status);
                return;
        }

        cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
                         path, NULL, 0, NULL);
        xpt_free_path(path);

        xpt_sim->softc = xpt_periph;

        /*
         * Register a callback for when interrupts are enabled.
         */
        xpt_config_hook = malloc(sizeof(struct intr_config_hook),
                                 M_TEMP, M_INTWAIT | M_ZERO);
        xpt_config_hook->ich_func = xpt_config;
        if (config_intrhook_establish(xpt_config_hook) != 0) {
                free(xpt_config_hook, M_TEMP);
                printf("xpt_init: config_intrhook_establish failed "
                       "- failing attach\n");
        }

        /* Install our software interrupt handlers */
        register_swi(SWI_CAMNET, swi_camnet, NULL, "swi_camnet");
        register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio");
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
        if (periph == NULL) {
                printf("xptregister: periph was NULL!!\n");
                return(CAM_REQ_CMP_ERR);
        }

        periph->softc = NULL;

        xpt_periph = periph;

        return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
        struct cam_ed *device;
        int32_t status;
        struct periph_list *periph_head;

        device = periph->path->device;

        status = CAM_REQ_CMP;

        if (device != NULL) {
                int s;

                /* Only dereference the device once we know it's there */
                periph_head = &device->periphs;

                /*
                 * Make room for this peripheral
                 * so it will fit in the queue
                 * when it's scheduled to run
                 */
                s = splsoftcam();
                status = camq_resize(&device->drvq,
                                     device->drvq.array_size + 1);

                device->generation++;

                SLIST_INSERT_HEAD(periph_head, periph, periph_links);

                splx(s);
        }

        xsoftc.generation++;

        return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
        struct cam_ed *device;

        device = periph->path->device;

        if (device != NULL) {
                int s;
                struct periph_list *periph_head;

                periph_head = &device->periphs;

                /* Release the slot for this peripheral */
                s = splsoftcam();
                camq_resize(&device->drvq, device->drvq.array_size - 1);

                device->generation++;

                SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

                splx(s);
        }

        xsoftc.generation++;

}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
        int s;
        u_int mb;
        struct cam_path *path;
        struct ccb_trans_settings cts;

        path = periph->path;
        /*
         * To ensure that this is printed in one piece,
         * mask out CAM interrupts.
         */
        s = splsoftcam();
        printf("%s%d at %s%d bus %d target %d lun %d\n",
               periph->periph_name, periph->unit_number,
               path->bus->sim->sim_name,
               path->bus->sim->unit_number,
               path->bus->sim->bus_id,
               path->target->target_id,
               path->device->lun_id);
        printf("%s%d: ", periph->periph_name, periph->unit_number);
        scsi_print_inquiry(&path->device->inq_data);
        if ((bootverbose)
         && (path->device->serial_num_len > 0)) {
                /* Don't wrap the screen - print only the first 60 chars */
                printf("%s%d: Serial Number %.60s\n", periph->periph_name,
                       periph->unit_number, path->device->serial_num);
        }
        xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
        cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
        cts.flags = CCB_TRANS_CURRENT_SETTINGS;
        xpt_action((union ccb*)&cts);
        if (cts.ccb_h.status == CAM_REQ_CMP) {
                u_int speed;
                u_int freq;

                if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
                  && cts.sync_offset != 0) {
                        freq = scsi_calc_syncsrate(cts.sync_period);
                        speed = freq;
                } else {
                        struct ccb_pathinq cpi;

                        /* Ask the SIM for its base transfer speed */
                        xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
                        cpi.ccb_h.func_code = XPT_PATH_INQ;
                        xpt_action((union ccb *)&cpi);

                        speed = cpi.base_transfer_speed;
                        freq = 0;
                }
                if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
                        speed *= (0x01 << cts.bus_width);
                mb = speed / 1000;
                if (mb > 0)
                        printf("%s%d: %d.%03dMB/s transfers",
                               periph->periph_name, periph->unit_number,
                               mb, speed % 1000);
                else
                        printf("%s%d: %dKB/s transfers", periph->periph_name,
                               periph->unit_number, speed);
                if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
                 && cts.sync_offset != 0) {
                        printf(" (%d.%03dMHz, offset %d", freq / 1000,
                               freq % 1000, cts.sync_offset);
                }
                if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
                 && cts.bus_width > 0) {
                        if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
                         && cts.sync_offset != 0) {
                                printf(", ");
                        } else {
                                printf(" (");
                        }
                        printf("%dbit)", 8 * (0x01 << cts.bus_width));
                } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
                        && cts.sync_offset != 0) {
                        printf(")");
                }

                if (path->device->inq_flags & SID_CmdQue
                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                        printf(", Tagged Queueing Enabled");
                }

                printf("\n");
        } else if (path->device->inq_flags & SID_CmdQue
                || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                printf("%s%d: Tagged Queueing Enabled\n",
                       periph->periph_name, periph->unit_number);
        }

        /*
         * We only want to print the caller's announce string if they've
         * passed one in.
         */
        if (announce_string != NULL)
                printf("%s%d: %s\n", periph->periph_name,
                       periph->unit_number, announce_string);
        splx(s);
}

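/*
 * Taken together, the printfs above produce the familiar boot-time
 * announcement.  For a hypothetical disk it would look something like:
 *
 *      da0 at ahc0 bus 0 target 0 lun 0
 *      da0: <QUANTUM XP39100 LXY4> Fixed Direct Access SCSI-2 device
 *      da0: 40.000MB/s transfers (20.000MHz, offset 15, 16bit), Tagged Queueing Enabled
 *
 * The middle line comes from scsi_print_inquiry(); the device, speeds,
 * and offsets shown are illustrative only.
 */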

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
            struct cam_eb *bus)
{
        dev_match_ret retval;
        int i;

        retval = DM_RET_NONE;

        /*
         * If we aren't given something to match against, that's an error.
         */
        if (bus == NULL)
                return(DM_RET_ERROR);

        /*
         * If there are no match entries, then this bus matches no
         * matter what.
         */
        if ((patterns == NULL) || (num_patterns == 0))
                return(DM_RET_DESCEND | DM_RET_COPY);

        for (i = 0; i < num_patterns; i++) {
                struct bus_match_pattern *cur_pattern;

                /*
                 * If the pattern in question isn't for a bus node, we
                 * aren't interested.  However, we do indicate to the
                 * calling routine that we should continue descending the
                 * tree, since the user wants to match against lower-level
                 * EDT elements.
                 */
                if (patterns[i].type != DEV_MATCH_BUS) {
                        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                                retval |= DM_RET_DESCEND;
                        continue;
                }

                cur_pattern = &patterns[i].pattern.bus_pattern;

                /*
                 * If they want to match any bus node, we give them this
                 * bus node.
                 */
                if (cur_pattern->flags == BUS_MATCH_ANY) {
                        /* set the copy flag */
                        retval |= DM_RET_COPY;

                        /*
                         * If we've already decided on an action, go ahead
                         * and return.
                         */
                        if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
                                return(retval);
                }

                /*
                 * Not sure why someone would do this...
                 */
                if (cur_pattern->flags == BUS_MATCH_NONE)
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
                 && (cur_pattern->path_id != bus->path_id))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
                 && (cur_pattern->bus_id != bus->sim->bus_id))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
                 && (cur_pattern->unit_number != bus->sim->unit_number))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
                 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
                             DEV_IDLEN) != 0))
                        continue;

                /*
                 * If we get to this point, the user definitely wants
                 * information on this bus.  So tell the caller to copy the
                 * data out.
                 */
                retval |= DM_RET_COPY;

                /*
                 * If the return action has been set to descend, then we
                 * know that we've already seen a non-bus matching
                 * expression, therefore we need to further descend the tree.
                 * This won't change by continuing around the loop, so we
                 * go ahead and return.  If we haven't seen a non-bus
                 * matching expression, we keep going around the loop until
                 * we exhaust the matching expressions.  We'll set the stop
                 * flag once we fall out of the loop.
                 */
                if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
                        return(retval);
        }

        /*
         * If the return action hasn't been set to descend yet, that means
         * we haven't seen anything other than bus matching patterns.  So
         * tell the caller to stop descending the tree -- the user doesn't
         * want to match against lower level tree elements.
         */
        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                retval |= DM_RET_STOP;

        return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
               struct cam_ed *device)
{
        dev_match_ret retval;
        int i;

        retval = DM_RET_NONE;

        /*
         * If we aren't given something to match against, that's an error.
         */
        if (device == NULL)
                return(DM_RET_ERROR);

        /*
         * If there are no match entries, then this device matches no
         * matter what.
         */
        if ((patterns == NULL) || (num_patterns == 0))
                return(DM_RET_DESCEND | DM_RET_COPY);

        for (i = 0; i < num_patterns; i++) {
                struct device_match_pattern *cur_pattern;

                /*
                 * If the pattern in question isn't for a device node, we
                 * aren't interested.
                 */
                if (patterns[i].type != DEV_MATCH_DEVICE) {
                        if ((patterns[i].type == DEV_MATCH_PERIPH)
                         && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
                                retval |= DM_RET_DESCEND;
                        continue;
                }

                cur_pattern = &patterns[i].pattern.device_pattern;

                /*
                 * If they want to match any device node, we give them any
                 * device node.
                 */
                if (cur_pattern->flags == DEV_MATCH_ANY) {
                        /* set the copy flag */
                        retval |= DM_RET_COPY;

                        /*
                         * If we've already decided on an action, go ahead
                         * and return.
                         */
                        if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
                                return(retval);
                }

                /*
                 * Not sure why someone would do this...
                 */
                if (cur_pattern->flags == DEV_MATCH_NONE)
                        continue;

                if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
                 && (cur_pattern->path_id != device->target->bus->path_id))
                        continue;

                if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
                 && (cur_pattern->target_id != device->target->target_id))
                        continue;

                if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
                 && (cur_pattern->target_lun != device->lun_id))
                        continue;

                if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
                 && (cam_quirkmatch((caddr_t)&device->inq_data,
                                    (caddr_t)&cur_pattern->inq_pat,
                                    1, sizeof(cur_pattern->inq_pat),
                                    scsi_static_inquiry_match) == NULL))
                        continue;

                /*
                 * If we get to this point, the user definitely wants
                 * information on this device.  So tell the caller to copy
                 * the data out.
                 */
                retval |= DM_RET_COPY;

                /*
                 * If the return action has been set to descend, then we
                 * know that we've already seen a peripheral matching
                 * expression, therefore we need to further descend the tree.
                 * This won't change by continuing around the loop, so we
                 * go ahead and return.  If we haven't seen a peripheral
                 * matching expression, we keep going around the loop until
                 * we exhaust the matching expressions.  We'll set the stop
                 * flag once we fall out of the loop.
                 */
                if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
                        return(retval);
        }

        /*
         * If the return action hasn't been set to descend yet, that means
         * we haven't seen any peripheral matching patterns.  So tell the
         * caller to stop descending the tree -- the user doesn't want to
         * match against lower level tree elements.
         */
        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                retval |= DM_RET_STOP;

        return(retval);
}
1787
1788/*
1789 * Match a single peripheral against any number of match patterns.
1790 */
1791static dev_match_ret
1792xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1793 struct cam_periph *periph)
1794{
1795 dev_match_ret retval;
1796 int i;
1797
1798 /*
1799 * If we aren't given something to match against, that's an error.
1800 */
1801 if (periph == NULL)
1802 return(DM_RET_ERROR);
1803
1804 /*
1805 * If there are no match entries, then this peripheral matches no
1806 * matter what.
1807 */
1808 if ((patterns == NULL) || (num_patterns == 0))
1809 return(DM_RET_STOP | DM_RET_COPY);
1810
1811 /*
1812 * There aren't any nodes below a peripheral node, so there's no
1813 * reason to descend the tree any further.
1814 */
1815 retval = DM_RET_STOP;
1816
1817 for (i = 0; i < num_patterns; i++) {
1818 struct periph_match_pattern *cur_pattern;
1819
1820 /*
1821 * If the pattern in question isn't for a peripheral, we
1822 * aren't interested.
1823 */
1824 if (patterns[i].type != DEV_MATCH_PERIPH)
1825 continue;
1826
1827 cur_pattern = &patterns[i].pattern.periph_pattern;
1828
1829 /*
1830 * If they want to match on anything, then we will do so.
1831 */
1832 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1833 /* set the copy flag */
1834 retval |= DM_RET_COPY;
1835
1836 /*
1837 * We've already set the return action to stop,
1838 * since there are no nodes below peripherals in
1839 * the tree.
1840 */
1841 return(retval);
1842 }
1843
1844 /*
1845 * Not sure why someone would do this...
1846 */
1847 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1848 continue;
1849
1850 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1851 && (cur_pattern->path_id != periph->path->bus->path_id))
1852 continue;
1853
1854 /*
1855 * For the target and lun id's, we have to make sure the
1856 * target and lun pointers aren't NULL. The xpt peripheral
1857 * has a wildcard target and device.
1858 */
1859 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1860 && ((periph->path->target == NULL)
1861 ||(cur_pattern->target_id != periph->path->target->target_id)))
1862 continue;
1863
1864 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1865 && ((periph->path->device == NULL)
1866 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1867 continue;
1868
1869 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1870 && (cur_pattern->unit_number != periph->unit_number))
1871 continue;
1872
1873 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1874 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1875 DEV_IDLEN) != 0))
1876 continue;
1877
1878 /*
1879 * If we get to this point, the user definitely wants
1880 * information on this peripheral. So tell the caller to
1881 * copy the data out.
1882 */
1883 retval |= DM_RET_COPY;
1884
1885 /*
1886 * The return action has already been set to stop, since
1887 * peripherals don't have any nodes below them in the EDT.
1888 */
1889 return(retval);
1890 }
1891
1892 /*
1893 * If we get to this point, the peripheral that was passed in
1894 * doesn't match any of the patterns.
1895 */
1896 return(retval);
1897}
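/*
 * A minimal sketch of the pattern side, assuming consumer code that is
 * not part of this file: to match every unit of the "da" peripheral
 * driver, only PERIPH_MATCH_NAME is set and the remaining fields are
 * left alone.
 *
 *	struct dev_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_PERIPH;
 *	pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strncpy(pat.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
 *
 * Adding PERIPH_MATCH_UNIT to the flags and setting unit_number to 1
 * would narrow the match to "da1" alone.
 */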
1898
1899static int
1900xptedtbusfunc(struct cam_eb *bus, void *arg)
1901{
1902 struct ccb_dev_match *cdm;
1903 dev_match_ret retval;
1904
1905 cdm = (struct ccb_dev_match *)arg;
1906
1907 /*
1908 * If our position is for something deeper in the tree, that means
1909 * that we've already seen this node. So, we keep going down.
1910 */
1911 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1912 && (cdm->pos.cookie.bus == bus)
1913 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1914 && (cdm->pos.cookie.target != NULL))
1915 retval = DM_RET_DESCEND;
1916 else
1917 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1918
1919 /*
1920 * If we got an error, bail out of the search.
1921 */
1922 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1923 cdm->status = CAM_DEV_MATCH_ERROR;
1924 return(0);
1925 }
1926
1927 /*
1928 * If the copy flag is set, copy this bus out.
1929 */
1930 if (retval & DM_RET_COPY) {
1931 int spaceleft, j;
1932
1933 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1934 sizeof(struct dev_match_result));
1935
1936 /*
1937 * If we don't have enough space to put in another
1938 * match result, save our position and tell the
1939 * user there are more devices to check.
1940 */
1941 if (spaceleft < sizeof(struct dev_match_result)) {
1942 bzero(&cdm->pos, sizeof(cdm->pos));
1943 cdm->pos.position_type =
1944 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1945
1946 cdm->pos.cookie.bus = bus;
1947 cdm->pos.generations[CAM_BUS_GENERATION]=
1948 bus_generation;
1949 cdm->status = CAM_DEV_MATCH_MORE;
1950 return(0);
1951 }
1952 j = cdm->num_matches;
1953 cdm->num_matches++;
1954 cdm->matches[j].type = DEV_MATCH_BUS;
1955 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1956 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1957 cdm->matches[j].result.bus_result.unit_number =
1958 bus->sim->unit_number;
1959 strncpy(cdm->matches[j].result.bus_result.dev_name,
1960 bus->sim->sim_name, DEV_IDLEN);
1961 }
1962
1963 /*
1964 * If the user is only interested in busses, there's no
1965 * reason to descend to the next level in the tree.
1966 */
1967 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1968 return(1);
1969
1970 /*
1971 * If there is a target generation recorded, check it to
1972 * make sure the target list hasn't changed.
1973 */
1974 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1975 && (bus == cdm->pos.cookie.bus)
1976 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1977 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1978 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1979 bus->generation)) {
1980 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1981 return(0);
1982 }
1983
1984 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1985 && (cdm->pos.cookie.bus == bus)
1986 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1987 && (cdm->pos.cookie.target != NULL))
1988 return(xpttargettraverse(bus,
1989 (struct cam_et *)cdm->pos.cookie.target,
1990 xptedttargetfunc, arg));
1991 else
1992 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1993}
1994
1995static int
1996xptedttargetfunc(struct cam_et *target, void *arg)
1997{
1998 struct ccb_dev_match *cdm;
1999
2000 cdm = (struct ccb_dev_match *)arg;
2001
2002 /*
2003 * If there is a device list generation recorded, check it to
2004 * make sure the device list hasn't changed.
2005 */
2006 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2007 && (cdm->pos.cookie.bus == target->bus)
2008 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2009 && (cdm->pos.cookie.target == target)
2010 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2011 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2012 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2013 target->generation)) {
2014 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2015 return(0);
2016 }
2017
2018 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2019 && (cdm->pos.cookie.bus == target->bus)
2020 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2021 && (cdm->pos.cookie.target == target)
2022 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2023 && (cdm->pos.cookie.device != NULL))
2024 return(xptdevicetraverse(target,
2025 (struct cam_ed *)cdm->pos.cookie.device,
2026 xptedtdevicefunc, arg));
2027 else
2028 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2029}
2030
2031static int
2032xptedtdevicefunc(struct cam_ed *device, void *arg)
2033{
2034
2035 struct ccb_dev_match *cdm;
2036 dev_match_ret retval;
2037
2038 cdm = (struct ccb_dev_match *)arg;
2039
2040 /*
2041 * If our position is for something deeper in the tree, that means
2042 * that we've already seen this node. So, we keep going down.
2043 */
2044 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2045 && (cdm->pos.cookie.device == device)
2046 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2047 && (cdm->pos.cookie.periph != NULL))
2048 retval = DM_RET_DESCEND;
2049 else
2050 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2051 device);
2052
2053 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2054 cdm->status = CAM_DEV_MATCH_ERROR;
2055 return(0);
2056 }
2057
2058 /*
2059 * If the copy flag is set, copy this device out.
2060 */
2061 if (retval & DM_RET_COPY) {
2062 int spaceleft, j;
2063
2064 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2065 sizeof(struct dev_match_result));
2066
2067 /*
2068 * If we don't have enough space to put in another
2069 * match result, save our position and tell the
2070 * user there are more devices to check.
2071 */
2072 if (spaceleft < sizeof(struct dev_match_result)) {
2073 bzero(&cdm->pos, sizeof(cdm->pos));
2074 cdm->pos.position_type =
2075 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2076 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2077
2078 cdm->pos.cookie.bus = device->target->bus;
2079 cdm->pos.generations[CAM_BUS_GENERATION]=
2080 bus_generation;
2081 cdm->pos.cookie.target = device->target;
2082 cdm->pos.generations[CAM_TARGET_GENERATION] =
2083 device->target->bus->generation;
2084 cdm->pos.cookie.device = device;
2085 cdm->pos.generations[CAM_DEV_GENERATION] =
2086 device->target->generation;
2087 cdm->status = CAM_DEV_MATCH_MORE;
2088 return(0);
2089 }
2090 j = cdm->num_matches;
2091 cdm->num_matches++;
2092 cdm->matches[j].type = DEV_MATCH_DEVICE;
2093 cdm->matches[j].result.device_result.path_id =
2094 device->target->bus->path_id;
2095 cdm->matches[j].result.device_result.target_id =
2096 device->target->target_id;
2097 cdm->matches[j].result.device_result.target_lun =
2098 device->lun_id;
2099 bcopy(&device->inq_data,
2100 &cdm->matches[j].result.device_result.inq_data,
2101 sizeof(struct scsi_inquiry_data));
2102
2103 /* Let the user know whether this device is unconfigured */
2104 if (device->flags & CAM_DEV_UNCONFIGURED)
2105 cdm->matches[j].result.device_result.flags =
2106 DEV_RESULT_UNCONFIGURED;
2107 else
2108 cdm->matches[j].result.device_result.flags =
2109 DEV_RESULT_NOFLAG;
2110 }
2111
2112 /*
2113 * If the user isn't interested in peripherals, don't descend
2114 * the tree any further.
2115 */
2116 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2117 return(1);
2118
2119 /*
2120 * If there is a peripheral list generation recorded, make sure
2121 * it hasn't changed.
2122 */
2123 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2124 && (device->target->bus == cdm->pos.cookie.bus)
2125 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2126 && (device->target == cdm->pos.cookie.target)
2127 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2128 && (device == cdm->pos.cookie.device)
2129 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2130 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2131 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2132 device->generation)){
2133 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2134 return(0);
2135 }
2136
2137 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2138 && (cdm->pos.cookie.bus == device->target->bus)
2139 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2140 && (cdm->pos.cookie.target == device->target)
2141 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2142 && (cdm->pos.cookie.device == device)
2143 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2144 && (cdm->pos.cookie.periph != NULL))
2145 return(xptperiphtraverse(device,
2146 (struct cam_periph *)cdm->pos.cookie.periph,
2147 xptedtperiphfunc, arg));
2148 else
2149 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2150}
2151
2152static int
2153xptedtperiphfunc(struct cam_periph *periph, void *arg)
2154{
2155 struct ccb_dev_match *cdm;
2156 dev_match_ret retval;
2157
2158 cdm = (struct ccb_dev_match *)arg;
2159
2160 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2161
2162 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2163 cdm->status = CAM_DEV_MATCH_ERROR;
2164 return(0);
2165 }
2166
2167 /*
2168 * If the copy flag is set, copy this peripheral out.
2169 */
2170 if (retval & DM_RET_COPY) {
2171 int spaceleft, j;
2172
2173 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2174 sizeof(struct dev_match_result));
2175
2176 /*
2177 * If we don't have enough space to put in another
2178 * match result, save our position and tell the
2179 * user there are more devices to check.
2180 */
2181 if (spaceleft < sizeof(struct dev_match_result)) {
2182 bzero(&cdm->pos, sizeof(cdm->pos));
2183 cdm->pos.position_type =
2184 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2185 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2186 CAM_DEV_POS_PERIPH;
2187
2188 cdm->pos.cookie.bus = periph->path->bus;
2189 cdm->pos.generations[CAM_BUS_GENERATION]=
2190 bus_generation;
2191 cdm->pos.cookie.target = periph->path->target;
2192 cdm->pos.generations[CAM_TARGET_GENERATION] =
2193 periph->path->bus->generation;
2194 cdm->pos.cookie.device = periph->path->device;
2195 cdm->pos.generations[CAM_DEV_GENERATION] =
2196 periph->path->target->generation;
2197 cdm->pos.cookie.periph = periph;
2198 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2199 periph->path->device->generation;
2200 cdm->status = CAM_DEV_MATCH_MORE;
2201 return(0);
2202 }
2203
2204 j = cdm->num_matches;
2205 cdm->num_matches++;
2206 cdm->matches[j].type = DEV_MATCH_PERIPH;
2207 cdm->matches[j].result.periph_result.path_id =
2208 periph->path->bus->path_id;
2209 cdm->matches[j].result.periph_result.target_id =
2210 periph->path->target->target_id;
2211 cdm->matches[j].result.periph_result.target_lun =
2212 periph->path->device->lun_id;
2213 cdm->matches[j].result.periph_result.unit_number =
2214 periph->unit_number;
2215 strncpy(cdm->matches[j].result.periph_result.periph_name,
2216 periph->periph_name, DEV_IDLEN);
2217 }
2218
2219 return(1);
2220}
2221
2222static int
2223xptedtmatch(struct ccb_dev_match *cdm)
2224{
2225 int ret;
2226
2227 cdm->num_matches = 0;
2228
2229 /*
2230 * Check the bus list generation. If it has changed, the user
2231 * needs to reset everything and start over.
2232 */
2233 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2234 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2235 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2236 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2237 return(0);
2238 }
2239
2240 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2241 && (cdm->pos.cookie.bus != NULL))
2242 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2243 xptedtbusfunc, cdm);
2244 else
2245 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2246
2247 /*
2248 * If we get back 0, that means that we had to stop before fully
2249 * traversing the EDT. It also means that one of the subroutines
2250 * has set the status field to the proper value. If we get back 1,
2251 * we've fully traversed the EDT and copied out any matching entries.
2252 */
2253 if (ret == 1)
2254 cdm->status = CAM_DEV_MATCH_LAST;
2255
2256 return(ret);
2257}
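/*
 * Sketch of the resume protocol from the requester's side, assuming a
 * consumer such as camcontrol(8) driving XPT_DEV_MATCH ccbs through
 * /dev/xpt0 (issue_dev_match and consume are hypothetical helpers):
 * the cdm->pos cookie saved above is handed back unchanged on the
 * next call, so a walk of an EDT larger than one match buffer is
 *
 *	bzero(&cdm.pos, sizeof(cdm.pos));
 *	do {
 *		issue_dev_match(&cdm);
 *		consume(cdm.matches, cdm.num_matches);
 *	} while (cdm.status == CAM_DEV_MATCH_MORE);
 *
 * and a CAM_DEV_MATCH_LIST_CHANGED status tells the caller to clear
 * the position and start the walk over.
 */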
2258
2259static int
2260xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2261{
2262 struct ccb_dev_match *cdm;
2263
2264 cdm = (struct ccb_dev_match *)arg;
2265
2266 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2267 && (cdm->pos.cookie.pdrv == pdrv)
2268 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2269 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2270 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2271 (*pdrv)->generation)) {
2272 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2273 return(0);
2274 }
2275
2276 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2277 && (cdm->pos.cookie.pdrv == pdrv)
2278 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2279 && (cdm->pos.cookie.periph != NULL))
2280 return(xptpdperiphtraverse(pdrv,
2281 (struct cam_periph *)cdm->pos.cookie.periph,
2282 xptplistperiphfunc, arg));
2283 else
2284 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2285}
2286
2287static int
2288xptplistperiphfunc(struct cam_periph *periph, void *arg)
2289{
2290 struct ccb_dev_match *cdm;
2291 dev_match_ret retval;
2292
2293 cdm = (struct ccb_dev_match *)arg;
2294
2295 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2296
2297 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2298 cdm->status = CAM_DEV_MATCH_ERROR;
2299 return(0);
2300 }
2301
2302 /*
2303 * If the copy flag is set, copy this peripheral out.
2304 */
2305 if (retval & DM_RET_COPY) {
2306 int spaceleft, j;
2307
2308 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2309 sizeof(struct dev_match_result));
2310
2311 /*
2312 * If we don't have enough space to put in another
2313 * match result, save our position and tell the
2314 * user there are more devices to check.
2315 */
2316 if (spaceleft < sizeof(struct dev_match_result)) {
2317 struct periph_driver **pdrv;
2318
2319 pdrv = NULL;
2320 bzero(&cdm->pos, sizeof(cdm->pos));
2321 cdm->pos.position_type =
2322 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2323 CAM_DEV_POS_PERIPH;
2324
2325 /*
2326 			 * This may look a bit nonsensical, but it is
2327 * actually quite logical. There are very few
2328 * peripheral drivers, and bloating every peripheral
2329 * structure with a pointer back to its parent
2330 * peripheral driver linker set entry would cost
2331 * more in the long run than doing this quick lookup.
2332 */
2333 			SET_FOREACH(pdrv, periphdriver_set) {
2334 if (strcmp((*pdrv)->driver_name,
2335 periph->periph_name) == 0)
2336 break;
2337 }
2338
2339 if (pdrv == NULL) {
2340 cdm->status = CAM_DEV_MATCH_ERROR;
2341 return(0);
2342 }
2343
2344 cdm->pos.cookie.pdrv = pdrv;
2345 /*
2346 * The periph generation slot does double duty, as
2347 * does the periph pointer slot. They are used for
2348 * both edt and pdrv lookups and positioning.
2349 */
2350 cdm->pos.cookie.periph = periph;
2351 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2352 (*pdrv)->generation;
2353 cdm->status = CAM_DEV_MATCH_MORE;
2354 return(0);
2355 }
2356
2357 j = cdm->num_matches;
2358 cdm->num_matches++;
2359 cdm->matches[j].type = DEV_MATCH_PERIPH;
2360 cdm->matches[j].result.periph_result.path_id =
2361 periph->path->bus->path_id;
2362
2363 /*
2364 * The transport layer peripheral doesn't have a target or
2365 * lun.
2366 */
2367 if (periph->path->target)
2368 cdm->matches[j].result.periph_result.target_id =
2369 periph->path->target->target_id;
2370 else
2371 cdm->matches[j].result.periph_result.target_id = -1;
2372
2373 if (periph->path->device)
2374 cdm->matches[j].result.periph_result.target_lun =
2375 periph->path->device->lun_id;
2376 else
2377 cdm->matches[j].result.periph_result.target_lun = -1;
2378
2379 cdm->matches[j].result.periph_result.unit_number =
2380 periph->unit_number;
2381 strncpy(cdm->matches[j].result.periph_result.periph_name,
2382 periph->periph_name, DEV_IDLEN);
2383 }
2384
2385 return(1);
2386}
2387
2388static int
2389xptperiphlistmatch(struct ccb_dev_match *cdm)
2390{
2391 int ret;
2392
2393 cdm->num_matches = 0;
2394
2395 /*
2396 	 * At the corresponding point in the EDT traversal function, we
2397 	 * check the bus list generation to make sure that no busses have
2398 	 * been added or removed since the user last sent an XPT_DEV_MATCH
2399 	 * ccb through.  For the peripheral driver list traversal, however, we
2400 * don't have to worry about new peripheral driver types coming or
2401 * going; they're in a linker set, and therefore can't change
2402 * without a recompile.
2403 */
2404
2405 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2406 && (cdm->pos.cookie.pdrv != NULL))
2407 ret = xptpdrvtraverse(
2408 (struct periph_driver **)cdm->pos.cookie.pdrv,
2409 xptplistpdrvfunc, cdm);
2410 else
2411 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2412
2413 /*
2414 * If we get back 0, that means that we had to stop before fully
2415 * traversing the peripheral driver tree. It also means that one of
2416 * the subroutines has set the status field to the proper value. If
2417 	 * we get back 1, we've fully traversed the peripheral driver list
2418 	 * and copied out any matching entries.
2419 */
2420 if (ret == 1)
2421 cdm->status = CAM_DEV_MATCH_LAST;
2422
2423 return(ret);
2424}
2425
2426static int
2427xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2428{
2429 struct cam_eb *bus, *next_bus;
2430 int retval;
2431
2432 retval = 1;
2433
2434 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2435 bus != NULL;
2436 bus = next_bus) {
2437 next_bus = TAILQ_NEXT(bus, links);
2438
2439 retval = tr_func(bus, arg);
2440 if (retval == 0)
2441 return(retval);
2442 }
2443
2444 return(retval);
2445}
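/*
 * All of these traversal routines share one contract: the callback
 * returns 0 to abort the walk (after recording why in its private
 * argument) and non-zero to keep going.  The next pointer is fetched
 * before the callback runs so that the walk can survive a callback
 * that removes the current node.  A minimal conforming callback, with
 * a hypothetical counter as its argument:
 *
 *	static int
 *	xptcountbusfunc(struct cam_eb *bus, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);
 *	}
 */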
2446
2447static int
2448xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2449 xpt_targetfunc_t *tr_func, void *arg)
2450{
2451 struct cam_et *target, *next_target;
2452 int retval;
2453
2454 retval = 1;
2455 for (target = (start_target ? start_target :
2456 TAILQ_FIRST(&bus->et_entries));
2457 target != NULL; target = next_target) {
2458
2459 next_target = TAILQ_NEXT(target, links);
2460
2461 retval = tr_func(target, arg);
2462
2463 if (retval == 0)
2464 return(retval);
2465 }
2466
2467 return(retval);
2468}
2469
2470static int
2471xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2472 xpt_devicefunc_t *tr_func, void *arg)
2473{
2474 struct cam_ed *device, *next_device;
2475 int retval;
2476
2477 retval = 1;
2478 for (device = (start_device ? start_device :
2479 TAILQ_FIRST(&target->ed_entries));
2480 device != NULL;
2481 device = next_device) {
2482
2483 next_device = TAILQ_NEXT(device, links);
2484
2485 retval = tr_func(device, arg);
2486
2487 if (retval == 0)
2488 return(retval);
2489 }
2490
2491 return(retval);
2492}
2493
2494static int
2495xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2496 xpt_periphfunc_t *tr_func, void *arg)
2497{
2498 struct cam_periph *periph, *next_periph;
2499 int retval;
2500
2501 retval = 1;
2502
2503 for (periph = (start_periph ? start_periph :
2504 SLIST_FIRST(&device->periphs));
2505 periph != NULL;
2506 periph = next_periph) {
2507
2508 next_periph = SLIST_NEXT(periph, periph_links);
2509
2510 retval = tr_func(periph, arg);
2511 if (retval == 0)
2512 return(retval);
2513 }
2514
2515 return(retval);
2516}
2517
2518static int
2519xptpdrvtraverse(struct periph_driver **start_pdrv,
2520 xpt_pdrvfunc_t *tr_func, void *arg)
2521{
2522 struct periph_driver **pdrv;
2523 int retval;
2524
2525 retval = 1;
2526
2527 /*
2528 * We don't traverse the peripheral driver list like we do the
2529 * other lists, because it is a linker set, and therefore cannot be
2530 * changed during runtime. If the peripheral driver list is ever
2531 * re-done to be something other than a linker set (i.e. it can
2532 * change while the system is running), the list traversal should
2533 * be modified to work like the other traversal functions.
2534 */
2535 SET_FOREACH(pdrv, periphdriver_set) {
2536 if (start_pdrv == NULL || start_pdrv == pdrv) {
2537 retval = tr_func(pdrv, arg);
2538 if (retval == 0)
2539 return(retval);
2540 			start_pdrv = NULL; /* traverse remainder */
2541 		}
2542 	}
2543 return(retval);
2544}
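/*
 * For reference, a sketch of how entries land in periphdriver_set at
 * compile time (modeled on the "da" driver; details abridged): each
 * peripheral driver registers its periph_driver structure with the
 * DATA_SET() linker-set macro, which is why the set cannot change
 * while the system is running.
 *
 *	static struct periph_driver dadriver =
 *	{
 *		dainit, "da",
 *		TAILQ_HEAD_INITIALIZER(dadriver.units), 0
 *	};
 *	DATA_SET(periphdriver_set, dadriver);
 */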
2545
2546static int
2547xptpdperiphtraverse(struct periph_driver **pdrv,
2548 struct cam_periph *start_periph,
2549 xpt_periphfunc_t *tr_func, void *arg)
2550{
2551 struct cam_periph *periph, *next_periph;
2552 int retval;
2553
2554 retval = 1;
2555
2556 for (periph = (start_periph ? start_periph :
2557 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2558 periph = next_periph) {
2559
2560 next_periph = TAILQ_NEXT(periph, unit_links);
2561
2562 retval = tr_func(periph, arg);
2563 if (retval == 0)
2564 return(retval);
2565 }
2566 return(retval);
2567}
2568
2569static int
2570xptdefbusfunc(struct cam_eb *bus, void *arg)
2571{
2572 struct xpt_traverse_config *tr_config;
2573
2574 tr_config = (struct xpt_traverse_config *)arg;
2575
2576 if (tr_config->depth == XPT_DEPTH_BUS) {
2577 xpt_busfunc_t *tr_func;
2578
2579 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2580
2581 return(tr_func(bus, tr_config->tr_arg));
2582 } else
2583 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2584}
2585
2586static int
2587xptdeftargetfunc(struct cam_et *target, void *arg)
2588{
2589 struct xpt_traverse_config *tr_config;
2590
2591 tr_config = (struct xpt_traverse_config *)arg;
2592
2593 if (tr_config->depth == XPT_DEPTH_TARGET) {
2594 xpt_targetfunc_t *tr_func;
2595
2596 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2597
2598 return(tr_func(target, tr_config->tr_arg));
2599 } else
2600 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2601}
2602
2603static int
2604xptdefdevicefunc(struct cam_ed *device, void *arg)
2605{
2606 struct xpt_traverse_config *tr_config;
2607
2608 tr_config = (struct xpt_traverse_config *)arg;
2609
2610 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2611 xpt_devicefunc_t *tr_func;
2612
2613 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2614
2615 return(tr_func(device, tr_config->tr_arg));
2616 } else
2617 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2618}
2619
2620static int
2621xptdefperiphfunc(struct cam_periph *periph, void *arg)
2622{
2623 struct xpt_traverse_config *tr_config;
2624 xpt_periphfunc_t *tr_func;
2625
2626 tr_config = (struct xpt_traverse_config *)arg;
2627
2628 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2629
2630 /*
2631 * Unlike the other default functions, we don't check for depth
2632 * here. The peripheral driver level is the last level in the EDT,
2633 * so if we're here, we should execute the function in question.
2634 */
2635 return(tr_func(periph, tr_config->tr_arg));
2636}
2637
2638/*
2639 * Execute the given function for every bus in the EDT.
2640 */
2641static int
2642xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2643{
2644 struct xpt_traverse_config tr_config;
2645
2646 tr_config.depth = XPT_DEPTH_BUS;
2647 tr_config.tr_func = tr_func;
2648 tr_config.tr_arg = arg;
2649
2650 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2651}
2652
2653#ifdef notusedyet
2654/*
2655 * Execute the given function for every target in the EDT.
2656 */
2657static int
2658xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2659{
2660 struct xpt_traverse_config tr_config;
2661
2662 tr_config.depth = XPT_DEPTH_TARGET;
2663 tr_config.tr_func = tr_func;
2664 tr_config.tr_arg = arg;
2665
2666 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2667}
2668#endif /* notusedyet */
2669
2670/*
2671 * Execute the given function for every device in the EDT.
2672 */
2673static int
2674xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2675{
2676 struct xpt_traverse_config tr_config;
2677
2678 tr_config.depth = XPT_DEPTH_DEVICE;
2679 tr_config.tr_func = tr_func;
2680 tr_config.tr_arg = arg;
2681
2682 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2683}
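/*
 * A minimal sketch of using this wrapper (the callback contract is the
 * same as for the raw traversers; xptcountdevfunc and ndevs are
 * hypothetical):
 *
 *	static int
 *	xptcountdevfunc(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);
 *	}
 *
 *	int ndevs = 0;
 *	xpt_for_all_devices(xptcountdevfunc, &ndevs);
 */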
2684
2685#ifdef notusedyet
2686/*
2687 * Execute the given function for every peripheral in the EDT.
2688 */
2689static int
2690xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2691{
2692 struct xpt_traverse_config tr_config;
2693
2694 tr_config.depth = XPT_DEPTH_PERIPH;
2695 tr_config.tr_func = tr_func;
2696 tr_config.tr_arg = arg;
2697
2698 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2699}
2700#endif /* notusedyet */
2701
2702static int
2703xptsetasyncfunc(struct cam_ed *device, void *arg)
2704{
2705 struct cam_path path;
2706 struct ccb_getdev cgd;
2707 struct async_node *cur_entry;
2708
2709 cur_entry = (struct async_node *)arg;
2710
2711 /*
2712 * Don't report unconfigured devices (Wildcard devs,
2713 * devices only for target mode, device instances
2714 * that have been invalidated but are waiting for
2715 * their last reference count to be released).
2716 */
2717 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2718 return (1);
2719
2720 xpt_compile_path(&path,
2721 NULL,
2722 device->target->bus->path_id,
2723 device->target->target_id,
2724 device->lun_id);
2725 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2726 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2727 xpt_action((union ccb *)&cgd);
2728 cur_entry->callback(cur_entry->callback_arg,
2729 AC_FOUND_DEVICE,
2730 &path, &cgd);
2731 xpt_release_path(&path);
2732
2733 return(1);
2734}
2735
2736static int
2737xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2738{
2739 struct cam_path path;
2740 struct ccb_pathinq cpi;
2741 struct async_node *cur_entry;
2742
2743 cur_entry = (struct async_node *)arg;
2744
2745 xpt_compile_path(&path, /*periph*/NULL,
2746 bus->sim->path_id,
2747 CAM_TARGET_WILDCARD,
2748 CAM_LUN_WILDCARD);
2749 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2750 cpi.ccb_h.func_code = XPT_PATH_INQ;
2751 xpt_action((union ccb *)&cpi);
2752 cur_entry->callback(cur_entry->callback_arg,
2753 AC_PATH_REGISTERED,
2754 &path, &cpi);
2755 xpt_release_path(&path);
2756
2757 return(1);
2758}
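/*
 * Sketch of the registering side, as a peripheral driver would write
 * it (not part of this file; myasync and mysoftc are hypothetical):
 * the two helpers above run when a client enables AC_FOUND_DEVICE or
 * AC_PATH_REGISTERED through an XPT_SASYNC_CB ccb, roughly:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE;
 *	csa.callback = myasync;
 *	csa.callback_arg = mysoftc;
 *	xpt_action((union ccb *)&csa);
 *
 * after which the XPT_SASYNC_CB case of xpt_action() below replays the
 * new handler against every existing device via the helpers above.
 */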
2759
2760void
2761xpt_action(union ccb *start_ccb)
2762{
2763 int iopl;
2764
2765 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2766
2767 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2768
2769 iopl = splsoftcam();
2770 switch (start_ccb->ccb_h.func_code) {
2771 case XPT_SCSI_IO:
2772 {
2773#ifdef CAMDEBUG
2774 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2775 struct cam_path *path;
2776
2777 path = start_ccb->ccb_h.path;
2778#endif
2779
2780 /*
2781 * For the sake of compatibility with SCSI-1
2782 * devices that may not understand the identify
2783 * message, we include lun information in the
2784 * second byte of all commands. SCSI-1 specifies
2785 * that luns are a 3 bit value and reserves only 3
2786 * bits for lun information in the CDB. Later
2787 * revisions of the SCSI spec allow for more than 8
2788 * luns, but have deprecated lun information in the
2789 		 * CDB.  So, if the lun won't fit, we must omit it.
2790 *
2791 * Also be aware that during initial probing for devices,
2792 * the inquiry information is unknown but initialized to 0.
2793 * This means that this code will be exercised while probing
2794 * devices with an ANSI revision greater than 2.
2795 */
2796 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2797 && start_ccb->ccb_h.target_lun < 8
2798 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2799
2800 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2801 start_ccb->ccb_h.target_lun << 5;
2802 }
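		/*
		 * For example, with target_lun 2 the statement above
		 * ORs (2 << 5) == 0x40 into CDB byte 1, placing the
		 * lun in bits 7-5 as SCSI-1 expects; luns of 8 and up
		 * no longer fit in those 3 bits, hence the < 8 guard.
		 */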
2803 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2804 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2805 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2806 &path->device->inq_data),
2807 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2808 cdb_str, sizeof(cdb_str))));
2809 /* FALLTHROUGH */
2810 }
2811 case XPT_TARGET_IO:
2812 case XPT_CONT_TARGET_IO:
2813 start_ccb->csio.sense_resid = 0;
2814 start_ccb->csio.resid = 0;
2815 /* FALLTHROUGH */
2816 case XPT_RESET_DEV:
2817 case XPT_ENG_EXEC:
2818 {
2819 struct cam_path *path;
2820 int s;
2821 int runq;
2822
2823 path = start_ccb->ccb_h.path;
2824 s = splsoftcam();
2825
2826 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2827 if (path->device->qfrozen_cnt == 0)
2828 runq = xpt_schedule_dev_sendq(path->bus, path->device);
2829 else
2830 runq = 0;
2831 splx(s);
2832 if (runq != 0)
2833 xpt_run_dev_sendq(path->bus);
2834 break;
2835 }
2836 case XPT_SET_TRAN_SETTINGS:
2837 {
2838 xpt_set_transfer_settings(&start_ccb->cts,
2839 start_ccb->ccb_h.path->device,
2840 /*async_update*/FALSE);
2841 break;
2842 }
2843 case XPT_CALC_GEOMETRY:
2844 {
2845 struct cam_sim *sim;
2846
2847 /* Filter out garbage */
2848 if (start_ccb->ccg.block_size == 0
2849 || start_ccb->ccg.volume_size == 0) {
2850 start_ccb->ccg.cylinders = 0;
2851 start_ccb->ccg.heads = 0;
2852 start_ccb->ccg.secs_per_track = 0;
2853 start_ccb->ccb_h.status = CAM_REQ_CMP;
2854 break;
2855 }
2856#ifdef PC98
2857 /*
2858 		 * In a PC-98 system, geometry translation depends on
2859 * the "real" device geometry obtained from mode page 4.
2860 * SCSI geometry translation is performed in the
2861 * initialization routine of the SCSI BIOS and the result
2862 * stored in host memory. If the translation is available
2863 * in host memory, use it. If not, rely on the default
2864 * translation the device driver performs.
2865 */
2866 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2867 start_ccb->ccb_h.status = CAM_REQ_CMP;
2868 break;
2869 }
2870#endif
2871 sim = start_ccb->ccb_h.path->bus->sim;
2872 (*(sim->sim_action))(sim, start_ccb);
2873 break;
2874 }
2875 case XPT_ABORT:
2876 {
2877 union ccb* abort_ccb;
2878 int s;
2879
2880 abort_ccb = start_ccb->cab.abort_ccb;
2881 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2882
2883 if (abort_ccb->ccb_h.pinfo.index >= 0) {
2884 struct cam_ccbq *ccbq;
2885
2886 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2887 cam_ccbq_remove_ccb(ccbq, abort_ccb);
2888 abort_ccb->ccb_h.status =
2889 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2890 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2891 s = splcam();
2892 xpt_done(abort_ccb);
2893 splx(s);
2894 start_ccb->ccb_h.status = CAM_REQ_CMP;
2895 break;
2896 }
2897 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2898 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2899 /*
2900 * We've caught this ccb en route to
2901 * the SIM. Flag it for abort and the
2902 * SIM will do so just before starting
2903 * real work on the CCB.
2904 */
2905 abort_ccb->ccb_h.status =
2906 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2907 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2908 start_ccb->ccb_h.status = CAM_REQ_CMP;
2909 break;
2910 }
2911 }
2912 if (XPT_FC_IS_QUEUED(abort_ccb)
2913 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2914 /*
2915 * It's already completed but waiting
2916 * for our SWI to get to it.
2917 */
2918 start_ccb->ccb_h.status = CAM_UA_ABORT;
2919 break;
2920 }
2921 /*
2922 * If we weren't able to take care of the abort request
2923 * in the XPT, pass the request down to the SIM for processing.
2924 */
2925 /* FALLTHROUGH */
2926 }
2927 case XPT_ACCEPT_TARGET_IO:
2928 case XPT_EN_LUN:
2929 case XPT_IMMED_NOTIFY:
2930 case XPT_NOTIFY_ACK:
2931 case XPT_GET_TRAN_SETTINGS:
2932 case XPT_RESET_BUS:
2933 {
2934 struct cam_sim *sim;
2935
2936 sim = start_ccb->ccb_h.path->bus->sim;
2937 (*(sim->sim_action))(sim, start_ccb);
2938 break;
2939 }
2940 case XPT_PATH_INQ:
2941 {
2942 struct cam_sim *sim;
2943
2944 sim = start_ccb->ccb_h.path->bus->sim;
2945 (*(sim->sim_action))(sim, start_ccb);
2946 break;
2947 }
2948 case XPT_PATH_STATS:
2949 start_ccb->cpis.last_reset =
2950 start_ccb->ccb_h.path->bus->last_reset;
2951 start_ccb->ccb_h.status = CAM_REQ_CMP;
2952 break;
2953 case XPT_GDEV_TYPE:
2954 {
2955 struct cam_ed *dev;
2956 int s;
2957
2958 dev = start_ccb->ccb_h.path->device;
2959 s = splcam();
2960 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2961 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2962 } else {
2963 struct ccb_getdev *cgd;
2964 struct cam_eb *bus;
2965 struct cam_et *tar;
2966
2967 cgd = &start_ccb->cgd;
2968 bus = cgd->ccb_h.path->bus;
2969 tar = cgd->ccb_h.path->target;
2970 cgd->inq_data = dev->inq_data;
2971 cgd->ccb_h.status = CAM_REQ_CMP;
2972 cgd->serial_num_len = dev->serial_num_len;
2973 if ((dev->serial_num_len > 0)
2974 && (dev->serial_num != NULL))
2975 bcopy(dev->serial_num, cgd->serial_num,
2976 dev->serial_num_len);
2977 }
2978 splx(s);
2979 break;
2980 }
2981 case XPT_GDEV_STATS:
2982 {
2983 struct cam_ed *dev;
2984 int s;
2985
2986 dev = start_ccb->ccb_h.path->device;
2987 s = splcam();
2988 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2989 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2990 } else {
2991 struct ccb_getdevstats *cgds;
2992 struct cam_eb *bus;
2993 struct cam_et *tar;
2994
2995 cgds = &start_ccb->cgds;
2996 bus = cgds->ccb_h.path->bus;
2997 tar = cgds->ccb_h.path->target;
2998 cgds->dev_openings = dev->ccbq.dev_openings;
2999 cgds->dev_active = dev->ccbq.dev_active;
3000 cgds->devq_openings = dev->ccbq.devq_openings;
3001 cgds->devq_queued = dev->ccbq.queue.entries;
3002 cgds->held = dev->ccbq.held;
3003 cgds->last_reset = tar->last_reset;
3004 cgds->maxtags = dev->quirk->maxtags;
3005 cgds->mintags = dev->quirk->mintags;
3006 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3007 cgds->last_reset = bus->last_reset;
3008 cgds->ccb_h.status = CAM_REQ_CMP;
3009 }
3010 splx(s);
3011 break;
3012 }
3013 case XPT_GDEVLIST:
3014 {
3015 struct cam_periph *nperiph;
3016 struct periph_list *periph_head;
3017 struct ccb_getdevlist *cgdl;
3018 int i;
3019 int s;
3020 struct cam_ed *device;
3021 int found;
3022
3023
3024 found = 0;
3025
3026 /*
3027 * Don't want anyone mucking with our data.
3028 */
3029 s = splcam();
3030 device = start_ccb->ccb_h.path->device;
3031 periph_head = &device->periphs;
3032 cgdl = &start_ccb->cgdl;
3033
3034 /*
3035 * Check and see if the list has changed since the user
3036 * last requested a list member. If so, tell them that the
3037 * list has changed, and therefore they need to start over
3038 * from the beginning.
3039 */
3040 if ((cgdl->index != 0) &&
3041 (cgdl->generation != device->generation)) {
3042 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3043 splx(s);
3044 break;
3045 }
3046
3047 /*
3048 * Traverse the list of peripherals and attempt to find
3049 * the requested peripheral.
3050 */
3051 for (nperiph = periph_head->slh_first, i = 0;
3052 (nperiph != NULL) && (i <= cgdl->index);
3053 nperiph = nperiph->periph_links.sle_next, i++) {
3054 if (i == cgdl->index) {
3055 strncpy(cgdl->periph_name,
3056 nperiph->periph_name,
3057 DEV_IDLEN);
3058 cgdl->unit_number = nperiph->unit_number;
3059 found = 1;
3060 }
3061 }
3062 if (found == 0) {
3063 cgdl->status = CAM_GDEVLIST_ERROR;
3064 splx(s);
3065 break;
3066 }
3067
3068 if (nperiph == NULL)
3069 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3070 else
3071 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3072
3073 cgdl->index++;
3074 cgdl->generation = device->generation;
3075
3076 splx(s);
3077 cgdl->ccb_h.status = CAM_REQ_CMP;
3078 break;
3079 }
3080 case XPT_DEV_MATCH:
3081 {
3082 int s;
3083 dev_pos_type position_type;
3084 struct ccb_dev_match *cdm;
3085 int ret;
3086
3087 cdm = &start_ccb->cdm;
3088
3089 /*
3090 * Prevent EDT changes while we traverse it.
3091 */
3092 s = splcam();
3093 /*
3094 * There are two ways of getting at information in the EDT.
3095 * The first way is via the primary EDT tree. It starts
3096 * with a list of busses, then a list of targets on a bus,
3097 * then devices/luns on a target, and then peripherals on a
3098 * device/lun. The "other" way is by the peripheral driver
3099 * lists. The peripheral driver lists are organized by
3100 		 * peripheral driver (obviously), so it makes sense to
3101 * use the peripheral driver list if the user is looking
3102 * for something like "da1", or all "da" devices. If the
3103 * user is looking for something on a particular bus/target
3104 * or lun, it's generally better to go through the EDT tree.
3105 */
3106
3107 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3108 position_type = cdm->pos.position_type;
3109 else {
3110 int i;
3111
3112 position_type = CAM_DEV_POS_NONE;
3113
3114 for (i = 0; i < cdm->num_patterns; i++) {
3115 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3116 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3117 position_type = CAM_DEV_POS_EDT;
3118 break;
3119 }
3120 }
3121
3122 if (cdm->num_patterns == 0)
3123 position_type = CAM_DEV_POS_EDT;
3124 else if (position_type == CAM_DEV_POS_NONE)
3125 position_type = CAM_DEV_POS_PDRV;
3126 }
3127
3128 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3129 case CAM_DEV_POS_EDT:
3130 ret = xptedtmatch(cdm);
3131 break;
3132 case CAM_DEV_POS_PDRV:
3133 ret = xptperiphlistmatch(cdm);
3134 break;
3135 default:
3136 cdm->status = CAM_DEV_MATCH_ERROR;
3137 break;
3138 }
3139
3140 splx(s);
3141
3142 if (cdm->status == CAM_DEV_MATCH_ERROR)
3143 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3144 else
3145 start_ccb->ccb_h.status = CAM_REQ_CMP;
3146
3147 break;
3148 }
3149 case XPT_SASYNC_CB:
3150 {
3151 struct ccb_setasync *csa;
3152 struct async_node *cur_entry;
3153 struct async_list *async_head;
3154 u_int32_t added;
3155 int s;
3156
3157 csa = &start_ccb->csa;
3158 added = csa->event_enable;
3159 async_head = &csa->ccb_h.path->device->asyncs;
3160
3161 /*
3162 * If there is already an entry for us, simply
3163 * update it.
3164 */
3165 s = splcam();
3166 cur_entry = SLIST_FIRST(async_head);
3167 while (cur_entry != NULL) {
3168 if ((cur_entry->callback_arg == csa->callback_arg)
3169 && (cur_entry->callback == csa->callback))
3170 break;
3171 cur_entry = SLIST_NEXT(cur_entry, links);
3172 }
3173
3174 if (cur_entry != NULL) {
3175 /*
3176 * If the request has no flags set,
3177 * remove the entry.
3178 */
3179 added &= ~cur_entry->event_enable;
3180 if (csa->event_enable == 0) {
3181 SLIST_REMOVE(async_head, cur_entry,
3182 async_node, links);
3183 csa->ccb_h.path->device->refcount--;
3184 free(cur_entry, M_DEVBUF);
3185 } else {
3186 cur_entry->event_enable = csa->event_enable;
3187 }
3188 } else {
3189 cur_entry = malloc(sizeof(*cur_entry),
3190 M_DEVBUF, M_INTWAIT);
3191 cur_entry->event_enable = csa->event_enable;
3192 cur_entry->callback_arg = csa->callback_arg;
3193 cur_entry->callback = csa->callback;
3194 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3195 csa->ccb_h.path->device->refcount++;
3196 }
3197
3198 if ((added & AC_FOUND_DEVICE) != 0) {
3199 /*
3200 * Get this peripheral up to date with all
3201 * the currently existing devices.
3202 */
3203 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3204 }
3205 if ((added & AC_PATH_REGISTERED) != 0) {
3206 /*
3207 * Get this peripheral up to date with all
3208 * the currently existing busses.
3209 */
3210 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3211 }
3212 splx(s);
3213 start_ccb->ccb_h.status = CAM_REQ_CMP;
3214 break;
3215 }
3216 case XPT_REL_SIMQ:
3217 {
3218 struct ccb_relsim *crs;
3219 struct cam_ed *dev;
3220 int s;
3221
3222 crs = &start_ccb->crs;
3223 dev = crs->ccb_h.path->device;
3224 if (dev == NULL) {
3225
3226 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3227 break;
3228 }
3229
3230 s = splcam();
3231
3232 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3233
3234 if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3235
3236 /* Don't ever go below one opening */
3237 if (crs->openings > 0) {
3238 xpt_dev_ccbq_resize(crs->ccb_h.path,
3239 crs->openings);
3240
3241 if (bootverbose) {
3242 xpt_print_path(crs->ccb_h.path);
3243 printf("tagged openings "
3244 "now %d\n",
3245 crs->openings);
3246 }
3247 }
3248 }
3249 }
3250
3251 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3252
3253 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3254
3255 /*
3256 * Just extend the old timeout and decrement
3257 * the freeze count so that a single timeout
3258 * is sufficient for releasing the queue.
3259 */
3260 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3261 untimeout(xpt_release_devq_timeout,
3262 dev, dev->c_handle);
3263 } else {
3264
3265 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3266 }
3267
3268 dev->c_handle =
3269 timeout(xpt_release_devq_timeout,
3270 dev,
3271 (crs->release_timeout * hz) / 1000);
3272
3273 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3274
3275 }
3276
3277 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3278
3279 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3280 /*
3281 * Decrement the freeze count so that a single
3282 * completion is still sufficient to unfreeze
3283 * the queue.
3284 */
3285 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3286 } else {
3287
3288 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3289 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3290 }
3291 }
3292
3293 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3294
3295 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3296 || (dev->ccbq.dev_active == 0)) {
3297
3298 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3299 } else {
3300
3301 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3302 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3303 }
3304 }
3305 splx(s);
3306
3307 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3308
3309 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3310 /*run_queue*/TRUE);
3311 }
3312 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3313 start_ccb->ccb_h.status = CAM_REQ_CMP;
3314 break;
3315 }
3316 case XPT_SCAN_BUS:
3317 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3318 break;
3319 case XPT_SCAN_LUN:
3320 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3321 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3322 start_ccb);
3323 break;
3324 case XPT_DEBUG: {
3325#ifdef CAMDEBUG
3326 int s;
3327
3328 s = splcam();
3329#ifdef CAM_DEBUG_DELAY
3330 cam_debug_delay = CAM_DEBUG_DELAY;
3331#endif
3332 cam_dflags = start_ccb->cdbg.flags;
3333 if (cam_dpath != NULL) {
3334 xpt_free_path(cam_dpath);
3335 cam_dpath = NULL;
3336 }
3337
3338 if (cam_dflags != CAM_DEBUG_NONE) {
3339 if (xpt_create_path(&cam_dpath, xpt_periph,
3340 start_ccb->ccb_h.path_id,
3341 start_ccb->ccb_h.target_id,
3342 start_ccb->ccb_h.target_lun) !=
3343 CAM_REQ_CMP) {
3344 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3345 cam_dflags = CAM_DEBUG_NONE;
3346 } else {
3347 start_ccb->ccb_h.status = CAM_REQ_CMP;
3348 xpt_print_path(cam_dpath);
3349 printf("debugging flags now %x\n", cam_dflags);
3350 }
3351 } else {
3352 cam_dpath = NULL;
3353 start_ccb->ccb_h.status = CAM_REQ_CMP;
3354 }
3355 splx(s);
3356#else /* !CAMDEBUG */
3357 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3358#endif /* CAMDEBUG */
3359 break;
3360 }
3361 case XPT_NOOP:
3362 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3363 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3364 start_ccb->ccb_h.status = CAM_REQ_CMP;
3365 break;
3366 default:
3367 case XPT_SDEV_TYPE:
3368 case XPT_TERM_IO:
3369 case XPT_ENG_INQ:
3370 /* XXX Implement */
3371 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3372 break;
3373 }
3374 splx(iopl);
3375}
3376
3377void
3378xpt_polled_action(union ccb *start_ccb)
3379{
3380 int s;
3381 u_int32_t timeout;
3382 struct cam_sim *sim;
3383 struct cam_devq *devq;
3384 struct cam_ed *dev;
3385
3386 timeout = start_ccb->ccb_h.timeout;
3387 sim = start_ccb->ccb_h.path->bus->sim;
3388 devq = sim->devq;
3389 dev = start_ccb->ccb_h.path->device;
3390
3391 s = splcam();
3392
3393 /*
3394 * Steal an opening so that no other queued requests
3395 * can get it before us while we simulate interrupts.
3396 */
3397 dev->ccbq.devq_openings--;
3398 dev->ccbq.dev_openings--;
3399
3400 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3401 && (--timeout > 0)) {
3402 DELAY(1000);
3403 (*(sim->sim_poll))(sim);
3404 swi_camnet(NULL);
3405 swi_cambio(NULL);
3406 }
3407
3408 dev->ccbq.devq_openings++;
3409 dev->ccbq.dev_openings++;
3410
3411 if (timeout != 0) {
3412 xpt_action(start_ccb);
3413 while(--timeout > 0) {
3414 (*(sim->sim_poll))(sim);
3415 swi_camnet(NULL);
3416 swi_cambio(NULL);
3417 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3418 != CAM_REQ_INPROG)
3419 break;
3420 DELAY(1000);
3421 }
3422 if (timeout == 0) {
3423 /*
3424 * XXX Is it worth adding a sim_timeout entry
3425 * point so we can attempt recovery? If
3426 * this is only used for dumps, I don't think
3427 * it is.
3428 */
3429 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3430 }
3431 } else {
3432 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3433 }
3434 splx(s);
3435}
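/*
 * A minimal sketch of a polled request, assuming a caller such as a
 * crash-dump path where interrupt-driven completion is unavailable
 * (the errno mapping is hypothetical):
 *
 *	union ccb ccb;
 *
 *	xpt_setup_ccb(&ccb.ccb_h, path, 1);
 *	ccb.ccb_h.func_code = XPT_SCSI_IO;
 *	ccb.ccb_h.timeout = 5000;
 *	... fill in the CDB, data buffer and flags as usual ...
 *	xpt_polled_action(&ccb);
 *	if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		return (EIO);
 *
 * The timeout is consumed in 1ms DELAY() steps, first while waiting
 * for a device opening and then while polling the SIM for completion.
 */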
3436
3437/*
3438 * Schedule a peripheral driver to receive a ccb when its
3439 * target device has space for more transactions.
3440 */
3441void
3442xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3443{
3444 struct cam_ed *device;
3445 int s;
3446 int runq;
3447
3448 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3449 device = perph->path->device;
3450 s = splsoftcam();
3451 if (periph_is_queued(perph)) {
3452 /* Simply reorder based on new priority */
3453 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3454 (" change priority to %d\n", new_priority));
3455 if (new_priority < perph->pinfo.priority) {
3456 camq_change_priority(&device->drvq,
3457 perph->pinfo.index,
3458 new_priority);
3459 }
3460 runq = 0;
3461 } else {
3462 /* New entry on the queue */
3463 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3464 (" added periph to queue\n"));
3465 perph->pinfo.priority = new_priority;
3466 perph->pinfo.generation = ++device->drvq.generation;
3467 camq_insert(&device->drvq, &perph->pinfo);
3468 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3469 }
3470 splx(s);
3471 if (runq != 0) {
3472 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3473 (" calling xpt_run_devq\n"));
3474 xpt_run_dev_allocq(perph->path->bus);
3475 }
3476}
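/*
 * The usual cycle from a peripheral driver's point of view (a sketch;
 * xxstart stands in for whatever the driver registered as its
 * periph_start entry):
 *
 *	xpt_schedule(periph, priority);
 *		... the alloc queue drains and a ccb is obtained ...
 *	xxstart(periph, work_ccb);
 *		... driver fills in work_ccb and calls xpt_action() ...
 *
 * That is, xpt_schedule() only queues the request for a ccb; the ccb
 * itself arrives later through the driver's periph_start callback,
 * dispatched from xpt_run_dev_allocq() below.
 */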
3477
3478
3479/*
3480 * Schedule a device to run on a given queue.
3481 * If the device was inserted as a new entry on the queue,
3482 * return 1 meaning the device queue should be run. If we
3483 * were already queued, implying someone else has already
3484 * started the queue, return 0 so the caller doesn't attempt
3485  * to run the queue.  Must be run at splsoftcam or splcam
3486  * (the latter encompasses splsoftcam).
3487 */
3488static int
3489xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3490 u_int32_t new_priority)
3491{
3492 int retval;
3493 u_int32_t old_priority;
3494
3495 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3496
3497 old_priority = pinfo->priority;
3498
3499 /*
3500 * Are we already queued?
3501 */
3502 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3503 /* Simply reorder based on new priority */
3504 if (new_priority < old_priority) {
3505 camq_change_priority(queue, pinfo->index,
3506 new_priority);
3507 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3508 ("changed priority to %d\n",
3509 new_priority));
3510 }
3511 retval = 0;
3512 } else {
3513 /* New entry on the queue */
3514 if (new_priority < old_priority)
3515 pinfo->priority = new_priority;
3516
3517 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3518 ("Inserting onto queue\n"));
3519 pinfo->generation = ++queue->generation;
3520 camq_insert(queue, pinfo);
3521 retval = 1;
3522 }
3523 return (retval);
3524}
3525
3526static void
3527xpt_run_dev_allocq(struct cam_eb *bus)
3528{
3529 struct cam_devq *devq;
3530 int s;
3531
3532 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3533 devq = bus->sim->devq;
3534
3535 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3536 (" qfrozen_cnt == 0x%x, entries == %d, "
3537 "openings == %d, active == %d\n",
3538 devq->alloc_queue.qfrozen_cnt,
3539 devq->alloc_queue.entries,
3540 devq->alloc_openings,
3541 devq->alloc_active));
3542
3543 s = splsoftcam();
3544 devq->alloc_queue.qfrozen_cnt++;
3545 while ((devq->alloc_queue.entries > 0)
3546 && (devq->alloc_openings > 0)
3547 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3548 struct cam_ed_qinfo *qinfo;
3549 struct cam_ed *device;
3550 union ccb *work_ccb;
3551 struct cam_periph *drv;
3552 struct camq *drvq;
3553
3554 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3555 CAMQ_HEAD);
3556 device = qinfo->device;
3557
3558 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3559 ("running device %p\n", device));
3560
3561 drvq = &device->drvq;
3562
3563#ifdef CAMDEBUG
3564 if (drvq->entries <= 0) {
3565 panic("xpt_run_dev_allocq: "
3566 "Device on queue without any work to do");
3567 }
3568#endif
3569 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3570 devq->alloc_openings--;
3571 devq->alloc_active++;
3572 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3573 splx(s);
3574 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3575 drv->pinfo.priority);
3576 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3577 ("calling periph start\n"));
3578 drv->periph_start(drv, work_ccb);
3579 } else {
3580 /*
3581 * Malloc failure in alloc_ccb
3582 */
3583 /*
3584 * XXX add us to a list to be run from free_ccb
3585 * if we don't have any ccbs active on this
3586 * device queue otherwise we may never get run
3587 * again.
3588 */
3589 break;
3590 }
3591
3592 /* Raise IPL for possible insertion and test at top of loop */
3593 s = splsoftcam();
3594
3595 if (drvq->entries > 0) {
3596 /* We have more work. Attempt to reschedule */
3597 xpt_schedule_dev_allocq(bus, device);
3598 }
3599 }
3600 devq->alloc_queue.qfrozen_cnt--;
3601 splx(s);
3602}
3603
3604static void
3605xpt_run_dev_sendq(struct cam_eb *bus)
3606{
3607 struct cam_devq *devq;
3608 int s;
3609
3610 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3611
3612 devq = bus->sim->devq;
3613
3614 s = splcam();
3615 devq->send_queue.qfrozen_cnt++;
3616 splx(s);
3617 s = splsoftcam();
3618 while ((devq->send_queue.entries > 0)
3619 && (devq->send_openings > 0)) {
3620 struct cam_ed_qinfo *qinfo;
3621 struct cam_ed *device;
3622 union ccb *work_ccb;
3623 struct cam_sim *sim;
3624 int ospl;
3625
3626 ospl = splcam();
3627 if (devq->send_queue.qfrozen_cnt > 1) {
3628 splx(ospl);
3629 break;
3630 }
3631
3632 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3633 CAMQ_HEAD);
3634 device = qinfo->device;
3635
3636 /*
3637 * If the device has been "frozen", don't attempt
3638 * to run it.
3639 */
3640 if (device->qfrozen_cnt > 0) {
3641 splx(ospl);
3642 continue;
3643 }
3644
3645 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3646 ("running device %p\n", device));
3647
3648 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3649 if (work_ccb == NULL) {
3650 printf("device on run queue with no ccbs???\n");
3651 splx(ospl);
3652 continue;
3653 }
3654
3655 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3656
3657 if (num_highpower <= 0) {
3658 /*
3659 * We got a high power command, but we
3660 * don't have any available slots. Freeze
3661 * the device queue until we have a slot
3662 * available.
3663 */
3664 device->qfrozen_cnt++;
3665 STAILQ_INSERT_TAIL(&highpowerq,
3666 &work_ccb->ccb_h,
3667 xpt_links.stqe);
3668
3669 splx(ospl);
3670 continue;
3671 } else {
3672 /*
3673 * Consume a high power slot while
3674 * this ccb runs.
3675 */
3676 num_highpower--;
3677 }
3678 }
3679 devq->active_dev = device;
3680 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3681
3682 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3683 splx(ospl);
3684
3685 devq->send_openings--;
3686 devq->send_active++;
3687
3688 if (device->ccbq.queue.entries > 0)
3689 xpt_schedule_dev_sendq(bus, device);
3690
3691 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3692 /*
3693 * The client wants to freeze the queue
3694 * after this CCB is sent.
3695 */
3696 ospl = splcam();
3697 device->qfrozen_cnt++;
3698 splx(ospl);
3699 }
3700
3701 splx(s);
3702
3703 /* In Target mode, the peripheral driver knows best... */
3704 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3705 if ((device->inq_flags & SID_CmdQue) != 0
3706 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3707 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3708 else
3709 /*
3710 * Clear this in case of a retried CCB that
3711 * failed due to a rejected tag.
3712 */
3713 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3714 }
3715
3716 /*
3717 * Device queues can be shared among multiple sim instances
3718 * that reside on different busses. Use the SIM in the queue
3719 * CCB's path, rather than the one in the bus that was passed
3720 * into this function.
3721 */
3722 sim = work_ccb->ccb_h.path->bus->sim;
3723 (*(sim->sim_action))(sim, work_ccb);
3724
3725 ospl = splcam();
3726 devq->active_dev = NULL;
3727 splx(ospl);
3728 /* Raise IPL for possible insertion and test at top of loop */
3729 s = splsoftcam();
3730 }
3731 splx(s);
3732 s = splcam();
3733 devq->send_queue.qfrozen_cnt--;
3734 splx(s);
3735}
3736
3737/*
3738 * This function merges the request payload from the slave ccb into the
3739 * master ccb, while keeping important header fields in the master constant.
3740 */
3741void
3742xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3743{
3744 /*
3745 * Pull fields that are valid for peripheral drivers to set
3746 * into the master CCB along with the CCB "payload".
3747 */
3748 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3749 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3750 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3751 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3752 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3753 sizeof(union ccb) - sizeof(struct ccb_hdr));
3754}
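/*
 * The bcopy works because every variant in union ccb begins with a
 * struct ccb_hdr, so copying from just past the header moves only the
 * function-specific payload.  A sketch of the intended use, with the
 * caller owning the master ccb's queueing state (the pass(4) driver
 * uses this shape when overlaying a user-supplied ccb):
 *
 *	xpt_setup_ccb(&master->ccb_h, path, priority);
 *	... build the request in a scratch ccb `slave' ...
 *	xpt_merge_ccb(master, slave);
 *
 * This leaves the path and queueing bookkeeping of the master intact
 * while adopting the slave's request.
 */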
3755
3756void
3757xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3758{
3759 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3760 ccb_h->pinfo.priority = priority;
3761 ccb_h->path = path;
3762 ccb_h->path_id = path->bus->path_id;
3763 if (path->target)
3764 ccb_h->target_id = path->target->target_id;
3765 else
3766 ccb_h->target_id = CAM_TARGET_WILDCARD;
3767 if (path->device) {
3768 ccb_h->target_lun = path->device->lun_id;
3769 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3770 } else {
3771 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3772 }
3773 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3774 ccb_h->flags = 0;
3775}
3776
3777/* Path manipulation functions */
3778cam_status
3779xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3780 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3781{
3782 struct cam_path *path;
3783 cam_status status;
3784
3785 	path = malloc(sizeof(*path), M_DEVBUF, M_INTWAIT);
3786 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3787 if (status != CAM_REQ_CMP) {
3788 free(path, M_DEVBUF);
3789 path = NULL;
3790 }
3791 *new_path_ptr = path;
3792 return (status);
3793}
3794
3795static cam_status
3796xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3797 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3798{
3799 struct cam_eb *bus;
3800 struct cam_et *target;
3801 struct cam_ed *device;
3802 cam_status status;
3803 int s;
3804
3805 status = CAM_REQ_CMP; /* Completed without error */
3806 target = NULL; /* Wildcarded */
3807 device = NULL; /* Wildcarded */
3808
3809 /*
3810 * We will potentially modify the EDT, so block interrupts
3811 * that may attempt to create cam paths.
3812 */
3813 s = splcam();
3814 bus = xpt_find_bus(path_id);
3815 if (bus == NULL) {
3816 status = CAM_PATH_INVALID;
3817 } else {
3818 target = xpt_find_target(bus, target_id);
3819 if (target == NULL) {
3820 /* Create one */
3821 struct cam_et *new_target;
3822
3823 new_target = xpt_alloc_target(bus, target_id);
3824 if (new_target == NULL) {
3825 status = CAM_RESRC_UNAVAIL;
3826 } else {
3827 target = new_target;
3828 }
3829 }
3830 if (target != NULL) {
3831 device = xpt_find_device(target, lun_id);
3832 if (device == NULL) {
3833 /* Create one */
3834 struct cam_ed *new_device;
3835
3836 new_device = xpt_alloc_device(bus,
3837 target,
3838 lun_id);
3839 if (new_device == NULL) {
3840 status = CAM_RESRC_UNAVAIL;
3841 } else {
3842 device = new_device;
3843 }
3844 }
3845 }
3846 }
3847 splx(s);
3848
3849 /*
3850 * Only touch the user's data if we are successful.
3851 */
3852 if (status == CAM_REQ_CMP) {
3853 new_path->periph = perph;
3854 new_path->bus = bus;
3855 new_path->target = target;
3856 new_path->device = device;
3857 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3858 } else {
3859 if (device != NULL)
3860 xpt_release_device(bus, target, device);
3861 if (target != NULL)
3862 xpt_release_target(bus, target);
3863 if (bus != NULL)
3864 xpt_release_bus(bus);
3865 }
3866 return (status);
3867}
3868
3869static void
3870xpt_release_path(struct cam_path *path)
3871{
3872 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3873 if (path->device != NULL) {
3874 xpt_release_device(path->bus, path->target, path->device);
3875 path->device = NULL;
3876 }
3877 if (path->target != NULL) {
3878 xpt_release_target(path->bus, path->target);
3879 path->target = NULL;
3880 }
3881 if (path->bus != NULL) {
3882 xpt_release_bus(path->bus);
3883 path->bus = NULL;
3884 }
3885}
3886
3887void
3888xpt_free_path(struct cam_path *path)
3889{
3890 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3891 xpt_release_path(path);
3892 free(path, M_DEVBUF);
3893}
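
/*
 * Example: the create/free pairing for caller-owned paths.  A minimal
 * sketch; the wildcard constants select all targets and luns on the
 * given bus, which is the usual shape for async callback registration.
 */
#if 0
	struct cam_path *path;
	cam_status status;

	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP)
		return (status);
	/* ... use the path with xpt_setup_ccb()/xpt_action() ... */
	xpt_free_path(path);
#endif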


/*
 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 * in path1, 2 for match with wildcards in path2.
 */
int
xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
{
	int retval = 0;

	if (path1->bus != path2->bus) {
		if (path1->bus->path_id == CAM_BUS_WILDCARD)
			retval = 1;
		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	if (path1->target != path2->target) {
		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
			if (retval == 0)
				retval = 1;
		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	if (path1->device != path2->device) {
		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
			if (retval == 0)
				retval = 1;
		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	return (retval);
}
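
/*
 * Example: how callers interpret the return value of xpt_path_comp().
 * Both paths here are hypothetical; "wildpath" was created with
 * CAM_TARGET_WILDCARD/CAM_LUN_WILDCARD, "devpath" names a specific
 * device on the same bus.
 */
#if 0
	switch (xpt_path_comp(wildpath, devpath)) {
	case -1:	/* no overlap at all */
		break;
	case 0:		/* identical bus:target:lun */
		break;
	case 1:		/* wildpath covers devpath via its wildcards */
		break;
	case 2:		/* devpath covers wildpath via its wildcards */
		break;
	}
#endif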

void
xpt_print_path(struct cam_path *path)
{
	if (path == NULL)
		printf("(nopath): ");
	else {
		if (path->periph != NULL)
			printf("(%s%d:", path->periph->periph_name,
			       path->periph->unit_number);
		else
			printf("(noperiph:");

		if (path->bus != NULL)
			printf("%s%d:%d:", path->bus->sim->sim_name,
			       path->bus->sim->unit_number,
			       path->bus->sim->bus_id);
		else
			printf("nobus:");

		if (path->target != NULL)
			printf("%d:", path->target->target_id);
		else
			printf("X:");

		if (path->device != NULL)
			printf("%d): ", path->device->lun_id);
		else
			printf("X): ");
	}
}
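
/*
 * Example: the prefix format produced by xpt_print_path() is
 * "(<periph><unit>:<sim><unit>:<bus>:<target>:<lun>): ", with "X" for
 * wildcarded fields.  The device names below are hypothetical; a da(4)
 * periph on an ahc(4) controller might yield "(da0:ahc0:0:3:0): ".
 */
#if 0
	xpt_print_path(ccb->ccb_h.path);
	printf("command timed out\n");
	/* console: "(da0:ahc0:0:3:0): command timed out" */
#endif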

path_id_t
xpt_path_path_id(struct cam_path *path)
{
	return(path->bus->path_id);
}

target_id_t
xpt_path_target_id(struct cam_path *path)
{
	if (path->target != NULL)
		return (path->target->target_id);
	else
		return (CAM_TARGET_WILDCARD);
}

lun_id_t
xpt_path_lun_id(struct cam_path *path)
{
	if (path->device != NULL)
		return (path->device->lun_id);
	else
		return (CAM_LUN_WILDCARD);
}

struct cam_sim *
xpt_path_sim(struct cam_path *path)
{
	return (path->bus->sim);
}

struct cam_periph*
xpt_path_periph(struct cam_path *path)
{
	return (path->periph);
}
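
/*
 * Example: the accessors above let periph and SIM code pull address
 * components out of an otherwise opaque path without reaching into EDT
 * internals.  A sketch of a diagnostic built from them.
 */
#if 0
	printf("attach at bus %d, target %d, lun %d via SIM %s\n",
	       xpt_path_path_id(path),
	       xpt_path_target_id(path),
	       xpt_path_lun_id(path),
	       xpt_path_sim(path)->sim_name);
#endif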

/*
 * Release a CAM control block for the caller.  Remit the cost of the
 * structure to the device referenced by the path.  If this device had
 * no 'credits' and peripheral drivers have registered async callbacks
 * for this notification, call them now.
 */
void
xpt_release_ccb(union ccb *free_ccb)
{
	int s;
	struct cam_path *path;
	struct cam_ed *device;
	struct cam_eb *bus;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
	path = free_ccb->ccb_h.path;
	device = path->device;
	bus = path->bus;
	s = splsoftcam();
	cam_ccbq_release_opening(&device->ccbq);
	if (xpt_ccb_count > xpt_max_ccbs) {
		xpt_free_ccb(free_ccb);
		xpt_ccb_count--;
	} else {
		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
	}
	bus->sim->devq->alloc_openings++;
	bus->sim->devq->alloc_active--;
	/* XXX Turn this into an inline function - xpt_run_device?? */
	if ((device_is_alloc_queued(device) == 0)
	 && (device->drvq.entries > 0)) {
		xpt_schedule_dev_allocq(bus, device);
	}
	splx(s);
	if (dev_allocq_is_runnable(bus->sim->devq))
		xpt_run_dev_allocq(bus);
}
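
/*
 * Example: the usual caller of xpt_release_ccb() is a peripheral
 * driver's completion routine, which returns the CCB to the pool once
 * any status it needs has been copied out.  A minimal sketch of that
 * shape; the "xxdone" name and error handling are hypothetical.
 */
#if 0
static void
xxdone(struct cam_periph *periph, union ccb *done_ccb)
{
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* record the error before the CCB goes back to the pool */
	}
	xpt_release_ccb(done_ccb);
}
#endif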

/* Functions accessed by SIM drivers */

/*
 * A sim structure, listing the SIM entry points and instance
 * identification info, is passed to xpt_bus_register to hook the SIM
 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 * for this new bus, places it in the array of busses, and assigns
 * it a path_id.  The path_id may be influenced by "hard wiring"
 * information specified by the user.  Once interrupt services are
 * available, the bus will be probed.
 */
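
/*
 * Example: the registration sequence as seen from a host adapter's
 * attach routine.  cam_sim_alloc() bundles the action and poll vectors
 * with the per-instance softc; the "xx" names, softc, unit, and queue
 * sizes here are hypothetical.
 */
#if 0
	struct cam_devq *devq;
	struct cam_sim *sim;

	devq = cam_simq_alloc(/*max_openings*/64);
	sim = cam_sim_alloc(xx_action, xx_poll, "xx", softc,
			    unit, /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/64, devq);
	if (sim == NULL || xpt_bus_register(sim, /*bus*/0) != CAM_SUCCESS)
		return (ENOMEM);
#endif
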
int32_t
xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
{
	struct cam_eb *new_bus;
	struct cam_eb *old_bus;
	struct ccb_pathinq cpi;
	int s;

	sim->bus_id = bus;
	new_bus = malloc(sizeof(*new_bus), M_DEVBUF, M_INTWAIT);

	if (strcmp(sim->sim_name, "xpt") != 0) {
		sim->path_id =
		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
	}

	TAILQ_INIT(&new_bus->et_entries);
	new_bus->path_id = sim->path_id;
	new_bus->sim = sim;
	++sim->refcount;
	timevalclear(&new_bus->last_reset);
	new_bus->flags = 0;
	new_bus->refcount = 1;	/* Held until a bus_deregister event */
	new_bus->generation = 0;
	s = splcam();
	old_bus = TAILQ_FIRST(&xpt_busses);
	while (old_bus != NULL
	    && old_bus->path_id < new_bus->path_id)
		old_bus = TAILQ_NEXT(old_bus, links);
	if (old_bus != NULL)
		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
	else
		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);