1/*
2 * Implementation of the Common Access Method Transport (XPT) layer.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
30 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.15 2004/05/19 22:52:37 dillon Exp $
31 */
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/types.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/time.h>
38#include <sys/conf.h>
39#include <sys/fcntl.h>
40#include <sys/md5.h>
41#include <sys/devicestat.h>
42#include <sys/interrupt.h>
43#include <sys/bus.h>
44#include <sys/thread.h>
45#include <sys/thread2.h>
46
47#ifdef PC98
48#include <pc98/pc98/pc98_machdep.h> /* geometry translation */
49#endif
50
51#include <machine/clock.h>
52#include <machine/ipl.h>
53
54#include "cam.h"
55#include "cam_ccb.h"
56#include "cam_periph.h"
57#include "cam_sim.h"
58#include "cam_xpt.h"
59#include "cam_xpt_sim.h"
60#include "cam_xpt_periph.h"
61#include "cam_debug.h"
62
63#include "scsi/scsi_all.h"
64#include "scsi/scsi_message.h"
65#include "scsi/scsi_pass.h"
66#include "opt_cam.h"
67
68/* Datastructures internal to the xpt layer */
69
70/*
71 * Definition of an async handler callback block. These are used to add
72 * SIMs and peripherals to the async callback lists.
73 */
74struct async_node {
75 SLIST_ENTRY(async_node) links;
76 u_int32_t event_enable; /* Async Event enables */
77 void (*callback)(void *arg, u_int32_t code,
78 struct cam_path *path, void *args);
79 void *callback_arg;
80};
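/*
 * Illustrative sketch (not from this file): a peripheral driver hooks
 * into these lists by sending an XPT_SASYNC_CB CCB, along the lines of:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriver_async;		(hypothetical handler)
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 *
 * xpt_action() then allocates an async_node carrying the enable mask,
 * callback, and argument, and links it onto the relevant async_list.
 */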
81
82SLIST_HEAD(async_list, async_node);
83SLIST_HEAD(periph_list, cam_periph);
84static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
85
86/*
87 * This is the maximum number of high powered commands (e.g. start unit)
88 * that can be outstanding at a particular time.
89 */
90#ifndef CAM_MAX_HIGHPOWER
91#define CAM_MAX_HIGHPOWER 4
92#endif
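/*
 * A sketch of raising the limit at build time (whether this goes via
 * the kernel config or raw CFLAGS depends on how the option is plumbed
 * through the build):
 *
 *	options CAM_MAX_HIGHPOWER=8	(kernel config)
 * or
 *	CFLAGS+= -DCAM_MAX_HIGHPOWER=8
 */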
93
94/* number of high powered commands that can go through right now */
95static int num_highpower = CAM_MAX_HIGHPOWER;
96
97/*
98 * Structure for queueing a device in a run queue.
99 * There is one run queue for allocating new ccbs,
100 * and another for sending ccbs to the controller.
101 */
102struct cam_ed_qinfo {
103 cam_pinfo pinfo;
104 struct cam_ed *device;
105};
106
107/*
108 * The CAM EDT (Existing Device Table) contains the device information for
109 * all devices for all busses in the system. The table contains a
110 * cam_ed structure for each device on the bus.
111 */
112struct cam_ed {
113 TAILQ_ENTRY(cam_ed) links;
114 struct cam_ed_qinfo alloc_ccb_entry;
115 struct cam_ed_qinfo send_ccb_entry;
116 struct cam_et *target;
117 lun_id_t lun_id;
118 struct camq drvq; /*
119 * Queue of type drivers wanting to do
120 * work on this device.
121 */
122 struct cam_ccbq ccbq; /* Queue of pending ccbs */
123 struct async_list asyncs; /* Async callback info for this B/T/L */
124 struct periph_list periphs; /* All attached devices */
125 u_int generation; /* Generation number */
126 struct cam_periph *owner; /* Peripheral driver's ownership tag */
127 struct xpt_quirk_entry *quirk; /* Oddities about this device */
128 /* Storage for the inquiry data */
129 struct scsi_inquiry_data inq_data;
130 u_int8_t inq_flags; /*
131 * Current settings for inquiry flags.
132 * This allows us to override settings
133 * like disconnection and tagged
134 * queuing for a device.
135 */
136 u_int8_t queue_flags; /* Queue flags from the control page */
137 u_int8_t serial_num_len;
138 u_int8_t *serial_num;
139 u_int32_t qfrozen_cnt;
140 u_int32_t flags;
141#define CAM_DEV_UNCONFIGURED 0x01
142#define CAM_DEV_REL_TIMEOUT_PENDING 0x02
143#define CAM_DEV_REL_ON_COMPLETE 0x04
144#define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
145#define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
146#define CAM_DEV_TAG_AFTER_COUNT 0x20
147#define CAM_DEV_INQUIRY_DATA_VALID 0x40
148 u_int32_t tag_delay_count;
149#define CAM_TAG_DELAY_COUNT 5
150 u_int32_t refcount;
151 struct callout_handle c_handle;
152};
153
154/*
155 * Each target is represented by an ET (Existing Target). These
156 * entries are created when a target is successfully probed with an
157 * identify, and removed when a device fails to respond after a number
158 * of retries, or a bus rescan finds the device missing.
159 */
160struct cam_et {
161 TAILQ_HEAD(, cam_ed) ed_entries;
162 TAILQ_ENTRY(cam_et) links;
163 struct cam_eb *bus;
164 target_id_t target_id;
165 u_int32_t refcount;
166 u_int generation;
167 struct timeval last_reset; /* uptime of last reset */
168};
169
170/*
171 * Each bus is represented by an EB (Existing Bus). These entries
172 * are created by calls to xpt_bus_register and deleted by calls to
173 * xpt_bus_deregister.
174 */
175struct cam_eb {
176 TAILQ_HEAD(, cam_et) et_entries;
177 TAILQ_ENTRY(cam_eb) links;
178 path_id_t path_id;
179 struct cam_sim *sim;
180 struct timeval last_reset; /* uptime of last reset */
181 u_int32_t flags;
182#define CAM_EB_RUNQ_SCHEDULED 0x01
183 u_int32_t refcount;
184 u_int generation;
185};
186
187struct cam_path {
188 struct cam_periph *periph;
189 struct cam_eb *bus;
190 struct cam_et *target;
191 struct cam_ed *device;
192};
193
194struct xpt_quirk_entry {
195 struct scsi_inquiry_pattern inq_pat;
196 u_int8_t quirks;
197#define CAM_QUIRK_NOLUNS 0x01
198#define CAM_QUIRK_NOSERIAL 0x02
199#define CAM_QUIRK_HILUNS 0x04
200 u_int mintags;
201 u_int maxtags;
202};
203#define CAM_SCSI2_MAXLUN 8
204
205typedef enum {
206 XPT_FLAG_OPEN = 0x01
207} xpt_flags;
208
209struct xpt_softc {
210 xpt_flags flags;
211 u_int32_t generation;
212};
213
214static const char quantum[] = "QUANTUM";
215static const char sony[] = "SONY";
216static const char west_digital[] = "WDIGTL";
217static const char samsung[] = "SAMSUNG";
218static const char seagate[] = "SEAGATE";
219static const char microp[] = "MICROP";
220
221static struct xpt_quirk_entry xpt_quirk_table[] =
222{
223 {
224 /* Reports QUEUE FULL for temporary resource shortages */
225 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
226 /*quirks*/0, /*mintags*/24, /*maxtags*/32
227 },
228 {
229 /* Reports QUEUE FULL for temporary resource shortages */
230 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
231 /*quirks*/0, /*mintags*/24, /*maxtags*/32
232 },
233 {
234 /* Reports QUEUE FULL for temporary resource shortages */
235 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
236 /*quirks*/0, /*mintags*/24, /*maxtags*/32
237 },
238 {
239 /* Broken tagged queuing drive */
240 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
241 /*quirks*/0, /*mintags*/0, /*maxtags*/0
242 },
243 {
244 /* Broken tagged queuing drive */
245 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
246 /*quirks*/0, /*mintags*/0, /*maxtags*/0
247 },
248 {
249 /* Broken tagged queuing drive */
250 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
251 /*quirks*/0, /*mintags*/0, /*maxtags*/0
252 },
253 {
254 /*
255 * Unfortunately, the Quantum Atlas III has the same
256 * problem as the Atlas II drives above.
257 * Reported by: "Johan Granlund" <johan@granlund.nu>
258 *
259 * For future reference, the drive with the problem was:
260 * QUANTUM QM39100TD-SW N1B0
261 *
262 * It's possible that Quantum will fix the problem in later
263 * firmware revisions. If that happens, the quirk entry
264 * will need to be made specific to the firmware revisions
265 * with the problem.
266 *
267 */
268 /* Reports QUEUE FULL for temporary resource shortages */
269 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
270 /*quirks*/0, /*mintags*/24, /*maxtags*/32
271 },
272 {
273 /*
274 * 18 Gig Atlas III, same problem as the 9G version.
275 * Reported by: Andre Albsmeier
276 * <andre.albsmeier@mchp.siemens.de>
277 *
278 * For future reference, the drive with the problem was:
279 * QUANTUM QM318000TD-S N491
280 */
281 /* Reports QUEUE FULL for temporary resource shortages */
282 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
283 /*quirks*/0, /*mintags*/24, /*maxtags*/32
284 },
285 {
286 /*
287 * Broken tagged queuing drive
288 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
289 * and: Martin Renters <martin@tdc.on.ca>
290 */
291 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
292 /*quirks*/0, /*mintags*/0, /*maxtags*/0
293 },
294 /*
295 * The Seagate Medalist Pro drives have very poor write
296 * performance with anything more than 2 tags.
297 *
298 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
299 * Drive: <SEAGATE ST36530N 1444>
300 *
301 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
302 * Drive: <SEAGATE ST34520W 1281>
303 *
304 * No one has actually reported that the 9G version
305 * (ST39140*) of the Medalist Pro has the same problem, but
306 * we're assuming that it does because the 4G and 6.5G
307 * versions of the drive are broken.
308 */
309 {
310 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
311 /*quirks*/0, /*mintags*/2, /*maxtags*/2
312 },
313 {
314 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
315 /*quirks*/0, /*mintags*/2, /*maxtags*/2
316 },
317 {
318 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
319 /*quirks*/0, /*mintags*/2, /*maxtags*/2
320 },
321 {
322 /*
323 * Slow when tagged queueing is enabled. Write performance
324 * steadily drops off with more and more concurrent
325 * transactions. Best sequential write performance with
326 * tagged queueing turned off and write caching turned on.
327 *
328 * PR: kern/10398
329 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
330 * Drive: DCAS-34330 w/ "S65A" firmware.
331 *
332 * The drive with the problem had the "S65A" firmware
333 * revision, and has also been reported (by Stephen J.
334 * Roznowski <sjr@home.net>) for a drive with the "S61A"
335 * firmware revision.
336 *
337 * Although no one has reported problems with the 2 gig
338 * version of the DCAS drive, the assumption is that it
339 * has the same problems as the 4 gig version. Therefore
340 * this quirk entry disables tagged queueing for all
341 * DCAS drives.
342 */
343 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
344 /*quirks*/0, /*mintags*/0, /*maxtags*/0
345 },
346 {
347 /* Broken tagged queuing drive */
348 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
349 /*quirks*/0, /*mintags*/0, /*maxtags*/0
350 },
351 {
352 /* Broken tagged queuing drive */
353 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
354 /*quirks*/0, /*mintags*/0, /*maxtags*/0
355 },
356 {
357 /*
358 * Broken tagged queuing drive.
359 * Submitted by:
360 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
361 * in PR kern/9535
362 */
363 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
364 /*quirks*/0, /*mintags*/0, /*maxtags*/0
365 },
366 {
367 /*
368 * Slow when tagged queueing is enabled. (1.5MB/sec versus
369 * 8MB/sec.)
370 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
371 * Best performance with these drives is achieved with
372 * tagged queueing turned off, and write caching turned on.
373 */
374 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
375 /*quirks*/0, /*mintags*/0, /*maxtags*/0
376 },
377 {
378 /*
379 * Slow when tagged queueing is enabled. (1.5MB/sec versus
380 * 8MB/sec.)
381 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
382 * Best performance with these drives is achieved with
383 * tagged queueing turned off, and write caching turned on.
384 */
385 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
386 /*quirks*/0, /*mintags*/0, /*maxtags*/0
387 },
388 {
389 /*
390 * Doesn't handle queue full condition correctly,
391 * so we need to limit maxtags to what the device
392 * can handle instead of determining this automatically.
393 */
394 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
395 /*quirks*/0, /*mintags*/2, /*maxtags*/32
396 },
397 {
398 /* Really only one LUN */
399 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
400 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
401 },
402 {
403 /* I can't believe we need a quirk for DPT volumes. */
404 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
405 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
406 /*mintags*/0, /*maxtags*/255
407 },
408 {
409 /*
410 * Many Sony CDROM drives don't like multi-LUN probing.
411 */
412 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
413 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
414 },
415 {
416 /*
417 * This drive doesn't like multiple LUN probing.
418 * Submitted by: Parag Patel <parag@cgt.com>
419 */
420 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
421 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
422 },
423 {
424 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
425 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
426 },
427 {
428 /*
429 * The 8200 doesn't like multi-lun probing, and probably
430 * doesn't like serial number requests either.
431 */
432 {
433 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
434 "EXB-8200*", "*"
435 },
436 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
437 },
438 {
439 /*
440 * Let's try the same as above, but for a drive that says
441 * it's an IPL-6860 but is actually an EXB 8200.
442 */
443 {
444 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
445 "IPL-6860*", "*"
446 },
447 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
448 },
449 {
450 /*
451 * These Hitachi drives don't like multi-lun probing.
452 * The PR submitter has a DK319H, but says that the Linux
453 * kernel has a similar work-around for the DK312 and DK314,
454 * so all DK31* drives are quirked here.
455 * PR: misc/18793
456 * Submitted by: Paul Haddad <paul@pth.com>
457 */
458 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
459 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
460 },
461 {
462 /*
463 * This old revision of the TDC3600 is also SCSI-1, and
464 * hangs upon serial number probing.
465 */
466 {
467 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
468 " TDC 3600", "U07:"
469 },
470 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
471 },
472 {
473 /*
474 * Would respond to all LUNs if asked for.
475 */
476 {
477 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
478 "CP150", "*"
479 },
480 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
481 },
482 {
483 /*
484 * Would respond to all LUNs if asked for.
485 */
486 {
487 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
488 "96X2*", "*"
489 },
490 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
491 },
492 {
493 /* Submitted by: Matthew Dodd <winter@jurai.net> */
494 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
495 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
496 },
497 {
498 /* Submitted by: Matthew Dodd <winter@jurai.net> */
499 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
500 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
501 },
502 {
503 /* TeraSolutions special settings for TRC-22 RAID */
504 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
505 /*quirks*/0, /*mintags*/55, /*maxtags*/255
506 },
507 {
508 /* Veritas Storage Appliance */
509 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
510 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
511 },
512 {
513 /*
514 * Would respond to all LUNs. Device type and removable
515 * flag are jumper-selectable.
516 */
517 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
518 "Tahiti 1", "*"
519 },
520 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
521 },
522 {
523 /* Default tagged queuing parameters for all devices */
524 {
525 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
526 /*vendor*/"*", /*product*/"*", /*revision*/"*"
527 },
528 /*quirks*/0, /*mintags*/2, /*maxtags*/255
529 },
530};
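/*
 * A hypothetical sketch of adding a quirk entry (the device name is
 * invented): match on the inquiry vendor/product/revision globs, then
 * supply the quirk flags and tag limits:
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "ROTOR*", "*" },
 *		CAM_QUIRK_NOLUNS, 2, 32
 *	},
 *
 * The table is searched in order by cam_quirkmatch(), so specific
 * entries must be placed before the catch-all default at the end.
 */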
531
532static const int xpt_quirk_table_size =
533 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
534
535typedef enum {
536 DM_RET_COPY = 0x01,
537 DM_RET_FLAG_MASK = 0x0f,
538 DM_RET_NONE = 0x00,
539 DM_RET_STOP = 0x10,
540 DM_RET_DESCEND = 0x20,
541 DM_RET_ERROR = 0x30,
542 DM_RET_ACTION_MASK = 0xf0
543} dev_match_ret;
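/*
 * The low nibble of a dev_match_ret holds flags and the high nibble a
 * single action, so a match routine can return, for example:
 *
 *	return (DM_RET_DESCEND | DM_RET_COPY);
 *
 * to ask the traversal code to copy this node out and keep descending;
 * (retval & DM_RET_ACTION_MASK) recovers the action by itself.
 */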
544
545typedef enum {
546 XPT_DEPTH_BUS,
547 XPT_DEPTH_TARGET,
548 XPT_DEPTH_DEVICE,
549 XPT_DEPTH_PERIPH
550} xpt_traverse_depth;
551
552struct xpt_traverse_config {
553 xpt_traverse_depth depth;
554 void *tr_func;
555 void *tr_arg;
556};
557
558typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
559typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
560typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
561typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
562typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
563
564/* Transport layer configuration information */
565static struct xpt_softc xsoftc;
566
567/* Queues for our software interrupt handler */
568typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
569static cam_isrq_t cam_bioq;
570static cam_isrq_t cam_netq;
571
572/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
573static SLIST_HEAD(,ccb_hdr) ccb_freeq;
574static u_int xpt_max_ccbs; /*
575 * Maximum size of ccb pool. Modified as
576 * devices are added/removed or have their
577 * opening counts changed.
578 */
579static u_int xpt_ccb_count; /* Current count of allocated ccbs */
580
581struct cam_periph *xpt_periph;
582
583static periph_init_t xpt_periph_init;
584
585static periph_init_t probe_periph_init;
586
587static struct periph_driver xpt_driver =
588{
589 xpt_periph_init, "xpt",
590 TAILQ_HEAD_INITIALIZER(xpt_driver.units)
591};
592
593static struct periph_driver probe_driver =
594{
595 probe_periph_init, "probe",
596 TAILQ_HEAD_INITIALIZER(probe_driver.units)
597};
598
599DATA_SET(periphdriver_set, xpt_driver);
600DATA_SET(periphdriver_set, probe_driver);
601
602#define XPT_CDEV_MAJOR 104
603
604static d_open_t xptopen;
605static d_close_t xptclose;
606static d_ioctl_t xptioctl;
607
608static struct cdevsw xpt_cdevsw = {
609 /* name */ "xpt",
610 /* maj */ XPT_CDEV_MAJOR,
611 /* flags */ 0,
612 /* port */ NULL,
613 /* clone */ NULL,
614
615 /* open */ xptopen,
616 /* close */ xptclose,
617 /* read */ noread,
618 /* write */ nowrite,
619 /* ioctl */ xptioctl,
620 /* poll */ nopoll,
621 /* mmap */ nommap,
622 /* strategy */ nostrategy,
623 /* dump */ nodump,
624 /* psize */ nopsize
625};
626
627static struct intr_config_hook *xpt_config_hook;
628
629/* Registered busses */
630static TAILQ_HEAD(,cam_eb) xpt_busses;
631static u_int bus_generation;
632
633/* Storage for debugging datastructures */
634#ifdef CAMDEBUG
635struct cam_path *cam_dpath;
636u_int32_t cam_dflags;
637u_int32_t cam_debug_delay;
638#endif
639
640#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
641#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
642#endif
643
644/*
645 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
646 * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS,
647 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
648 */
649#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
650 || defined(CAM_DEBUG_LUN)
651#ifdef CAMDEBUG
652#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
653 || !defined(CAM_DEBUG_LUN)
654#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
655 and CAM_DEBUG_LUN"
656#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
657#else /* !CAMDEBUG */
658#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
659#endif /* CAMDEBUG */
660#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
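/*
 * An example kernel configuration fragment satisfying the rules above
 * (the bus/target/lun values are only illustrative):
 *
 *	options CAMDEBUG
 *	options CAM_DEBUG_BUS=0
 *	options CAM_DEBUG_TARGET=1
 *	options CAM_DEBUG_LUN=0
 *	options CAM_DEBUG_FLAGS="CAM_DEBUG_INFO|CAM_DEBUG_CDB"
 */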
661
662/* Our boot-time initialization hook */
663static void xpt_init(void *);
664SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);
665
666static cam_status xpt_compile_path(struct cam_path *new_path,
667 struct cam_periph *perph,
668 path_id_t path_id,
669 target_id_t target_id,
670 lun_id_t lun_id);
671
672static void xpt_release_path(struct cam_path *path);
673
674static void xpt_async_bcast(struct async_list *async_head,
675 u_int32_t async_code,
676 struct cam_path *path,
677 void *async_arg);
678static void xpt_dev_async(u_int32_t async_code,
679 struct cam_eb *bus,
680 struct cam_et *target,
681 struct cam_ed *device,
682 void *async_arg);
683static path_id_t xptnextfreepathid(void);
684static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
685static union ccb *xpt_get_ccb(struct cam_ed *device);
686static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
687 u_int32_t new_priority);
688static void xpt_run_dev_allocq(struct cam_eb *bus);
689static void xpt_run_dev_sendq(struct cam_eb *bus);
690static timeout_t xpt_release_devq_timeout;
691static timeout_t xpt_release_simq_timeout;
692static void xpt_release_bus(struct cam_eb *bus);
693static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
694 int run_queue);
695static struct cam_et*
696 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
697static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
698static struct cam_ed*
699 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
700 lun_id_t lun_id);
701static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
702 struct cam_ed *device);
703static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
704static struct cam_eb*
705 xpt_find_bus(path_id_t path_id);
706static struct cam_et*
707 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
708static struct cam_ed*
709 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
710static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
711static void xpt_scan_lun(struct cam_periph *periph,
712 struct cam_path *path, cam_flags flags,
713 union ccb *ccb);
714static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
715static xpt_busfunc_t xptconfigbuscountfunc;
716static xpt_busfunc_t xptconfigfunc;
717static void xpt_config(void *arg);
718static xpt_devicefunc_t xptpassannouncefunc;
719static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
720static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
721static void xptpoll(struct cam_sim *sim);
722static inthand2_t swi_camnet;
723static inthand2_t swi_cambio;
724static void camisr(cam_isrq_t *queue);
725#if 0
726static void xptstart(struct cam_periph *periph, union ccb *work_ccb);
727static void xptasync(struct cam_periph *periph,
728 u_int32_t code, cam_path *path);
729#endif
730static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
731 int num_patterns, struct cam_eb *bus);
732static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
733 int num_patterns, struct cam_ed *device);
734static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
735 int num_patterns,
736 struct cam_periph *periph);
737static xpt_busfunc_t xptedtbusfunc;
738static xpt_targetfunc_t xptedttargetfunc;
739static xpt_devicefunc_t xptedtdevicefunc;
740static xpt_periphfunc_t xptedtperiphfunc;
741static xpt_pdrvfunc_t xptplistpdrvfunc;
742static xpt_periphfunc_t xptplistperiphfunc;
743static int xptedtmatch(struct ccb_dev_match *cdm);
744static int xptperiphlistmatch(struct ccb_dev_match *cdm);
745static int xptbustraverse(struct cam_eb *start_bus,
746 xpt_busfunc_t *tr_func, void *arg);
747static int xpttargettraverse(struct cam_eb *bus,
748 struct cam_et *start_target,
749 xpt_targetfunc_t *tr_func, void *arg);
750static int xptdevicetraverse(struct cam_et *target,
751 struct cam_ed *start_device,
752 xpt_devicefunc_t *tr_func, void *arg);
753static int xptperiphtraverse(struct cam_ed *device,
754 struct cam_periph *start_periph,
755 xpt_periphfunc_t *tr_func, void *arg);
756static int xptpdrvtraverse(struct periph_driver **start_pdrv,
757 xpt_pdrvfunc_t *tr_func, void *arg);
758static int xptpdperiphtraverse(struct periph_driver **pdrv,
759 struct cam_periph *start_periph,
760 xpt_periphfunc_t *tr_func,
761 void *arg);
762static xpt_busfunc_t xptdefbusfunc;
763static xpt_targetfunc_t xptdeftargetfunc;
764static xpt_devicefunc_t xptdefdevicefunc;
765static xpt_periphfunc_t xptdefperiphfunc;
766static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
767#ifdef notusedyet
768static int xpt_for_all_targets(xpt_targetfunc_t *tr_func,
769 void *arg);
770#endif
771static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
772 void *arg);
773#ifdef notusedyet
774static int xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
775 void *arg);
776#endif
777static xpt_devicefunc_t xptsetasyncfunc;
778static xpt_busfunc_t xptsetasyncbusfunc;
779static cam_status xptregister(struct cam_periph *periph,
780 void *arg);
781static cam_status proberegister(struct cam_periph *periph,
782 void *arg);
783static void probeschedule(struct cam_periph *probe_periph);
784static void probestart(struct cam_periph *periph, union ccb *start_ccb);
785static void proberequestdefaultnegotiation(struct cam_periph *periph);
786static void probedone(struct cam_periph *periph, union ccb *done_ccb);
787static void probecleanup(struct cam_periph *periph);
788static void xpt_find_quirk(struct cam_ed *device);
789static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
790 struct cam_ed *device,
791 int async_update);
792static void xpt_toggle_tags(struct cam_path *path);
793static void xpt_start_tags(struct cam_path *path);
794static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
795 struct cam_ed *dev);
796static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
797 struct cam_ed *dev);
798static __inline int periph_is_queued(struct cam_periph *periph);
799static __inline int device_is_alloc_queued(struct cam_ed *device);
800static __inline int device_is_send_queued(struct cam_ed *device);
801static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
802
803static __inline int
804xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
805{
806 int retval;
807
808 if (dev->ccbq.devq_openings > 0) {
809 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
810 cam_ccbq_resize(&dev->ccbq,
811 dev->ccbq.dev_openings
812 + dev->ccbq.dev_active);
813 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
814 }
815 /*
816 * The priority of a device waiting for CCB resources
817 * is that of the highest priority peripheral driver
818 * enqueued.
819 */
820 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
821 &dev->alloc_ccb_entry.pinfo,
822 CAMQ_GET_HEAD(&dev->drvq)->priority);
823 } else {
824 retval = 0;
825 }
826
827 return (retval);
828}
829
830static __inline int
831xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
832{
833 int retval;
834
835 if (dev->ccbq.dev_openings > 0) {
836 /*
837 * The priority of a device waiting for controller
838 * resources is that of the highest priority CCB
839 * enqueued.
840 */
841 retval =
842 xpt_schedule_dev(&bus->sim->devq->send_queue,
843 &dev->send_ccb_entry.pinfo,
844 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
845 } else {
846 retval = 0;
847 }
848 return (retval);
849}
850
851static __inline int
852periph_is_queued(struct cam_periph *periph)
853{
854 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
855}
856
857static __inline int
858device_is_alloc_queued(struct cam_ed *device)
859{
860 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
861}
862
863static __inline int
864device_is_send_queued(struct cam_ed *device)
865{
866 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
867}
868
869static __inline int
870dev_allocq_is_runnable(struct cam_devq *devq)
871{
872 /*
873 * Have work to do.
874 * Have space to do more work.
875 * Allowed to do work.
876 */
877 return ((devq->alloc_queue.qfrozen_cnt == 0)
878 && (devq->alloc_queue.entries > 0)
879 && (devq->alloc_openings > 0));
880}
881
882static void
883xpt_periph_init()
884{
885 cdevsw_add(&xpt_cdevsw, 0, 0);
886 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
887}
888
889static void
890probe_periph_init()
891{
892}
893
894
895static void
896xptdone(struct cam_periph *periph, union ccb *done_ccb)
897{
898 /* Caller will release the CCB */
899 wakeup(&done_ccb->ccb_h.cbfcnp);
900}
901
902static int
903xptopen(dev_t dev, int flags, int fmt, struct thread *td)
904{
905 int unit;
906
907 unit = minor(dev) & 0xff;
908
909 /*
910 * Only allow read-write access.
911 */
912 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
913 return(EPERM);
914
915 /*
916 * We don't allow nonblocking access.
917 */
918 if ((flags & O_NONBLOCK) != 0) {
919 printf("xpt%d: can't do nonblocking access\n", unit);
920 return(ENODEV);
921 }
922
923 /*
924 * We only have one transport layer right now. If someone accesses
925 * us via something other than minor number 0, point out their
926 * mistake.
927 */
928 if (unit != 0) {
929 printf("xptopen: got invalid xpt unit %d\n", unit);
930 return(ENXIO);
931 }
932
933 /* Mark ourselves open */
934 xsoftc.flags |= XPT_FLAG_OPEN;
935
936 return(0);
937}
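/*
 * From userland, the node created in xpt_periph_init() must therefore
 * be opened read-write and blocking; a minimal sketch:
 *
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 * Read-only, write-only, or O_NONBLOCK opens fail with the errors
 * returned above.
 */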
938
939static int
940xptclose(dev_t dev, int flag, int fmt, struct thread *td)
941{
942 int unit;
943
944 unit = minor(dev) & 0xff;
945
946 /*
947 * We only have one transport layer right now. If someone accesses
948 * us via something other than minor number 0, point out their
949 * mistake.
950 */
951 if (unit != 0) {
952 printf("xptclose: got invalid xpt unit %d\n", unit);
953 return(ENXIO);
954 }
955
956 /* Mark ourselves closed */
957 xsoftc.flags &= ~XPT_FLAG_OPEN;
958
959 return(0);
960}
961
962static int
963xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
964{
965 int unit, error;
966
967 error = 0;
968 unit = minor(dev) & 0xff;
969
970 /*
971 * We only have one transport layer right now. If someone accesses
972 * us via something other than minor number 0, point out their
973 * mistake.
974 */
975 if (unit != 0) {
976 printf("xptioctl: got invalid xpt unit %d\n", unit);
977 return(ENXIO);
978 }
979
980 switch(cmd) {
981 /*
982 * For the transport layer CAMIOCOMMAND ioctl, we really only want
983 * to accept CCB types that don't quite make sense to send through a
984 * passthrough driver.
985 */
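	/*
	 * A hypothetical caller rescanning bus 0 would fill in a CCB
	 * roughly as follows (the wildcards are mandatory for
	 * XPT_SCAN_BUS, as checked below):
	 *
	 *	union ccb ccb;
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_SCAN_BUS;
	 *	ccb.ccb_h.path_id = 0;
	 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	 *	ccb.ccb_h.pinfo.priority = 5;
	 *	ioctl(fd, CAMIOCOMMAND, &ccb);
	 */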
986 case CAMIOCOMMAND: {
987 union ccb *ccb;
988 union ccb *inccb;
989
990 inccb = (union ccb *)addr;
991
992 switch(inccb->ccb_h.func_code) {
993 case XPT_SCAN_BUS:
994 case XPT_RESET_BUS:
995 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
996 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
997 error = EINVAL;
998 break;
999 }
1000 /* FALLTHROUGH */
1001 case XPT_PATH_INQ:
1002 case XPT_ENG_INQ:
1003 case XPT_SCAN_LUN:
1004
1005 ccb = xpt_alloc_ccb();
1006
1007 /*
1008 * Create a path using the bus, target, and lun the
1009 * user passed in.
1010 */
1011 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1012 inccb->ccb_h.path_id,
1013 inccb->ccb_h.target_id,
1014 inccb->ccb_h.target_lun) !=
1015 CAM_REQ_CMP){
1016 error = EINVAL;
1017 xpt_free_ccb(ccb);
1018 break;
1019 }
1020 /* Ensure all of our fields are correct */
1021 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1022 inccb->ccb_h.pinfo.priority);
1023 xpt_merge_ccb(ccb, inccb);
1024 ccb->ccb_h.cbfcnp = xptdone;
1025 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1026 bcopy(ccb, inccb, sizeof(union ccb));
1027 xpt_free_path(ccb->ccb_h.path);
1028 xpt_free_ccb(ccb);
1029 break;
1030
1031 case XPT_DEBUG: {
1032 union ccb ccb;
1033
1034 /*
1035 * This is an immediate CCB, so it's okay to
1036 * allocate it on the stack.
1037 */
1038
1039 /*
1040 * Create a path using the bus, target, and lun the
1041 * user passed in.
1042 */
1043 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1044 inccb->ccb_h.path_id,
1045 inccb->ccb_h.target_id,
1046 inccb->ccb_h.target_lun) !=
1047 CAM_REQ_CMP){
1048 error = EINVAL;
1049 break;
1050 }
1051 /* Ensure all of our fields are correct */
1052 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1053 inccb->ccb_h.pinfo.priority);
1054 xpt_merge_ccb(&ccb, inccb);
1055 ccb.ccb_h.cbfcnp = xptdone;
1056 xpt_action(&ccb);
1057 bcopy(&ccb, inccb, sizeof(union ccb));
1058 xpt_free_path(ccb.ccb_h.path);
1059 break;
1060
1061 }
1062 case XPT_DEV_MATCH: {
1063 struct cam_periph_map_info mapinfo;
1064 struct cam_path *old_path;
1065
1066 /*
1067 * We can't deal with physical addresses for this
1068 * type of transaction.
1069 */
1070 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1071 error = EINVAL;
1072 break;
1073 }
1074
1075 /*
1076 * Save this in case the caller had it set to
1077 * something in particular.
1078 */
1079 old_path = inccb->ccb_h.path;
1080
1081 /*
1082 * We really don't need a path for the matching
1083 * code. The path is needed because of the
1084 * debugging statements in xpt_action(). They
1085 * assume that the CCB has a valid path.
1086 */
1087 inccb->ccb_h.path = xpt_periph->path;
1088
1089 bzero(&mapinfo, sizeof(mapinfo));
1090
1091 /*
1092 * Map the pattern and match buffers into kernel
1093 * virtual address space.
1094 */
1095 error = cam_periph_mapmem(inccb, &mapinfo);
1096
1097 if (error) {
1098 inccb->ccb_h.path = old_path;
1099 break;
1100 }
1101
1102 /*
1103 * This is an immediate CCB, we can send it on directly.
1104 */
1105 xpt_action(inccb);
1106
1107 /*
1108 * Map the buffers back into user space.
1109 */
1110 cam_periph_unmapmem(inccb, &mapinfo);
1111
1112 inccb->ccb_h.path = old_path;
1113
1114 error = 0;
1115 break;
1116 }
1117 default:
1118 error = ENOTSUP;
1119 break;
1120 }
1121 break;
1122 }
1123 /*
1124 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1125 * with the peripheral driver name and unit number filled in. The other
1126 * fields don't really matter as input. The passthrough driver name
1127 * ("pass"), and unit number are passed back in the ccb. The current
1128 * device generation number, the index into the device peripheral
1129 * driver list, and the status are also passed back. Note that
1130 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1131 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1132 * (or rather should be) impossible for the device peripheral driver
1133 * list to change since we look at the whole thing in one pass, and
1134 * we do it with splcam protection.
1135 *
1136 */
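	/*
	 * A sketch of the calling convention (this mirrors what libcam
	 * does): to find the passthrough instance for "da" unit 1, fill
	 * in the peripheral name and unit number, then read the "pass"
	 * name and unit back out of the same fields on success:
	 *
	 *	union ccb ccb;
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
	 *	ccb.cgdl.unit_number = 1;
	 *	ioctl(fd, CAMGETPASSTHRU, &ccb);
	 */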
1137 case CAMGETPASSTHRU: {
1138 union ccb *ccb;
1139 struct cam_periph *periph;
1140 struct periph_driver **p_drv;
1141 char *name;
1142 int unit;
1143 int cur_generation;
1144 int base_periph_found;
1145 int splbreaknum;
1146 int s;
1147
1148 ccb = (union ccb *)addr;
1149 unit = ccb->cgdl.unit_number;
1150 name = ccb->cgdl.periph_name;
1151 /*
1152 * Every 100 devices, we want to drop our spl protection to
1153 * give the software interrupt handler a chance to run.
1154 * Most systems won't run into this check, but this should
1155 * avoid starvation in the software interrupt handler in
1156 * large systems.
1157 */
1158 splbreaknum = 100;
1159
1160 ccb = (union ccb *)addr;
1161
1162 base_periph_found = 0;
1163
1164 /*
1165 * Sanity check -- make sure we don't get a null peripheral
1166 * driver name.
1167 */
1168 if (*ccb->cgdl.periph_name == '\0') {
1169 error = EINVAL;
1170 break;
1171 }
1172
1173 /* Keep the list from changing while we traverse it */
1174 s = splcam();
1175ptstartover:
1176 cur_generation = xsoftc.generation;
1177
1178 /* first find our driver in the list of drivers */
1179 SET_FOREACH(p_drv, periphdriver_set) {
1180 if (strcmp((*p_drv)->driver_name, name) == 0)
1181 break;
1182 }
1183
1184 if (*p_drv == NULL) {
1185 splx(s);
1186 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1187 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1188 *ccb->cgdl.periph_name = '\0';
1189 ccb->cgdl.unit_number = 0;
1190 error = ENOENT;
1191 break;
1192 }
1193
1194 /*
1195 * Run through every peripheral instance of this driver
1196 * and check to see whether it matches the unit passed
1197 * in by the user. If it does, get out of the loops and
1198 * find the passthrough driver associated with that
1199 * peripheral driver.
1200 */
1201 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1202 periph = TAILQ_NEXT(periph, unit_links)) {
1203
1204 if (periph->unit_number == unit) {
1205 break;
1206 } else if (--splbreaknum == 0) {
1207 splx(s);
1208 s = splcam();
1209 splbreaknum = 100;
1210 if (cur_generation != xsoftc.generation)
1211 goto ptstartover;
1212 }
1213 }
1214 /*
1215 * If we found the peripheral driver that the user passed
1216 * in, go through all of the peripheral drivers for that
1217 * particular device and look for a passthrough driver.
1218 */
1219 if (periph != NULL) {
1220 struct cam_ed *device;
1221 int i;
1222
1223 base_periph_found = 1;
1224 device = periph->path->device;
1225 for (i = 0, periph = device->periphs.slh_first;
1226 periph != NULL;
1227 periph = periph->periph_links.sle_next, i++) {
1228 /*
1229 * Check to see whether we have a
1230 * passthrough device or not.
1231 */
1232 if (strcmp(periph->periph_name, "pass") == 0) {
1233 /*
1234 * Fill in the getdevlist fields.
1235 */
1236 strcpy(ccb->cgdl.periph_name,
1237 periph->periph_name);
1238 ccb->cgdl.unit_number =
1239 periph->unit_number;
1240 if (periph->periph_links.sle_next)
1241 ccb->cgdl.status =
1242 CAM_GDEVLIST_MORE_DEVS;
1243 else
1244 ccb->cgdl.status =
1245 CAM_GDEVLIST_LAST_DEVICE;
1246 ccb->cgdl.generation =
1247 device->generation;
1248 ccb->cgdl.index = i;
1249 /*
1250 * Fill in some CCB header fields
1251 * that the user may want.
1252 */
1253 ccb->ccb_h.path_id =
1254 periph->path->bus->path_id;
1255 ccb->ccb_h.target_id =
1256 periph->path->target->target_id;
1257 ccb->ccb_h.target_lun =
1258 periph->path->device->lun_id;
1259 ccb->ccb_h.status = CAM_REQ_CMP;
1260 break;
1261 }
1262 }
1263 }
1264
1265 /*
1266 * If the periph is null here, one of two things has
1267 * happened. The first possibility is that we couldn't
1268 * find the unit number of the particular peripheral driver
1269 * that the user is asking about. e.g. the user asks for
1270 * the passthrough driver for "da11". We find the list of
1271 * "da" peripherals all right, but there is no unit 11.
1272 * The other possibility is that we went through the list
1273 * of peripheral drivers attached to the device structure,
1274 * but didn't find one with the name "pass". Either way,
1275 * we return ENOENT, since we couldn't find something.
1276 */
1277 if (periph == NULL) {
1278 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1279 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1280 *ccb->cgdl.periph_name = '\0';
1281 ccb->cgdl.unit_number = 0;
1282 error = ENOENT;
1283 /*
1284 * It is unfortunate that this is even necessary,
1285 * but there are many, many clueless users out there.
1286 * If this is true, the user is looking for the
1287 * passthrough driver, but doesn't have one in his
1288 * kernel.
1289 */
1290 if (base_periph_found == 1) {
1291 printf("xptioctl: pass driver is not in the "
1292 "kernel\n");
1293 printf("xptioctl: put \"device pass0\" in "
1294 "your kernel config file\n");
1295 }
1296 }
1297 splx(s);
1298 break;
1299 }
1300 default:
1301 error = ENOTTY;
1302 break;
1303 }
1304
1305 return(error);
1306}
1307
1308/* Functions accessed by the peripheral drivers */
1309static void
1310xpt_init(dummy)
1311 void *dummy;
1312{
1313 struct cam_sim *xpt_sim;
1314 struct cam_path *path;
1315 struct cam_devq *devq;
1316 cam_status status;
1317
1318 TAILQ_INIT(&xpt_busses);
1319 TAILQ_INIT(&cam_bioq);
1320 TAILQ_INIT(&cam_netq);
1321 SLIST_INIT(&ccb_freeq);
1322 STAILQ_INIT(&highpowerq);
1323
1324 /*
1325 * The xpt layer is, itself, the equivalent of a SIM.
1326 * Allow 16 ccbs in the ccb pool for it. This should
1327 * give decent parallelism when we probe busses and
1328 * perform other XPT functions.
1329 */
1330 devq = cam_simq_alloc(16);
1331 xpt_sim = cam_sim_alloc(xptaction,
1332 xptpoll,
1333 "xpt",
1334 /*softc*/NULL,
1335 /*unit*/0,
1336 /*max_dev_transactions*/0,
1337 /*max_tagged_dev_transactions*/0,
1338 devq);
1339 cam_simq_release(devq);
1340 xpt_max_ccbs = 16;
1341
1342 xpt_bus_register(xpt_sim, /*bus #*/0);
1343
1344 /*
1345 * Looking at the XPT from the SIM layer, the XPT is
1346 * the equivalent of a peripheral driver. Allocate
1347 * a peripheral driver entry for us.
1348 */
1349 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1350 CAM_TARGET_WILDCARD,
1351 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1352 printf("xpt_init: xpt_create_path failed with status %#x,"
1353 " failing attach\n", status);
1354 return;
1355 }
1356
1357 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1358 path, NULL, 0, NULL);
1359 xpt_free_path(path);
1360
1361 xpt_sim->softc = xpt_periph;
1362
1363 /*
1364 * Register a callback for when interrupts are enabled.
1365 */
1366 xpt_config_hook = malloc(sizeof(struct intr_config_hook),
1367 M_TEMP, M_INTWAIT | M_ZERO);
1368 xpt_config_hook->ich_func = xpt_config;
1369 if (config_intrhook_establish(xpt_config_hook) != 0) {
1370 free (xpt_config_hook, M_TEMP);
1371 printf("xpt_init: config_intrhook_establish failed "
1372 "- failing attach\n");
1373 }
1374
1375 /* Install our software interrupt handlers */
1376 register_swi(SWI_CAMNET, swi_camnet, NULL, "swi_camnet");
1377 register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio");
1378}
1379
1380static cam_status
1381xptregister(struct cam_periph *periph, void *arg)
1382{
1383 if (periph == NULL) {
1384 printf("xptregister: periph was NULL!!\n");
1385 return(CAM_REQ_CMP_ERR);
1386 }
1387
1388 periph->softc = NULL;
1389
1390 xpt_periph = periph;
1391
1392 return(CAM_REQ_CMP);
1393}
1394
1395int32_t
1396xpt_add_periph(struct cam_periph *periph)
1397{
1398 struct cam_ed *device;
1399 int32_t status;
1400 struct periph_list *periph_head;
1401
1402 device = periph->path->device;
1403
1404 periph_head = &device->periphs;
1405
1406 status = CAM_REQ_CMP;
1407
1408 if (device != NULL) {
1409 int s;
1410
1411 /*
1412 * Make room for this peripheral
1413 * so it will fit in the queue
1414 * when it's scheduled to run
1415 */
1416 s = splsoftcam();
1417 status = camq_resize(&device->drvq,
1418 device->drvq.array_size + 1);
1419
1420 device->generation++;
1421
1422 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1423
1424 splx(s);
1425 }
1426
1427 xsoftc.generation++;
1428
1429 return (status);
1430}
1431
1432void
1433xpt_remove_periph(struct cam_periph *periph)
1434{
1435 struct cam_ed *device;
1436
1437 device = periph->path->device;
1438
1439 if (device != NULL) {
1440 int s;
1441 struct periph_list *periph_head;
1442
1443 periph_head = &device->periphs;
1444
1445 /* Release the slot for this peripheral */
1446 s = splsoftcam();
1447 camq_resize(&device->drvq, device->drvq.array_size - 1);
1448
1449 device->generation++;
1450
1451 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1452
1453 splx(s);
1454 }
1455
1456 xsoftc.generation++;
1457
1458}
1459
1460void
1461xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1462{
1463 int s;
1464 u_int mb;
1465 struct cam_path *path;
1466 struct ccb_trans_settings cts;
1467
1468 path = periph->path;
1469 /*
1470 * To ensure that this is printed in one piece,
1471 * mask out CAM interrupts.
1472 */
1473 s = splsoftcam();
1474 printf("%s%d at %s%d bus %d target %d lun %d\n",
1475 periph->periph_name, periph->unit_number,
1476 path->bus->sim->sim_name,
1477 path->bus->sim->unit_number,
1478 path->bus->sim->bus_id,
1479 path->target->target_id,
1480 path->device->lun_id);
1481 printf("%s%d: ", periph->periph_name, periph->unit_number);
1482 scsi_print_inquiry(&path->device->inq_data);
1483 if ((bootverbose)
1484 && (path->device->serial_num_len > 0)) {
1485 /* Don't wrap the screen - print only the first 60 chars */
1486 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1487 periph->unit_number, path->device->serial_num);
1488 }
1489 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1490 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1491 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1492 xpt_action((union ccb*)&cts);
1493 if (cts.ccb_h.status == CAM_REQ_CMP) {
1494 u_int speed;
1495 u_int freq;
1496
1497 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1498 && cts.sync_offset != 0) {
1499 freq = scsi_calc_syncsrate(cts.sync_period);
1500 speed = freq;
1501 } else {
1502 struct ccb_pathinq cpi;
1503
1504 /* Ask the SIM for its base transfer speed */
1505 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1506 cpi.ccb_h.func_code = XPT_PATH_INQ;
1507 xpt_action((union ccb *)&cpi);
1508
1509 speed = cpi.base_transfer_speed;
1510 freq = 0;
1511 }
1512 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1513 speed *= (0x01 << cts.bus_width);
1514 mb = speed / 1000;
1515 if (mb > 0)
1516 printf("%s%d: %d.%03dMB/s transfers",
1517 periph->periph_name, periph->unit_number,
1518 mb, speed % 1000);
1519 else
1520 printf("%s%d: %dKB/s transfers", periph->periph_name,
1521 periph->unit_number, speed);
1522 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1523 && cts.sync_offset != 0) {
1524 printf(" (%d.%03dMHz, offset %d", freq / 1000,
1525 freq % 1000, cts.sync_offset);
1526 }
1527 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1528 && cts.bus_width > 0) {
1529 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1530 && cts.sync_offset != 0) {
1531 printf(", ");
1532 } else {
1533 printf(" (");
1534 }
1535 printf("%dbit)", 8 * (0x01 << cts.bus_width));
1536 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1537 && cts.sync_offset != 0) {
1538 printf(")");
1539 }
1540
1541 if (path->device->inq_flags & SID_CmdQue
1542 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1543 printf(", Tagged Queueing Enabled");
1544 }
1545
1546 printf("\n");
1547 } else if (path->device->inq_flags & SID_CmdQue
1548 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1549 printf("%s%d: Tagged Queueing Enabled\n",
1550 periph->periph_name, periph->unit_number);
1551 }
1552
1553 /*
1554 * We only want to print the caller's announce string if they've
1555 * passed one in..
1556 */
1557 if (announce_string != NULL)
1558 printf("%s%d: %s\n", periph->periph_name,
1559 periph->unit_number, announce_string);
1560 splx(s);
1561}
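/*
 * The printfs above produce the familiar boot-time banner; a
 * representative example (values invented):
 *
 *	da0 at ahc0 bus 0 target 4 lun 0
 *	da0: <SEAGATE ST34520W 1281> Fixed Direct Access SCSI-2 device
 *	da0: 40.000MB/s transfers (20.000MHz, offset 8, 16bit), Tagged Queueing Enabled
 */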
1562
1563
1564static dev_match_ret
1565xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
1566 struct cam_eb *bus)
1567{
1568 dev_match_ret retval;
1569 int i;
1570
1571 retval = DM_RET_NONE;
1572
1573 /*
1574 * If we aren't given something to match against, that's an error.
1575 */
1576 if (bus == NULL)
1577 return(DM_RET_ERROR);
1578
1579 /*
1580 * If there are no match entries, then this bus matches no
1581 * matter what.
1582 */
1583 if ((patterns == NULL) || (num_patterns == 0))
1584 return(DM_RET_DESCEND | DM_RET_COPY);
1585
1586 for (i = 0; i < num_patterns; i++) {
1587 struct bus_match_pattern *cur_pattern;
1588
1589 /*
1590 * If the pattern in question isn't for a bus node, we
1591 * aren't interested. However, we do indicate to the
1592 * calling routine that we should continue descending the
1593 * tree, since the user wants to match against lower-level
1594 * EDT elements.
1595 */
1596 if (patterns[i].type != DEV_MATCH_BUS) {
1597 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1598 retval |= DM_RET_DESCEND;
1599 continue;
1600 }
1601
1602 cur_pattern = &patterns[i].pattern.bus_pattern;
1603
1604 /*
1605 * If they want to match any bus node, we give them any
1606 * bus node.
1607 */
1608 if (cur_pattern->flags == BUS_MATCH_ANY) {
1609 /* set the copy flag */
1610 retval |= DM_RET_COPY;
1611
1612 /*
1613 * If we've already decided on an action, go ahead
1614 * and return.
1615 */
1616 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1617 return(retval);
1618 }
1619
1620 /*
1621 * Not sure why someone would do this...
1622 */
1623 if (cur_pattern->flags == BUS_MATCH_NONE)
1624 continue;
1625
1626 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1627 && (cur_pattern->path_id != bus->path_id))
1628 continue;
1629
1630 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1631 && (cur_pattern->bus_id != bus->sim->bus_id))
1632 continue;
1633
1634 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1635 && (cur_pattern->unit_number != bus->sim->unit_number))
1636 continue;
1637
1638 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1639 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1640 DEV_IDLEN) != 0))
1641 continue;
1642
1643 /*
1644 * If we get to this point, the user definitely wants
1645 * information on this bus. So tell the caller to copy the
1646 * data out.
1647 */
1648 retval |= DM_RET_COPY;
1649
1650 /*
1651 * If the return action has been set to descend, then we
1652 * know that we've already seen a non-bus matching
1653 * expression, therefore we need to further descend the tree.
1654 * This won't change by continuing around the loop, so we
1655 * go ahead and return. If we haven't seen a non-bus
1656 * matching expression, we keep going around the loop until
1657 * we exhaust the matching expressions. We'll set the stop
1658 * flag once we fall out of the loop.
1659 */
1660 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1661 return(retval);
1662 }
1663
1664 /*
1665 * If the return action hasn't been set to descend yet, that means
1666 * we haven't seen anything other than bus matching patterns. So
1667 * tell the caller to stop descending the tree -- the user doesn't
1668 * want to match against lower level tree elements.
1669 */
1670 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1671 retval |= DM_RET_STOP;
1672
1673 return(retval);
1674}
1675
1676static dev_match_ret
1677xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
1678 struct cam_ed *device)
1679{
1680 dev_match_ret retval;
1681 int i;
1682
1683 retval = DM_RET_NONE;
1684
1685 /*
1686 * If we aren't given something to match against, that's an error.
1687 */
1688 if (device == NULL)
1689 return(DM_RET_ERROR);
1690
1691 /*
1692 * If there are no match entries, then this device matches no
1693 * matter what.
1694 */
1695 if ((patterns == NULL) || (num_patterns == 0))
1696 return(DM_RET_DESCEND | DM_RET_COPY);
1697
1698 for (i = 0; i < num_patterns; i++) {
1699 struct device_match_pattern *cur_pattern;
1700
1701 /*
1702 * If the pattern in question isn't for a device node, we
1703 * aren't interested.
1704 */
1705 if (patterns[i].type != DEV_MATCH_DEVICE) {
1706 if ((patterns[i].type == DEV_MATCH_PERIPH)
1707 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1708 retval |= DM_RET_DESCEND;
1709 continue;
1710 }
1711
1712 cur_pattern = &patterns[i].pattern.device_pattern;
1713
1714 /*
1715 * If they want to match any device node, we give them any
1716 * device node.
1717 */
1718 if (cur_pattern->flags == DEV_MATCH_ANY) {
1719 /* set the copy flag */
1720 retval |= DM_RET_COPY;
1721
1722
1723 /*
1724 * If we've already decided on an action, go ahead
1725 * and return.
1726 */
1727 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1728 return(retval);
1729 }
1730
1731 /*
1732 * Not sure why someone would do this...
1733 */
1734 if (cur_pattern->flags == DEV_MATCH_NONE)
1735 continue;
1736
1737 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1738 && (cur_pattern->path_id != device->target->bus->path_id))
1739 continue;
1740
1741 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1742 && (cur_pattern->target_id != device->target->target_id))
1743 continue;
1744
1745 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1746 && (cur_pattern->target_lun != device->lun_id))
1747 continue;
1748
1749 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1750 && (cam_quirkmatch((caddr_t)&device->inq_data,
1751 (caddr_t)&cur_pattern->inq_pat,
1752 1, sizeof(cur_pattern->inq_pat),
1753 scsi_static_inquiry_match) == NULL))
1754 continue;
1755
1756 /*
1757 * If we get to this point, the user definitely wants
1758 * information on this device. So tell the caller to copy
1759 * the data out.
1760 */
1761 retval |= DM_RET_COPY;
1762
1763 /*
1764 * If the return action has been set to descend, then we
1765 * know that we've already seen a peripheral matching
1766 * expression, therefore we need to further descend the tree.
1767 * This won't change by continuing around the loop, so we
1768 * go ahead and return. If we haven't seen a peripheral
1769 * matching expression, we keep going around the loop until
1770 * we exhaust the matching expressions. We'll set the stop
1771 * flag once we fall out of the loop.
1772 */
1773 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1774 return(retval);
1775 }
1776
1777 /*
1778 * If the return action hasn't been set to descend yet, that means
1779 * we haven't seen any peripheral matching patterns. So tell the
1780 * caller to stop descending the tree -- the user doesn't want to
1781 * match against lower level tree elements.
1782 */
1783 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1784 retval |= DM_RET_STOP;
1785
1786 return(retval);
1787}
1788
1789/*
1790 * Match a single peripheral against any number of match patterns.
1791 */
1792static dev_match_ret
1793xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1794 struct cam_periph *periph)
1795{
1796 dev_match_ret retval;
1797 int i;
1798
1799 /*
1800 * If we aren't given something to match against, that's an error.
1801 */
1802 if (periph == NULL)
1803 return(DM_RET_ERROR);
1804
1805 /*
1806 * If there are no match entries, then this peripheral matches no
1807 * matter what.
1808 */
1809 if ((patterns == NULL) || (num_patterns == 0))
1810 return(DM_RET_STOP | DM_RET_COPY);
1811
1812 /*
1813 * There aren't any nodes below a peripheral node, so there's no
1814 * reason to descend the tree any further.
1815 */
1816 retval = DM_RET_STOP;
1817
1818 for (i = 0; i < num_patterns; i++) {
1819 struct periph_match_pattern *cur_pattern;
1820
1821 /*
1822 * If the pattern in question isn't for a peripheral, we
1823 * aren't interested.
1824 */
1825 if (patterns[i].type != DEV_MATCH_PERIPH)
1826 continue;
1827
1828 cur_pattern = &patterns[i].pattern.periph_pattern;
1829
1830 /*
1831 * If they want to match on anything, then we will do so.
1832 */
1833 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1834 /* set the copy flag */
1835 retval |= DM_RET_COPY;
1836
1837 /*
1838 * We've already set the return action to stop,
1839 * since there are no nodes below peripherals in
1840 * the tree.
1841 */
1842 return(retval);
1843 }
1844
1845 /*
1846 * Not sure why someone would do this...
1847 */
1848 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1849 continue;
1850
1851 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1852 && (cur_pattern->path_id != periph->path->bus->path_id))
1853 continue;
1854
1855 /*
1856 * For the target and lun IDs, we have to make sure the
1857 * target and lun pointers aren't NULL. The xpt peripheral
1858 * has a wildcard target and device.
1859 */
1860 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1861 && ((periph->path->target == NULL)
1862 ||(cur_pattern->target_id != periph->path->target->target_id)))
1863 continue;
1864
1865 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1866 && ((periph->path->device == NULL)
1867 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1868 continue;
1869
1870 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1871 && (cur_pattern->unit_number != periph->unit_number))
1872 continue;
1873
1874 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1875 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1876 DEV_IDLEN) != 0))
1877 continue;
1878
1879 /*
1880 * If we get to this point, the user definitely wants
1881 * information on this peripheral. So tell the caller to
1882 * copy the data out.
1883 */
1884 retval |= DM_RET_COPY;
1885
1886 /*
1887 * The return action has already been set to stop, since
1888 * peripherals don't have any nodes below them in the EDT.
1889 */
1890 return(retval);
1891 }
1892
1893 /*
1894 * If we get to this point, the peripheral that was passed in
1895 * doesn't match any of the patterns.
1896 */
1897 return(retval);
1898}
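
/*
 * Example (editorial sketch, not in the original source): a caller that
 * wants xptperiphmatch() to match every unit of a given driver, e.g. "da",
 * would hand in a pattern shaped like the one below.  The fields are the
 * same periph_match_pattern fields tested above; the plumbing that
 * delivers the pattern (see the XPT_DEV_MATCH example after xptedtmatch())
 * is assumed.  Illustrative only, not compiled.
 */
#if 0
static struct dev_match_pattern
example_periph_name_pattern(const char *name)
{
	struct dev_match_pattern p;

	bzero(&p, sizeof(p));
	p.type = DEV_MATCH_PERIPH;
	p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
	strncpy(p.pattern.periph_pattern.periph_name, name, DEV_IDLEN);
	return (p);
}
#endif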
1899
1900static int
1901xptedtbusfunc(struct cam_eb *bus, void *arg)
1902{
1903 struct ccb_dev_match *cdm;
1904 dev_match_ret retval;
1905
1906 cdm = (struct ccb_dev_match *)arg;
1907
1908 /*
1909 * If our position is for something deeper in the tree, that means
1910 * that we've already seen this node. So, we keep going down.
1911 */
1912 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1913 && (cdm->pos.cookie.bus == bus)
1914 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1915 && (cdm->pos.cookie.target != NULL))
1916 retval = DM_RET_DESCEND;
1917 else
1918 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1919
1920 /*
1921 * If we got an error, bail out of the search.
1922 */
1923 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1924 cdm->status = CAM_DEV_MATCH_ERROR;
1925 return(0);
1926 }
1927
1928 /*
1929 * If the copy flag is set, copy this bus out.
1930 */
1931 if (retval & DM_RET_COPY) {
1932 int spaceleft, j;
1933
1934 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1935 sizeof(struct dev_match_result));
1936
1937 /*
1938 * If we don't have enough space to put in another
1939 * match result, save our position and tell the
1940 * user there are more devices to check.
1941 */
1942 if (spaceleft < sizeof(struct dev_match_result)) {
1943 bzero(&cdm->pos, sizeof(cdm->pos));
1944 cdm->pos.position_type =
1945 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1946
1947 cdm->pos.cookie.bus = bus;
1948 cdm->pos.generations[CAM_BUS_GENERATION]=
1949 bus_generation;
1950 cdm->status = CAM_DEV_MATCH_MORE;
1951 return(0);
1952 }
1953 j = cdm->num_matches;
1954 cdm->num_matches++;
1955 cdm->matches[j].type = DEV_MATCH_BUS;
1956 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1957 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1958 cdm->matches[j].result.bus_result.unit_number =
1959 bus->sim->unit_number;
1960 strncpy(cdm->matches[j].result.bus_result.dev_name,
1961 bus->sim->sim_name, DEV_IDLEN);
1962 }
1963
1964 /*
1965 * If the user is only interested in busses, there's no
1966 * reason to descend to the next level in the tree.
1967 */
1968 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1969 return(1);
1970
1971 /*
1972 * If there is a target generation recorded, check it to
1973 * make sure the target list hasn't changed.
1974 */
1975 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1976 && (bus == cdm->pos.cookie.bus)
1977 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1978 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1979 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1980 bus->generation)) {
1981 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1982 return(0);
1983 }
1984
1985 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1986 && (cdm->pos.cookie.bus == bus)
1987 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1988 && (cdm->pos.cookie.target != NULL))
1989 return(xpttargettraverse(bus,
1990 (struct cam_et *)cdm->pos.cookie.target,
1991 xptedttargetfunc, arg));
1992 else
1993 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1994}
1995
1996static int
1997xptedttargetfunc(struct cam_et *target, void *arg)
1998{
1999 struct ccb_dev_match *cdm;
2000
2001 cdm = (struct ccb_dev_match *)arg;
2002
2003 /*
2004 * If there is a device list generation recorded, check it to
2005 * make sure the device list hasn't changed.
2006 */
2007 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2008 && (cdm->pos.cookie.bus == target->bus)
2009 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2010 && (cdm->pos.cookie.target == target)
2011 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2012 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2013 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2014 target->generation)) {
2015 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2016 return(0);
2017 }
2018
2019 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2020 && (cdm->pos.cookie.bus == target->bus)
2021 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2022 && (cdm->pos.cookie.target == target)
2023 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2024 && (cdm->pos.cookie.device != NULL))
2025 return(xptdevicetraverse(target,
2026 (struct cam_ed *)cdm->pos.cookie.device,
2027 xptedtdevicefunc, arg));
2028 else
2029 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2030}
2031
2032static int
2033xptedtdevicefunc(struct cam_ed *device, void *arg)
2034{
2035
2036 struct ccb_dev_match *cdm;
2037 dev_match_ret retval;
2038
2039 cdm = (struct ccb_dev_match *)arg;
2040
2041 /*
2042 * If our position is for something deeper in the tree, that means
2043 * that we've already seen this node. So, we keep going down.
2044 */
2045 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2046 && (cdm->pos.cookie.device == device)
2047 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2048 && (cdm->pos.cookie.periph != NULL))
2049 retval = DM_RET_DESCEND;
2050 else
2051 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2052 device);
2053
2054 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2055 cdm->status = CAM_DEV_MATCH_ERROR;
2056 return(0);
2057 }
2058
2059 /*
2060 * If the copy flag is set, copy this device out.
2061 */
2062 if (retval & DM_RET_COPY) {
2063 int spaceleft, j;
2064
2065 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2066 sizeof(struct dev_match_result));
2067
2068 /*
2069 * If we don't have enough space to put in another
2070 * match result, save our position and tell the
2071 * user there are more devices to check.
2072 */
2073 if (spaceleft < sizeof(struct dev_match_result)) {
2074 bzero(&cdm->pos, sizeof(cdm->pos));
2075 cdm->pos.position_type =
2076 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2077 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2078
2079 cdm->pos.cookie.bus = device->target->bus;
2080 cdm->pos.generations[CAM_BUS_GENERATION]=
2081 bus_generation;
2082 cdm->pos.cookie.target = device->target;
2083 cdm->pos.generations[CAM_TARGET_GENERATION] =
2084 device->target->bus->generation;
2085 cdm->pos.cookie.device = device;
2086 cdm->pos.generations[CAM_DEV_GENERATION] =
2087 device->target->generation;
2088 cdm->status = CAM_DEV_MATCH_MORE;
2089 return(0);
2090 }
2091 j = cdm->num_matches;
2092 cdm->num_matches++;
2093 cdm->matches[j].type = DEV_MATCH_DEVICE;
2094 cdm->matches[j].result.device_result.path_id =
2095 device->target->bus->path_id;
2096 cdm->matches[j].result.device_result.target_id =
2097 device->target->target_id;
2098 cdm->matches[j].result.device_result.target_lun =
2099 device->lun_id;
2100 bcopy(&device->inq_data,
2101 &cdm->matches[j].result.device_result.inq_data,
2102 sizeof(struct scsi_inquiry_data));
2103
2104 /* Let the user know whether this device is unconfigured */
2105 if (device->flags & CAM_DEV_UNCONFIGURED)
2106 cdm->matches[j].result.device_result.flags =
2107 DEV_RESULT_UNCONFIGURED;
2108 else
2109 cdm->matches[j].result.device_result.flags =
2110 DEV_RESULT_NOFLAG;
2111 }
2112
2113 /*
2114 * If the user isn't interested in peripherals, don't descend
2115 * the tree any further.
2116 */
2117 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2118 return(1);
2119
2120 /*
2121 * If there is a peripheral list generation recorded, make sure
2122 * it hasn't changed.
2123 */
2124 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2125 && (device->target->bus == cdm->pos.cookie.bus)
2126 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2127 && (device->target == cdm->pos.cookie.target)
2128 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2129 && (device == cdm->pos.cookie.device)
2130 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2131 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2132 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2133 device->generation)){
2134 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2135 return(0);
2136 }
2137
2138 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2139 && (cdm->pos.cookie.bus == device->target->bus)
2140 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2141 && (cdm->pos.cookie.target == device->target)
2142 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2143 && (cdm->pos.cookie.device == device)
2144 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2145 && (cdm->pos.cookie.periph != NULL))
2146 return(xptperiphtraverse(device,
2147 (struct cam_periph *)cdm->pos.cookie.periph,
2148 xptedtperiphfunc, arg));
2149 else
2150 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2151}
2152
2153static int
2154xptedtperiphfunc(struct cam_periph *periph, void *arg)
2155{
2156 struct ccb_dev_match *cdm;
2157 dev_match_ret retval;
2158
2159 cdm = (struct ccb_dev_match *)arg;
2160
2161 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2162
2163 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2164 cdm->status = CAM_DEV_MATCH_ERROR;
2165 return(0);
2166 }
2167
2168 /*
2169 * If the copy flag is set, copy this peripheral out.
2170 */
2171 if (retval & DM_RET_COPY) {
2172 int spaceleft, j;
2173
2174 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2175 sizeof(struct dev_match_result));
2176
2177 /*
2178 * If we don't have enough space to put in another
2179 * match result, save our position and tell the
2180 * user there are more devices to check.
2181 */
2182 if (spaceleft < sizeof(struct dev_match_result)) {
2183 bzero(&cdm->pos, sizeof(cdm->pos));
2184 cdm->pos.position_type =
2185 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2186 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2187 CAM_DEV_POS_PERIPH;
2188
2189 cdm->pos.cookie.bus = periph->path->bus;
2190 cdm->pos.generations[CAM_BUS_GENERATION]=
2191 bus_generation;
2192 cdm->pos.cookie.target = periph->path->target;
2193 cdm->pos.generations[CAM_TARGET_GENERATION] =
2194 periph->path->bus->generation;
2195 cdm->pos.cookie.device = periph->path->device;
2196 cdm->pos.generations[CAM_DEV_GENERATION] =
2197 periph->path->target->generation;
2198 cdm->pos.cookie.periph = periph;
2199 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2200 periph->path->device->generation;
2201 cdm->status = CAM_DEV_MATCH_MORE;
2202 return(0);
2203 }
2204
2205 j = cdm->num_matches;
2206 cdm->num_matches++;
2207 cdm->matches[j].type = DEV_MATCH_PERIPH;
2208 cdm->matches[j].result.periph_result.path_id =
2209 periph->path->bus->path_id;
2210 cdm->matches[j].result.periph_result.target_id =
2211 periph->path->target->target_id;
2212 cdm->matches[j].result.periph_result.target_lun =
2213 periph->path->device->lun_id;
2214 cdm->matches[j].result.periph_result.unit_number =
2215 periph->unit_number;
2216 strncpy(cdm->matches[j].result.periph_result.periph_name,
2217 periph->periph_name, DEV_IDLEN);
2218 }
2219
2220 return(1);
2221}
2222
2223static int
2224xptedtmatch(struct ccb_dev_match *cdm)
2225{
2226 int ret;
2227
2228 cdm->num_matches = 0;
2229
2230 /*
2231 * Check the bus list generation. If it has changed, the user
2232 * needs to reset everything and start over.
2233 */
2234 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2235 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2236 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2237 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2238 return(0);
2239 }
2240
2241 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2242 && (cdm->pos.cookie.bus != NULL))
2243 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2244 xptedtbusfunc, cdm);
2245 else
2246 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2247
2248 /*
2249 * If we get back 0, that means that we had to stop before fully
2250 * traversing the EDT. It also means that one of the subroutines
2251 * has set the status field to the proper value. If we get back 1,
2252 * we've fully traversed the EDT and copied out any matching entries.
2253 */
2254 if (ret == 1)
2255 cdm->status = CAM_DEV_MATCH_LAST;
2256
2257 return(ret);
2258}
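
/*
 * Example (editorial sketch): how a userland consumer such as camcontrol(8)
 * typically drives the matcher above from the other side of the ioctl.  The
 * resume protocol relies on the position saved in cdm->pos whenever
 * CAM_DEV_MATCH_MORE is returned.  The "/dev/xpt0" node, the CAMIOCOMMAND
 * ioctl and the <camlib.h> header are assumptions from the userland CAM
 * interface; this fragment is illustrative only and not compiled here.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <camlib.h>	/* pulls in cam/cam.h and cam/cam_ccb.h */

static int
example_getdevtree(void)
{
	union ccb ccb;
	int fd;

	if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
		return (-1);

	memset(&ccb, 0, sizeof(ccb));
	ccb.ccb_h.path_id = CAM_BUS_WILDCARD;
	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	ccb.ccb_h.func_code = XPT_DEV_MATCH;
	ccb.cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
	ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
	ccb.cdm.num_patterns = 0;	/* no patterns: match everything */
	ccb.cdm.pattern_buf_len = 0;

	do {
		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
			break;
		/* ... consume ccb.cdm.num_matches results here ... */
	} while ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
	      && ccb.cdm.status == CAM_DEV_MATCH_MORE);

	free(ccb.cdm.matches);
	close(fd);
	return (0);
}
#endif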
2259
2260static int
2261xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2262{
2263 struct ccb_dev_match *cdm;
2264
2265 cdm = (struct ccb_dev_match *)arg;
2266
2267 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2268 && (cdm->pos.cookie.pdrv == pdrv)
2269 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2270 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2271 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2272 (*pdrv)->generation)) {
2273 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2274 return(0);
2275 }
2276
2277 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2278 && (cdm->pos.cookie.pdrv == pdrv)
2279 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2280 && (cdm->pos.cookie.periph != NULL))
2281 return(xptpdperiphtraverse(pdrv,
2282 (struct cam_periph *)cdm->pos.cookie.periph,
2283 xptplistperiphfunc, arg));
2284 else
 2285		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2286}
2287
2288static int
2289xptplistperiphfunc(struct cam_periph *periph, void *arg)
2290{
2291 struct ccb_dev_match *cdm;
2292 dev_match_ret retval;
2293
2294 cdm = (struct ccb_dev_match *)arg;
2295
2296 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2297
2298 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2299 cdm->status = CAM_DEV_MATCH_ERROR;
2300 return(0);
2301 }
2302
2303 /*
2304 * If the copy flag is set, copy this peripheral out.
2305 */
2306 if (retval & DM_RET_COPY) {
2307 int spaceleft, j;
2308
2309 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2310 sizeof(struct dev_match_result));
2311
2312 /*
2313 * If we don't have enough space to put in another
2314 * match result, save our position and tell the
2315 * user there are more devices to check.
2316 */
2317 if (spaceleft < sizeof(struct dev_match_result)) {
2318 struct periph_driver **pdrv;
2319
2320 pdrv = NULL;
2321 bzero(&cdm->pos, sizeof(cdm->pos));
2322 cdm->pos.position_type =
2323 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2324 CAM_DEV_POS_PERIPH;
2325
2326 /*
 2327			 * This may look a bit nonsensical, but it is
2328 * actually quite logical. There are very few
2329 * peripheral drivers, and bloating every peripheral
2330 * structure with a pointer back to its parent
2331 * peripheral driver linker set entry would cost
2332 * more in the long run than doing this quick lookup.
2333 */
 2334			SET_FOREACH(pdrv, periphdriver_set) {
2335 if (strcmp((*pdrv)->driver_name,
2336 periph->periph_name) == 0)
2337 break;
2338 }
2339
2340 if (pdrv == NULL) {
2341 cdm->status = CAM_DEV_MATCH_ERROR;
2342 return(0);
2343 }
2344
2345 cdm->pos.cookie.pdrv = pdrv;
2346 /*
2347 * The periph generation slot does double duty, as
2348 * does the periph pointer slot. They are used for
2349 * both edt and pdrv lookups and positioning.
2350 */
2351 cdm->pos.cookie.periph = periph;
2352 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2353 (*pdrv)->generation;
2354 cdm->status = CAM_DEV_MATCH_MORE;
2355 return(0);
2356 }
2357
2358 j = cdm->num_matches;
2359 cdm->num_matches++;
2360 cdm->matches[j].type = DEV_MATCH_PERIPH;
2361 cdm->matches[j].result.periph_result.path_id =
2362 periph->path->bus->path_id;
2363
2364 /*
2365 * The transport layer peripheral doesn't have a target or
2366 * lun.
2367 */
2368 if (periph->path->target)
2369 cdm->matches[j].result.periph_result.target_id =
2370 periph->path->target->target_id;
2371 else
2372 cdm->matches[j].result.periph_result.target_id = -1;
2373
2374 if (periph->path->device)
2375 cdm->matches[j].result.periph_result.target_lun =
2376 periph->path->device->lun_id;
2377 else
2378 cdm->matches[j].result.periph_result.target_lun = -1;
2379
2380 cdm->matches[j].result.periph_result.unit_number =
2381 periph->unit_number;
2382 strncpy(cdm->matches[j].result.periph_result.periph_name,
2383 periph->periph_name, DEV_IDLEN);
2384 }
2385
2386 return(1);
2387}
2388
2389static int
2390xptperiphlistmatch(struct ccb_dev_match *cdm)
2391{
2392 int ret;
2393
2394 cdm->num_matches = 0;
2395
2396 /*
2397 * At this point in the edt traversal function, we check the bus
2398 * list generation to make sure that no busses have been added or
2399 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2400 * For the peripheral driver list traversal function, however, we
2401 * don't have to worry about new peripheral driver types coming or
2402 * going; they're in a linker set, and therefore can't change
2403 * without a recompile.
2404 */
2405
2406 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2407 && (cdm->pos.cookie.pdrv != NULL))
2408 ret = xptpdrvtraverse(
2409 (struct periph_driver **)cdm->pos.cookie.pdrv,
2410 xptplistpdrvfunc, cdm);
2411 else
2412 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2413
2414 /*
2415 * If we get back 0, that means that we had to stop before fully
2416 * traversing the peripheral driver tree. It also means that one of
2417 * the subroutines has set the status field to the proper value. If
 2418	 * we get back 1, we've fully traversed the peripheral driver list
 2419	 * and copied out any matching entries.
2420 */
2421 if (ret == 1)
2422 cdm->status = CAM_DEV_MATCH_LAST;
2423
2424 return(ret);
2425}
2426
2427static int
2428xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2429{
2430 struct cam_eb *bus, *next_bus;
2431 int retval;
2432
2433 retval = 1;
2434
2435 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2436 bus != NULL;
2437 bus = next_bus) {
2438 next_bus = TAILQ_NEXT(bus, links);
2439
2440 retval = tr_func(bus, arg);
2441 if (retval == 0)
2442 return(retval);
2443 }
2444
2445 return(retval);
2446}
2447
2448static int
2449xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2450 xpt_targetfunc_t *tr_func, void *arg)
2451{
2452 struct cam_et *target, *next_target;
2453 int retval;
2454
2455 retval = 1;
2456 for (target = (start_target ? start_target :
2457 TAILQ_FIRST(&bus->et_entries));
2458 target != NULL; target = next_target) {
2459
2460 next_target = TAILQ_NEXT(target, links);
2461
2462 retval = tr_func(target, arg);
2463
2464 if (retval == 0)
2465 return(retval);
2466 }
2467
2468 return(retval);
2469}
2470
2471static int
2472xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2473 xpt_devicefunc_t *tr_func, void *arg)
2474{
2475 struct cam_ed *device, *next_device;
2476 int retval;
2477
2478 retval = 1;
2479 for (device = (start_device ? start_device :
2480 TAILQ_FIRST(&target->ed_entries));
2481 device != NULL;
2482 device = next_device) {
2483
2484 next_device = TAILQ_NEXT(device, links);
2485
2486 retval = tr_func(device, arg);
2487
2488 if (retval == 0)
2489 return(retval);
2490 }
2491
2492 return(retval);
2493}
2494
2495static int
2496xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2497 xpt_periphfunc_t *tr_func, void *arg)
2498{
2499 struct cam_periph *periph, *next_periph;
2500 int retval;
2501
2502 retval = 1;
2503
2504 for (periph = (start_periph ? start_periph :
2505 SLIST_FIRST(&device->periphs));
2506 periph != NULL;
2507 periph = next_periph) {
2508
2509 next_periph = SLIST_NEXT(periph, periph_links);
2510
2511 retval = tr_func(periph, arg);
2512 if (retval == 0)
2513 return(retval);
2514 }
2515
2516 return(retval);
2517}
2518
2519static int
2520xptpdrvtraverse(struct periph_driver **start_pdrv,
2521 xpt_pdrvfunc_t *tr_func, void *arg)
2522{
2523 struct periph_driver **pdrv;
2524 int retval;
2525
2526 retval = 1;
2527
2528 /*
2529 * We don't traverse the peripheral driver list like we do the
2530 * other lists, because it is a linker set, and therefore cannot be
2531 * changed during runtime. If the peripheral driver list is ever
2532 * re-done to be something other than a linker set (i.e. it can
2533 * change while the system is running), the list traversal should
2534 * be modified to work like the other traversal functions.
2535 */
 2536	SET_FOREACH(pdrv, periphdriver_set) {
 2537		if (start_pdrv == NULL || start_pdrv == pdrv) {
 2538			retval = tr_func(pdrv, arg);
 2539			if (retval == 0)
 2540				return(retval);
 2541			start_pdrv = NULL; /* traverse remainder */
 2542		}
 2543	}
2544 return(retval);
2545}
2546
2547static int
2548xptpdperiphtraverse(struct periph_driver **pdrv,
2549 struct cam_periph *start_periph,
2550 xpt_periphfunc_t *tr_func, void *arg)
2551{
2552 struct cam_periph *periph, *next_periph;
2553 int retval;
2554
2555 retval = 1;
2556
2557 for (periph = (start_periph ? start_periph :
2558 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2559 periph = next_periph) {
2560
2561 next_periph = TAILQ_NEXT(periph, unit_links);
2562
2563 retval = tr_func(periph, arg);
2564 if (retval == 0)
2565 return(retval);
2566 }
2567 return(retval);
2568}
2569
2570static int
2571xptdefbusfunc(struct cam_eb *bus, void *arg)
2572{
2573 struct xpt_traverse_config *tr_config;
2574
2575 tr_config = (struct xpt_traverse_config *)arg;
2576
2577 if (tr_config->depth == XPT_DEPTH_BUS) {
2578 xpt_busfunc_t *tr_func;
2579
2580 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2581
2582 return(tr_func(bus, tr_config->tr_arg));
2583 } else
2584 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2585}
2586
2587static int
2588xptdeftargetfunc(struct cam_et *target, void *arg)
2589{
2590 struct xpt_traverse_config *tr_config;
2591
2592 tr_config = (struct xpt_traverse_config *)arg;
2593
2594 if (tr_config->depth == XPT_DEPTH_TARGET) {
2595 xpt_targetfunc_t *tr_func;
2596
2597 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2598
2599 return(tr_func(target, tr_config->tr_arg));
2600 } else
2601 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2602}
2603
2604static int
2605xptdefdevicefunc(struct cam_ed *device, void *arg)
2606{
2607 struct xpt_traverse_config *tr_config;
2608
2609 tr_config = (struct xpt_traverse_config *)arg;
2610
2611 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2612 xpt_devicefunc_t *tr_func;
2613
2614 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2615
2616 return(tr_func(device, tr_config->tr_arg));
2617 } else
2618 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2619}
2620
2621static int
2622xptdefperiphfunc(struct cam_periph *periph, void *arg)
2623{
2624 struct xpt_traverse_config *tr_config;
2625 xpt_periphfunc_t *tr_func;
2626
2627 tr_config = (struct xpt_traverse_config *)arg;
2628
2629 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2630
2631 /*
2632 * Unlike the other default functions, we don't check for depth
2633 * here. The peripheral driver level is the last level in the EDT,
2634 * so if we're here, we should execute the function in question.
2635 */
2636 return(tr_func(periph, tr_config->tr_arg));
2637}
2638
2639/*
2640 * Execute the given function for every bus in the EDT.
2641 */
2642static int
2643xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2644{
2645 struct xpt_traverse_config tr_config;
2646
2647 tr_config.depth = XPT_DEPTH_BUS;
2648 tr_config.tr_func = tr_func;
2649 tr_config.tr_arg = arg;
2650
2651 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2652}
2653
2654#ifdef notusedyet
2655/*
2656 * Execute the given function for every target in the EDT.
2657 */
2658static int
2659xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2660{
2661 struct xpt_traverse_config tr_config;
2662
2663 tr_config.depth = XPT_DEPTH_TARGET;
2664 tr_config.tr_func = tr_func;
2665 tr_config.tr_arg = arg;
2666
2667 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2668}
2669#endif /* notusedyet */
2670
2671/*
2672 * Execute the given function for every device in the EDT.
2673 */
2674static int
2675xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2676{
2677 struct xpt_traverse_config tr_config;
2678
2679 tr_config.depth = XPT_DEPTH_DEVICE;
2680 tr_config.tr_func = tr_func;
2681 tr_config.tr_arg = arg;
2682
2683 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2684}
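
/*
 * Example (editorial sketch): a trivial xpt_for_all_devices() callback.
 * Returning 1 keeps the walk going, returning 0 aborts it, exactly as the
 * traversal loops above expect.  Illustrative only, not compiled.
 */
#if 0
static int
example_count_device(struct cam_ed *device, void *arg)
{
	int *count = (int *)arg;

	if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
		(*count)++;
	return (1);		/* continue the traversal */
}

/* Usage: int n = 0; xpt_for_all_devices(example_count_device, &n); */
#endif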
2685
2686#ifdef notusedyet
2687/*
2688 * Execute the given function for every peripheral in the EDT.
2689 */
2690static int
2691xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2692{
2693 struct xpt_traverse_config tr_config;
2694
2695 tr_config.depth = XPT_DEPTH_PERIPH;
2696 tr_config.tr_func = tr_func;
2697 tr_config.tr_arg = arg;
2698
2699 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2700}
2701#endif /* notusedyet */
2702
2703static int
2704xptsetasyncfunc(struct cam_ed *device, void *arg)
2705{
2706 struct cam_path path;
2707 struct ccb_getdev cgd;
2708 struct async_node *cur_entry;
2709
2710 cur_entry = (struct async_node *)arg;
2711
2712 /*
2713 * Don't report unconfigured devices (Wildcard devs,
2714 * devices only for target mode, device instances
2715 * that have been invalidated but are waiting for
2716 * their last reference count to be released).
2717 */
2718 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2719 return (1);
2720
2721 xpt_compile_path(&path,
2722 NULL,
2723 device->target->bus->path_id,
2724 device->target->target_id,
2725 device->lun_id);
2726 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2727 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2728 xpt_action((union ccb *)&cgd);
2729 cur_entry->callback(cur_entry->callback_arg,
2730 AC_FOUND_DEVICE,
2731 &path, &cgd);
2732 xpt_release_path(&path);
2733
2734 return(1);
2735}
2736
2737static int
2738xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2739{
2740 struct cam_path path;
2741 struct ccb_pathinq cpi;
2742 struct async_node *cur_entry;
2743
2744 cur_entry = (struct async_node *)arg;
2745
2746 xpt_compile_path(&path, /*periph*/NULL,
2747 bus->sim->path_id,
2748 CAM_TARGET_WILDCARD,
2749 CAM_LUN_WILDCARD);
2750 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2751 cpi.ccb_h.func_code = XPT_PATH_INQ;
2752 xpt_action((union ccb *)&cpi);
2753 cur_entry->callback(cur_entry->callback_arg,
2754 AC_PATH_REGISTERED,
2755 &path, &cpi);
2756 xpt_release_path(&path);
2757
2758 return(1);
2759}
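
/*
 * Example (editorial sketch): how a peripheral driver typically arms the
 * XPT_SASYNC_CB machinery handled in xpt_action() below, which in turn
 * causes xptsetasyncfunc()/xptsetasyncbusfunc() above to replay existing
 * devices and busses to the new subscriber.  "exampleasync" is a
 * hypothetical handler; the pattern follows drivers such as da(4).
 * Illustrative only, not compiled.
 */
#if 0
static void
example_register_async(struct cam_path *path)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_PATH_REGISTERED;
	csa.callback = exampleasync;	/* hypothetical callback */
	csa.callback_arg = NULL;
	xpt_action((union ccb *)&csa);
}
#endif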
2760
2761void
2762xpt_action(union ccb *start_ccb)
2763{
2764 int iopl;
2765
2766 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2767
2768 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2769
2770 iopl = splsoftcam();
2771 switch (start_ccb->ccb_h.func_code) {
2772 case XPT_SCSI_IO:
2773 {
2774#ifdef CAMDEBUG
2775 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2776 struct cam_path *path;
2777
2778 path = start_ccb->ccb_h.path;
2779#endif
2780
2781 /*
2782 * For the sake of compatibility with SCSI-1
2783 * devices that may not understand the identify
2784 * message, we include lun information in the
2785 * second byte of all commands. SCSI-1 specifies
2786 * that luns are a 3 bit value and reserves only 3
2787 * bits for lun information in the CDB. Later
2788 * revisions of the SCSI spec allow for more than 8
2789 * luns, but have deprecated lun information in the
 2790		 * CDB. So, if the lun won't fit, we must omit it.
2791 *
2792 * Also be aware that during initial probing for devices,
2793 * the inquiry information is unknown but initialized to 0.
2794 * This means that this code will be exercised while probing
2795 * devices with an ANSI revision greater than 2.
2796 */
2797 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2798 && start_ccb->ccb_h.target_lun < 8
2799 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2800
2801 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2802 start_ccb->ccb_h.target_lun << 5;
2803 }
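		/*
		 * Worked example (editorial note): for target_lun 2 on such
		 * a device, the shift above sets bits 7-5 of CDB byte 1:
		 * cdb_bytes[1] |= (2 << 5), i.e. |= 0x40.
		 */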
2804 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2805 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2806 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2807 &path->device->inq_data),
2808 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2809 cdb_str, sizeof(cdb_str))));
2810 /* FALLTHROUGH */
2811 }
2812 case XPT_TARGET_IO:
2813 case XPT_CONT_TARGET_IO:
2814 start_ccb->csio.sense_resid = 0;
2815 start_ccb->csio.resid = 0;
2816 /* FALLTHROUGH */
2817 case XPT_RESET_DEV:
2818 case XPT_ENG_EXEC:
2819 {
2820 struct cam_path *path;
2821 int s;
2822 int runq;
2823
2824 path = start_ccb->ccb_h.path;
2825 s = splsoftcam();
2826
2827 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2828 if (path->device->qfrozen_cnt == 0)
2829 runq = xpt_schedule_dev_sendq(path->bus, path->device);
2830 else
2831 runq = 0;
2832 splx(s);
2833 if (runq != 0)
2834 xpt_run_dev_sendq(path->bus);
2835 break;
2836 }
2837 case XPT_SET_TRAN_SETTINGS:
2838 {
2839 xpt_set_transfer_settings(&start_ccb->cts,
2840 start_ccb->ccb_h.path->device,
2841 /*async_update*/FALSE);
2842 break;
2843 }
2844 case XPT_CALC_GEOMETRY:
2845 {
2846 struct cam_sim *sim;
2847
2848 /* Filter out garbage */
2849 if (start_ccb->ccg.block_size == 0
2850 || start_ccb->ccg.volume_size == 0) {
2851 start_ccb->ccg.cylinders = 0;
2852 start_ccb->ccg.heads = 0;
2853 start_ccb->ccg.secs_per_track = 0;
2854 start_ccb->ccb_h.status = CAM_REQ_CMP;
2855 break;
2856 }
2857#ifdef PC98
2858 /*
 2859		 * In a PC-98 system, geometry translation depends on
2860 * the "real" device geometry obtained from mode page 4.
2861 * SCSI geometry translation is performed in the
2862 * initialization routine of the SCSI BIOS and the result
2863 * stored in host memory. If the translation is available
2864 * in host memory, use it. If not, rely on the default
2865 * translation the device driver performs.
2866 */
2867 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2868 start_ccb->ccb_h.status = CAM_REQ_CMP;
2869 break;
2870 }
2871#endif
2872 sim = start_ccb->ccb_h.path->bus->sim;
2873 (*(sim->sim_action))(sim, start_ccb);
2874 break;
2875 }
2876 case XPT_ABORT:
2877 {
2878 union ccb* abort_ccb;
2879 int s;
2880
2881 abort_ccb = start_ccb->cab.abort_ccb;
2882 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2883
2884 if (abort_ccb->ccb_h.pinfo.index >= 0) {
2885 struct cam_ccbq *ccbq;
2886
2887 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2888 cam_ccbq_remove_ccb(ccbq, abort_ccb);
2889 abort_ccb->ccb_h.status =
2890 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2891 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2892 s = splcam();
2893 xpt_done(abort_ccb);
2894 splx(s);
2895 start_ccb->ccb_h.status = CAM_REQ_CMP;
2896 break;
2897 }
2898 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2899 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2900 /*
2901 * We've caught this ccb en route to
2902 * the SIM. Flag it for abort and the
2903 * SIM will do so just before starting
2904 * real work on the CCB.
2905 */
2906 abort_ccb->ccb_h.status =
2907 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2908 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2909 start_ccb->ccb_h.status = CAM_REQ_CMP;
2910 break;
2911 }
2912 }
2913 if (XPT_FC_IS_QUEUED(abort_ccb)
2914 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2915 /*
2916 * It's already completed but waiting
2917 * for our SWI to get to it.
2918 */
2919 start_ccb->ccb_h.status = CAM_UA_ABORT;
2920 break;
2921 }
2922 /*
2923 * If we weren't able to take care of the abort request
2924 * in the XPT, pass the request down to the SIM for processing.
2925 */
2926 /* FALLTHROUGH */
2927 }
2928 case XPT_ACCEPT_TARGET_IO:
2929 case XPT_EN_LUN:
2930 case XPT_IMMED_NOTIFY:
2931 case XPT_NOTIFY_ACK:
2932 case XPT_GET_TRAN_SETTINGS:
2933 case XPT_RESET_BUS:
2934 {
2935 struct cam_sim *sim;
2936
2937 sim = start_ccb->ccb_h.path->bus->sim;
2938 (*(sim->sim_action))(sim, start_ccb);
2939 break;
2940 }
2941 case XPT_PATH_INQ:
2942 {
2943 struct cam_sim *sim;
2944
2945 sim = start_ccb->ccb_h.path->bus->sim;
2946 (*(sim->sim_action))(sim, start_ccb);
2947 break;
2948 }
2949 case XPT_PATH_STATS:
2950 start_ccb->cpis.last_reset =
2951 start_ccb->ccb_h.path->bus->last_reset;
2952 start_ccb->ccb_h.status = CAM_REQ_CMP;
2953 break;
2954 case XPT_GDEV_TYPE:
2955 {
2956 struct cam_ed *dev;
2957 int s;
2958
2959 dev = start_ccb->ccb_h.path->device;
2960 s = splcam();
2961 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2962 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2963 } else {
2964 struct ccb_getdev *cgd;
2965 struct cam_eb *bus;
2966 struct cam_et *tar;
2967
2968 cgd = &start_ccb->cgd;
2969 bus = cgd->ccb_h.path->bus;
2970 tar = cgd->ccb_h.path->target;
2971 cgd->inq_data = dev->inq_data;
2972 cgd->ccb_h.status = CAM_REQ_CMP;
2973 cgd->serial_num_len = dev->serial_num_len;
2974 if ((dev->serial_num_len > 0)
2975 && (dev->serial_num != NULL))
2976 bcopy(dev->serial_num, cgd->serial_num,
2977 dev->serial_num_len);
2978 }
2979 splx(s);
2980 break;
2981 }
2982 case XPT_GDEV_STATS:
2983 {
2984 struct cam_ed *dev;
2985 int s;
2986
2987 dev = start_ccb->ccb_h.path->device;
2988 s = splcam();
2989 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2990 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2991 } else {
2992 struct ccb_getdevstats *cgds;
2993 struct cam_eb *bus;
2994 struct cam_et *tar;
2995
2996 cgds = &start_ccb->cgds;
2997 bus = cgds->ccb_h.path->bus;
2998 tar = cgds->ccb_h.path->target;
2999 cgds->dev_openings = dev->ccbq.dev_openings;
3000 cgds->dev_active = dev->ccbq.dev_active;
3001 cgds->devq_openings = dev->ccbq.devq_openings;
3002 cgds->devq_queued = dev->ccbq.queue.entries;
3003 cgds->held = dev->ccbq.held;
3004 cgds->last_reset = tar->last_reset;
3005 cgds->maxtags = dev->quirk->maxtags;
3006 cgds->mintags = dev->quirk->mintags;
3007 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3008 cgds->last_reset = bus->last_reset;
3009 cgds->ccb_h.status = CAM_REQ_CMP;
3010 }
3011 splx(s);
3012 break;
3013 }
3014 case XPT_GDEVLIST:
3015 {
3016 struct cam_periph *nperiph;
3017 struct periph_list *periph_head;
3018 struct ccb_getdevlist *cgdl;
3019 int i;
3020 int s;
3021 struct cam_ed *device;
3022 int found;
3023
3024
3025 found = 0;
3026
3027 /*
3028 * Don't want anyone mucking with our data.
3029 */
3030 s = splcam();
3031 device = start_ccb->ccb_h.path->device;
3032 periph_head = &device->periphs;
3033 cgdl = &start_ccb->cgdl;
3034
3035 /*
3036 * Check and see if the list has changed since the user
3037 * last requested a list member. If so, tell them that the
3038 * list has changed, and therefore they need to start over
3039 * from the beginning.
3040 */
3041 if ((cgdl->index != 0) &&
3042 (cgdl->generation != device->generation)) {
3043 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3044 splx(s);
3045 break;
3046 }
3047
3048 /*
3049 * Traverse the list of peripherals and attempt to find
3050 * the requested peripheral.
3051 */
3052 for (nperiph = periph_head->slh_first, i = 0;
3053 (nperiph != NULL) && (i <= cgdl->index);
3054 nperiph = nperiph->periph_links.sle_next, i++) {
3055 if (i == cgdl->index) {
3056 strncpy(cgdl->periph_name,
3057 nperiph->periph_name,
3058 DEV_IDLEN);
3059 cgdl->unit_number = nperiph->unit_number;
3060 found = 1;
3061 }
3062 }
3063 if (found == 0) {
3064 cgdl->status = CAM_GDEVLIST_ERROR;
3065 splx(s);
3066 break;
3067 }
3068
3069 if (nperiph == NULL)
3070 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3071 else
3072 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3073
3074 cgdl->index++;
3075 cgdl->generation = device->generation;
3076
3077 splx(s);
3078 cgdl->ccb_h.status = CAM_REQ_CMP;
3079 break;
3080 }
3081 case XPT_DEV_MATCH:
3082 {
3083 int s;
3084 dev_pos_type position_type;
3085 struct ccb_dev_match *cdm;
3086 int ret;
3087
3088 cdm = &start_ccb->cdm;
3089
3090 /*
3091 * Prevent EDT changes while we traverse it.
3092 */
3093 s = splcam();
3094 /*
3095 * There are two ways of getting at information in the EDT.
3096 * The first way is via the primary EDT tree. It starts
3097 * with a list of busses, then a list of targets on a bus,
3098 * then devices/luns on a target, and then peripherals on a
3099 * device/lun. The "other" way is by the peripheral driver
3100 * lists. The peripheral driver lists are organized by
 3101		 * peripheral driver (obviously), so it makes sense to
3102 * use the peripheral driver list if the user is looking
3103 * for something like "da1", or all "da" devices. If the
3104 * user is looking for something on a particular bus/target
3105 * or lun, it's generally better to go through the EDT tree.
3106 */
3107
3108 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3109 position_type = cdm->pos.position_type;
3110 else {
3111 int i;
3112
3113 position_type = CAM_DEV_POS_NONE;
3114
3115 for (i = 0; i < cdm->num_patterns; i++) {
3116 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3117 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3118 position_type = CAM_DEV_POS_EDT;
3119 break;
3120 }
3121 }
3122
3123 if (cdm->num_patterns == 0)
3124 position_type = CAM_DEV_POS_EDT;
3125 else if (position_type == CAM_DEV_POS_NONE)
3126 position_type = CAM_DEV_POS_PDRV;
3127 }
3128
3129 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3130 case CAM_DEV_POS_EDT:
3131 ret = xptedtmatch(cdm);
3132 break;
3133 case CAM_DEV_POS_PDRV:
3134 ret = xptperiphlistmatch(cdm);
3135 break;
3136 default:
3137 cdm->status = CAM_DEV_MATCH_ERROR;
3138 break;
3139 }
3140
3141 splx(s);
3142
3143 if (cdm->status == CAM_DEV_MATCH_ERROR)
3144 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3145 else
3146 start_ccb->ccb_h.status = CAM_REQ_CMP;
3147
3148 break;
3149 }
3150 case XPT_SASYNC_CB:
3151 {
3152 struct ccb_setasync *csa;
3153 struct async_node *cur_entry;
3154 struct async_list *async_head;
3155 u_int32_t added;
3156 int s;
3157
3158 csa = &start_ccb->csa;
3159 added = csa->event_enable;
3160 async_head = &csa->ccb_h.path->device->asyncs;
3161
3162 /*
3163 * If there is already an entry for us, simply
3164 * update it.
3165 */
3166 s = splcam();
3167 cur_entry = SLIST_FIRST(async_head);
3168 while (cur_entry != NULL) {
3169 if ((cur_entry->callback_arg == csa->callback_arg)
3170 && (cur_entry->callback == csa->callback))
3171 break;
3172 cur_entry = SLIST_NEXT(cur_entry, links);
3173 }
3174
3175 if (cur_entry != NULL) {
3176 /*
3177 * If the request has no flags set,
3178 * remove the entry.
3179 */
3180 added &= ~cur_entry->event_enable;
3181 if (csa->event_enable == 0) {
3182 SLIST_REMOVE(async_head, cur_entry,
3183 async_node, links);
3184 csa->ccb_h.path->device->refcount--;
3185 free(cur_entry, M_DEVBUF);
3186 } else {
3187 cur_entry->event_enable = csa->event_enable;
3188 }
3189 } else {
 3190			cur_entry = malloc(sizeof(*cur_entry),
 3191			    M_DEVBUF, M_INTWAIT);
3192 cur_entry->event_enable = csa->event_enable;
3193 cur_entry->callback_arg = csa->callback_arg;
3194 cur_entry->callback = csa->callback;
3195 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3196 csa->ccb_h.path->device->refcount++;
3197 }
3198
3199 if ((added & AC_FOUND_DEVICE) != 0) {
3200 /*
3201 * Get this peripheral up to date with all
3202 * the currently existing devices.
3203 */
3204 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3205 }
3206 if ((added & AC_PATH_REGISTERED) != 0) {
3207 /*
3208 * Get this peripheral up to date with all
3209 * the currently existing busses.
3210 */
3211 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3212 }
3213 splx(s);
3214 start_ccb->ccb_h.status = CAM_REQ_CMP;
3215 break;
3216 }
3217 case XPT_REL_SIMQ:
3218 {
3219 struct ccb_relsim *crs;
3220 struct cam_ed *dev;
3221 int s;
3222
3223 crs = &start_ccb->crs;
3224 dev = crs->ccb_h.path->device;
3225 if (dev == NULL) {
3226
3227 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3228 break;
3229 }
3230
3231 s = splcam();
3232
3233 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3234
3235 if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3236
3237 /* Don't ever go below one opening */
3238 if (crs->openings > 0) {
3239 xpt_dev_ccbq_resize(crs->ccb_h.path,
3240 crs->openings);
3241
3242 if (bootverbose) {
3243 xpt_print_path(crs->ccb_h.path);
3244 printf("tagged openings "
3245 "now %d\n",
3246 crs->openings);
3247 }
3248 }
3249 }
3250 }
3251
3252 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3253
3254 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3255
3256 /*
3257 * Just extend the old timeout and decrement
3258 * the freeze count so that a single timeout
3259 * is sufficient for releasing the queue.
3260 */
3261 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3262 untimeout(xpt_release_devq_timeout,
3263 dev, dev->c_handle);
3264 } else {
3265
3266 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3267 }
3268
3269 dev->c_handle =
3270 timeout(xpt_release_devq_timeout,
3271 dev,
3272 (crs->release_timeout * hz) / 1000);
3273
3274 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3275
3276 }
3277
3278 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3279
3280 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3281 /*
3282 * Decrement the freeze count so that a single
3283 * completion is still sufficient to unfreeze
3284 * the queue.
3285 */
3286 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3287 } else {
3288
3289 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3290 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3291 }
3292 }
3293
3294 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3295
3296 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3297 || (dev->ccbq.dev_active == 0)) {
3298
3299 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3300 } else {
3301
3302 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3303 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3304 }
3305 }
3306 splx(s);
3307
3308 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3309
3310 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3311 /*run_queue*/TRUE);
3312 }
3313 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3314 start_ccb->ccb_h.status = CAM_REQ_CMP;
3315 break;
3316 }
3317 case XPT_SCAN_BUS:
3318 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3319 break;
3320 case XPT_SCAN_LUN:
3321 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3322 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3323 start_ccb);
3324 break;
3325 case XPT_DEBUG: {
3326#ifdef CAMDEBUG
3327 int s;
3328
3329 s = splcam();
3330#ifdef CAM_DEBUG_DELAY
3331 cam_debug_delay = CAM_DEBUG_DELAY;
3332#endif
3333 cam_dflags = start_ccb->cdbg.flags;
3334 if (cam_dpath != NULL) {
3335 xpt_free_path(cam_dpath);
3336 cam_dpath = NULL;
3337 }
3338
3339 if (cam_dflags != CAM_DEBUG_NONE) {
3340 if (xpt_create_path(&cam_dpath, xpt_periph,
3341 start_ccb->ccb_h.path_id,
3342 start_ccb->ccb_h.target_id,
3343 start_ccb->ccb_h.target_lun) !=
3344 CAM_REQ_CMP) {
3345 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3346 cam_dflags = CAM_DEBUG_NONE;
3347 } else {
3348 start_ccb->ccb_h.status = CAM_REQ_CMP;
3349 xpt_print_path(cam_dpath);
3350 printf("debugging flags now %x\n", cam_dflags);
3351 }
3352 } else {
3353 cam_dpath = NULL;
3354 start_ccb->ccb_h.status = CAM_REQ_CMP;
3355 }
3356 splx(s);
3357#else /* !CAMDEBUG */
3358 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3359#endif /* CAMDEBUG */
3360 break;
3361 }
3362 case XPT_NOOP:
3363 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3364 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3365 start_ccb->ccb_h.status = CAM_REQ_CMP;
3366 break;
3367 default:
3368 case XPT_SDEV_TYPE:
3369 case XPT_TERM_IO:
3370 case XPT_ENG_INQ:
3371 /* XXX Implement */
3372 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3373 break;
3374 }
3375 splx(iopl);
3376}
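
/*
 * Example (editorial sketch): a typical XPT_REL_SIMQ request as issued by
 * a peripheral driver to resize the tagged openings of a device, exercising
 * the RELSIM_ADJUST_OPENINGS path handled above.  It assumes the caller has
 * previously frozen the device queue (e.g. on a queue-full condition), since
 * the handler releases one freeze count on the way out.  Illustrative only,
 * not compiled.
 */
#if 0
static void
example_adjust_openings(struct cam_path *path, int openings)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = openings;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	xpt_action((union ccb *)&crs);
}
#endif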
3377
3378void
3379xpt_polled_action(union ccb *start_ccb)
3380{
3381 int s;
3382 u_int32_t timeout;
3383 struct cam_sim *sim;
3384 struct cam_devq *devq;
3385 struct cam_ed *dev;
3386
3387 timeout = start_ccb->ccb_h.timeout;
3388 sim = start_ccb->ccb_h.path->bus->sim;
3389 devq = sim->devq;
3390 dev = start_ccb->ccb_h.path->device;
3391
3392 s = splcam();
3393
3394 /*
3395 * Steal an opening so that no other queued requests
3396 * can get it before us while we simulate interrupts.
3397 */
3398 dev->ccbq.devq_openings--;
3399 dev->ccbq.dev_openings--;
3400
3401 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3402 && (--timeout > 0)) {
3403 DELAY(1000);
3404 (*(sim->sim_poll))(sim);
 3405		swi_camnet(NULL);
 3406		swi_cambio(NULL);
3407 }
3408
3409 dev->ccbq.devq_openings++;
3410 dev->ccbq.dev_openings++;
3411
3412 if (timeout != 0) {
3413 xpt_action(start_ccb);
3414 while(--timeout > 0) {
3415 (*(sim->sim_poll))(sim);
 3416			swi_camnet(NULL);
 3417			swi_cambio(NULL);
3418 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3419 != CAM_REQ_INPROG)
3420 break;
3421 DELAY(1000);
3422 }
3423 if (timeout == 0) {
3424 /*
3425 * XXX Is it worth adding a sim_timeout entry
3426 * point so we can attempt recovery? If
3427 * this is only used for dumps, I don't think
3428 * it is.
3429 */
3430 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3431 }
3432 } else {
3433 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3434 }
3435 splx(s);
3436}
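
/*
 * Example (editorial sketch): the classic consumer of xpt_polled_action()
 * is a crash-dump or shutdown path where interrupts cannot be relied on.
 * A CCB is filled in as usual and xpt_polled_action() is substituted for
 * xpt_action().  scsi_test_unit_ready() comes from scsi_all.h; passing a
 * NULL completion callback is assumed acceptable in a purely polled
 * context.  Illustrative only, not compiled.
 */
#if 0
static int
example_polled_tur(struct cam_periph *periph)
{
	struct ccb_scsiio csio;

	xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
	scsi_test_unit_ready(&csio, /*retries*/0, /*cbfcnp*/NULL,
			     MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE,
			     /*timeout*/5000);
	xpt_polled_action((union ccb *)&csio);
	return ((csio.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP);
}
#endif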
3437
3438/*
 3439 * Schedule a peripheral driver to receive a ccb when its
3440 * target device has space for more transactions.
3441 */
3442void
3443xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3444{
3445 struct cam_ed *device;
3446 int s;
3447 int runq;
3448
3449 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3450 device = perph->path->device;
3451 s = splsoftcam();
3452 if (periph_is_queued(perph)) {
3453 /* Simply reorder based on new priority */
3454 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3455 (" change priority to %d\n", new_priority));
3456 if (new_priority < perph->pinfo.priority) {
3457 camq_change_priority(&device->drvq,
3458 perph->pinfo.index,
3459 new_priority);
3460 }
3461 runq = 0;
3462 } else {
3463 /* New entry on the queue */
3464 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3465 (" added periph to queue\n"));
3466 perph->pinfo.priority = new_priority;
3467 perph->pinfo.generation = ++device->drvq.generation;
3468 camq_insert(&device->drvq, &perph->pinfo);
3469 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3470 }
3471 splx(s);
3472 if (runq != 0) {
3473 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3474 (" calling xpt_run_devq\n"));
3475 xpt_run_dev_allocq(perph->path->bus);
3476 }
3477}
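
/*
 * Example (editorial note): peripheral drivers kick this machinery from
 * their strategy/open paths; once xpt_run_dev_allocq() obtains a CCB, the
 * driver's periph_start() routine is handed that CCB.  Illustrative only.
 */
#if 0
	/*
	 * In a hypothetical driver's strategy routine, after queueing the
	 * new request on its softc:
	 */
	xpt_schedule(periph, /*priority*/1);
#endif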
3478
3479
3480/*
3481 * Schedule a device to run on a given queue.
3482 * If the device was inserted as a new entry on the queue,
3483 * return 1 meaning the device queue should be run. If we
3484 * were already queued, implying someone else has already
3485 * started the queue, return 0 so the caller doesn't attempt
 3486 * to run the queue. Must be run at splsoftcam or higher
 3487 * (splcam also suffices, since it encompasses splsoftcam).
3488 */
3489static int
3490xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3491 u_int32_t new_priority)
3492{
3493 int retval;
3494 u_int32_t old_priority;
3495
3496 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3497
3498 old_priority = pinfo->priority;
3499
3500 /*
3501 * Are we already queued?
3502 */
3503 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3504 /* Simply reorder based on new priority */
3505 if (new_priority < old_priority) {
3506 camq_change_priority(queue, pinfo->index,
3507 new_priority);
3508 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3509 ("changed priority to %d\n",
3510 new_priority));
3511 }
3512 retval = 0;
3513 } else {
3514 /* New entry on the queue */
3515 if (new_priority < old_priority)
3516 pinfo->priority = new_priority;
3517
3518 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3519 ("Inserting onto queue\n"));
3520 pinfo->generation = ++queue->generation;
3521 camq_insert(queue, pinfo);
3522 retval = 1;
3523 }
3524 return (retval);
3525}
3526
3527static void
3528xpt_run_dev_allocq(struct cam_eb *bus)
3529{
3530 struct cam_devq *devq;
3531 int s;
3532
3533 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3534 devq = bus->sim->devq;
3535
3536 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3537 (" qfrozen_cnt == 0x%x, entries == %d, "
3538 "openings == %d, active == %d\n",
3539 devq->alloc_queue.qfrozen_cnt,
3540 devq->alloc_queue.entries,
3541 devq->alloc_openings,
3542 devq->alloc_active));
3543
3544 s = splsoftcam();
3545 devq->alloc_queue.qfrozen_cnt++;
3546 while ((devq->alloc_queue.entries > 0)
3547 && (devq->alloc_openings > 0)
3548 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3549 struct cam_ed_qinfo *qinfo;
3550 struct cam_ed *device;
3551 union ccb *work_ccb;
3552 struct cam_periph *drv;
3553 struct camq *drvq;
3554
3555 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3556 CAMQ_HEAD);
3557 device = qinfo->device;
3558
3559 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3560 ("running device %p\n", device));
3561
3562 drvq = &device->drvq;
3563
3564#ifdef CAMDEBUG
3565 if (drvq->entries <= 0) {
3566 panic("xpt_run_dev_allocq: "
3567 "Device on queue without any work to do");
3568 }
3569#endif
3570 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3571 devq->alloc_openings--;
3572 devq->alloc_active++;
3573 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3574 splx(s);
3575 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3576 drv->pinfo.priority);
3577 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3578 ("calling periph start\n"));
3579 drv->periph_start(drv, work_ccb);
3580 } else {
3581 /*
3582 * Malloc failure in alloc_ccb
3583 */
3584 /*
3585 * XXX add us to a list to be run from free_ccb
3586 * if we don't have any ccbs active on this
3587 * device queue otherwise we may never get run
3588 * again.
3589 */
3590 break;
3591 }
3592
3593 /* Raise IPL for possible insertion and test at top of loop */
3594 s = splsoftcam();
3595
3596 if (drvq->entries > 0) {
3597 /* We have more work. Attempt to reschedule */
3598 xpt_schedule_dev_allocq(bus, device);
3599 }
3600 }
3601 devq->alloc_queue.qfrozen_cnt--;
3602 splx(s);
3603}
3604
3605static void
3606xpt_run_dev_sendq(struct cam_eb *bus)
3607{
3608 struct cam_devq *devq;
3609 int s;
3610
3611 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3612
3613 devq = bus->sim->devq;
3614
3615 s = splcam();
3616 devq->send_queue.qfrozen_cnt++;
3617 splx(s);
3618 s = splsoftcam();
3619 while ((devq->send_queue.entries > 0)
3620 && (devq->send_openings > 0)) {
3621 struct cam_ed_qinfo *qinfo;
3622 struct cam_ed *device;
3623 union ccb *work_ccb;
3624 struct cam_sim *sim;
3625 int ospl;
3626
3627 ospl = splcam();
3628 if (devq->send_queue.qfrozen_cnt > 1) {
3629 splx(ospl);
3630 break;
3631 }
3632
3633 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3634 CAMQ_HEAD);
3635 device = qinfo->device;
3636
3637 /*
3638 * If the device has been "frozen", don't attempt
3639 * to run it.
3640 */
3641 if (device->qfrozen_cnt > 0) {
3642 splx(ospl);
3643 continue;
3644 }
3645
3646 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3647 ("running device %p\n", device));
3648
3649 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3650 if (work_ccb == NULL) {
3651 printf("device on run queue with no ccbs???\n");
3652 splx(ospl);
3653 continue;
3654 }
3655
3656 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3657
3658 if (num_highpower <= 0) {
3659 /*
3660 * We got a high power command, but we
3661 * don't have any available slots. Freeze
3662 * the device queue until we have a slot
3663 * available.
3664 */
3665 device->qfrozen_cnt++;
3666 STAILQ_INSERT_TAIL(&highpowerq,
3667 &work_ccb->ccb_h,
3668 xpt_links.stqe);
3669
3670 splx(ospl);
3671 continue;
3672 } else {
3673 /*
3674 * Consume a high power slot while
3675 * this ccb runs.
3676 */
3677 num_highpower--;
3678 }
3679 }
3680 devq->active_dev = device;
3681 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3682
3683 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3684 splx(ospl);
3685
3686 devq->send_openings--;
3687 devq->send_active++;
3688
3689 if (device->ccbq.queue.entries > 0)
3690 xpt_schedule_dev_sendq(bus, device);
3691
3692 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3693 /*
3694 * The client wants to freeze the queue
3695 * after this CCB is sent.
3696 */
3697 ospl = splcam();
3698 device->qfrozen_cnt++;
3699 splx(ospl);
3700 }
3701
3702 splx(s);
3703
3704 /* In Target mode, the peripheral driver knows best... */
3705 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3706 if ((device->inq_flags & SID_CmdQue) != 0
3707 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3708 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3709 else
3710 /*
3711 * Clear this in case of a retried CCB that
3712 * failed due to a rejected tag.
3713 */
3714 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3715 }
3716
3717 /*
3718 * Device queues can be shared among multiple sim instances
3719 * that reside on different busses. Use the SIM in the queue
3720 * CCB's path, rather than the one in the bus that was passed
3721 * into this function.
3722 */
3723 sim = work_ccb->ccb_h.path->bus->sim;
3724 (*(sim->sim_action))(sim, work_ccb);
3725
3726 ospl = splcam();
3727 devq->active_dev = NULL;
3728 splx(ospl);
3729 /* Raise IPL for possible insertion and test at top of loop */
3730 s = splsoftcam();
3731 }
3732 splx(s);
3733 s = splcam();
3734 devq->send_queue.qfrozen_cnt--;
3735 splx(s);
3736}
3737
3738/*
 3739 * This function merges the contents of the slave ccb into the master ccb,
 3740 * while keeping the master ccb's identifying header fields intact.
3741 */
3742void
3743xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3744{
3745 /*
3746 * Pull fields that are valid for peripheral drivers to set
3747 * into the master CCB along with the CCB "payload".
3748 */
3749 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3750 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3751 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3752 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
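	/* The address one past the header is where the CCB payload begins. */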
3753 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3754 sizeof(union ccb) - sizeof(struct ccb_hdr));
3755}
3756
3757void
3758xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3759{
3760 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3761 ccb_h->pinfo.priority = priority;
3762 ccb_h->path = path;
3763 ccb_h->path_id = path->bus->path_id;
3764 if (path->target)
3765 ccb_h->target_id = path->target->target_id;
3766 else
3767 ccb_h->target_id = CAM_TARGET_WILDCARD;
3768 if (path->device) {
3769 ccb_h->target_lun = path->device->lun_id;
3770 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3771 } else {
3772 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3773 }
3774 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3775 ccb_h->flags = 0;
3776}
3777
3778/* Path manipulation functions */
3779cam_status
3780xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3781 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3782{
3783 struct cam_path *path;
3784 cam_status status;
3785
 3786	path = malloc(sizeof(*path), M_DEVBUF, M_INTWAIT);
3787 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3788 if (status != CAM_REQ_CMP) {
3789 free(path, M_DEVBUF);
3790 path = NULL;
3791 }
3792 *new_path_ptr = path;
3793 return (status);
3794}
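
/*
 * Example (editorial sketch): the typical create/use/free lifetime of a
 * path from a caller's point of view.  Illustrative only, not compiled.
 */
#if 0
static cam_status
example_path_use(path_id_t bus, target_id_t targ, lun_id_t lun)
{
	struct cam_path *path;
	cam_status status;

	status = xpt_create_path(&path, /*periph*/NULL, bus, targ, lun);
	if (status != CAM_REQ_CMP)
		return (status);
	xpt_print_path(path);
	printf("example path created\n");
	xpt_free_path(path);
	return (CAM_REQ_CMP);
}
#endif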
3795
3796static cam_status
3797xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3798 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3799{
3800 struct cam_eb *bus;
3801 struct cam_et *target;
3802 struct cam_ed *device;
3803 cam_status status;
3804 int s;
3805
3806 status = CAM_REQ_CMP; /* Completed without error */
3807 target = NULL; /* Wildcarded */
3808 device = NULL; /* Wildcarded */
3809
3810 /*
3811 * We will potentially modify the EDT, so block interrupts
3812 * that may attempt to create cam paths.
3813 */
3814 s = splcam();
3815 bus = xpt_find_bus(path_id);
3816 if (bus == NULL) {
3817 status = CAM_PATH_INVALID;
3818 } else {
3819 target = xpt_find_target(bus, target_id);
3820 if (target == NULL) {
3821 /* Create one */
3822 struct cam_et *new_target;
3823
3824 new_target = xpt_alloc_target(bus, target_id);
3825 if (new_target == NULL) {
3826 status = CAM_RESRC_UNAVAIL;
3827 } else {
3828 target = new_target;
3829 }
3830 }
3831 if (target != NULL) {
3832 device = xpt_find_device(target, lun_id);
3833 if (device == NULL) {
3834 /* Create one */
3835 struct cam_ed *new_device;
3836
3837 new_device = xpt_alloc_device(bus,
3838 target,
3839 lun_id);
3840 if (new_device == NULL) {
3841 status = CAM_RESRC_UNAVAIL;
3842 } else {
3843 device = new_device;
3844 }
3845 }
3846 }
3847 }
3848 splx(s);
3849
3850 /*
3851 * Only touch the user's data if we are successful.
3852 */
3853 if (status == CAM_REQ_CMP) {
3854 new_path->periph = perph;
3855 new_path->bus = bus;
3856 new_path->target = target;
3857 new_path->device = device;
3858 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3859 } else {
3860 if (device != NULL)
3861 xpt_release_device(bus, target, device);
3862 if (target != NULL)
3863 xpt_release_target(bus, target);
3864 if (bus != NULL)
3865 xpt_release_bus(bus);
3866 }
3867 return (status);
3868}
3869
3870static void
3871xpt_release_path(struct cam_path *path)
3872{
3873 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3874 if (path->device != NULL) {
3875 xpt_release_device(path->bus, path->target, path->device);
3876 path->device = NULL;
3877 }
3878 if (path->target != NULL) {
3879 xpt_release_target(path->bus, path->target);
3880 path->target = NULL;
3881 }
3882 if (path->bus != NULL) {
3883 xpt_release_bus(path->bus);
3884 path->bus = NULL;
3885 }
3886}
3887
3888void
3889xpt_free_path(struct cam_path *path)
3890{
3891 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3892 xpt_release_path(path);
3893 free(path, M_DEVBUF);
3894}
3895
3896
3897/*
3898 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3899 * in path1, 2 for match with wildcards in path2.
3900 */
3901int
3902xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3903{
3904 int retval = 0;
3905
3906 if (path1->bus != path2->bus) {
3907 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3908 retval = 1;
3909 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3910 retval = 2;
3911 else
3912 return (-1);
3913 }
3914 if (path1->target != path2->target) {
3915 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3916 if (retval == 0)
3917 retval = 1;
3918 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3919 retval = 2;
3920 else
3921 return (-1);
3922 }
3923 if (path1->device != path2->device) {
3924 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3925 if (retval == 0)
3926 retval = 1;
3927 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3928 retval = 2;
3929 else
3930 return (-1);
3931 }
3932 return (retval);
3933}
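/*
 * Illustrative sketch (editor's addition): callers matching async events
 * against registered paths usually treat any non-negative result as "the
 * paths overlap", and only distinguish which side carried the wildcards
 * when it matters.
 *
 *	switch (xpt_path_comp(event_path, registered_path)) {
 *	case -1:
 *		... no overlap, ignore the event ...
 *		break;
 *	case 0:
 *		... exact match ...
 *		break;
 *	case 1:
 *		... event_path contains the wildcards ...
 *		break;
 *	case 2:
 *		... registered_path contains the wildcards ...
 *		break;
 *	}
 */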
3934
3935void
3936xpt_print_path(struct cam_path *path)
3937{
3938 if (path == NULL)
3939 printf("(nopath): ");
3940 else {
3941 if (path->periph != NULL)
3942 printf("(%s%d:", path->periph->periph_name,
3943 path->periph->unit_number);
3944 else
3945 printf("(noperiph:");
3946
3947 if (path->bus != NULL)
3948 printf("%s%d:%d:", path->bus->sim->sim_name,
3949 path->bus->sim->unit_number,
3950 path->bus->sim->bus_id);
3951 else
3952 printf("nobus:");
3953
3954 if (path->target != NULL)
3955 printf("%d:", path->target->target_id);
3956 else
3957 printf("X:");
3958
3959 if (path->device != NULL)
3960 printf("%d): ", path->device->lun_id);
3961 else
3962 printf("X): ");
3963 }
3964}
3965
3966path_id_t
3967xpt_path_path_id(struct cam_path *path)
3968{
3969 return(path->bus->path_id);
3970}
3971
3972target_id_t
3973xpt_path_target_id(struct cam_path *path)
3974{
3975 if (path->target != NULL)
3976 return (path->target->target_id);
3977 else
3978 return (CAM_TARGET_WILDCARD);
3979}
3980
3981lun_id_t
3982xpt_path_lun_id(struct cam_path *path)
3983{
3984 if (path->device != NULL)
3985 return (path->device->lun_id);
3986 else
3987 return (CAM_LUN_WILDCARD);
3988}
3989
3990struct cam_sim *
3991xpt_path_sim(struct cam_path *path)
3992{
3993 return (path->bus->sim);
3994}
3995
3996struct cam_periph*
3997xpt_path_periph(struct cam_path *path)
3998{
3999 return (path->periph);
4000}
4001
4002/*
4003 * Release a CAM control block for the caller. Remit the cost of the structure
 4004 * to the device referenced by the path. If this device had no 'credits'
 4005 * and peripheral drivers have registered async callbacks for this
 4006 * notification, call them now.
4007 */
4008void
4009xpt_release_ccb(union ccb *free_ccb)
4010{
4011 int s;
4012 struct cam_path *path;
4013 struct cam_ed *device;
4014 struct cam_eb *bus;
4015
4016 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4017 path = free_ccb->ccb_h.path;
4018 device = path->device;
4019 bus = path->bus;
4020 s = splsoftcam();
4021 cam_ccbq_release_opening(&device->ccbq);
4022 if (xpt_ccb_count > xpt_max_ccbs) {
4023 xpt_free_ccb(free_ccb);
4024 xpt_ccb_count--;
4025 } else {
4026 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4027 }
4028 bus->sim->devq->alloc_openings++;
4029 bus->sim->devq->alloc_active--;
4030 /* XXX Turn this into an inline function - xpt_run_device?? */
4031 if ((device_is_alloc_queued(device) == 0)
4032 && (device->drvq.entries > 0)) {
4033 xpt_schedule_dev_allocq(bus, device);
4034 }
4035 splx(s);
4036 if (dev_allocq_is_runnable(bus->sim->devq))
4037 xpt_run_dev_allocq(bus);
4038}
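/*
 * Illustrative sketch (editor's addition): a peripheral driver's
 * completion routine normally returns its CCB here once it has examined
 * the status, releasing the device's queue opening and potentially
 * kicking the allocation queue for waiting peripherals.  The xxxdone()
 * name is hypothetical.
 *
 *	static void
 *	xxxdone(struct cam_periph *periph, union ccb *done_ccb)
 *	{
 *		... inspect done_ccb->ccb_h.status, finish the request ...
 *		xpt_release_ccb(done_ccb);
 *	}
 */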
4039
4040/* Functions accessed by SIM drivers */
4041
4042/*
4043 * A sim structure, listing the SIM entry points and instance
 4044 * identification info, is passed to xpt_bus_register to hook the SIM
4045 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4046 * for this new bus and places it in the array of busses and assigns
4047 * it a path_id. The path_id may be influenced by "hard wiring"
4048 * information specified by the user. Once interrupt services are
 4049 * available, the bus will be probed.
4050 */
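/*
 * Illustrative sketch (editor's addition): a SIM driver's attach routine
 * typically allocates its sim and then registers the bus as below.  The
 * xxx_action/xxx_poll callbacks, softc, and queue sizes are placeholders,
 * and the cam_sim_alloc() argument order is an assumption for this era of
 * the API.
 *
 *	sim = cam_sim_alloc(xxx_action, xxx_poll, "xxx", softc, unit,
 *			    1, tag_openings, devq);
 *	if (sim == NULL || xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		... fail the attach ...
 */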
4051int32_t
4052xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4053{
4054 struct cam_eb *new_bus;
4055 struct cam_eb *old_bus;
4056 struct ccb_pathinq cpi;
4057 int s;
4058
4059 sim->bus_id = bus;
4060	new_bus = malloc(sizeof(*new_bus), M_DEVBUF, M_INTWAIT);
4061
4062 if (strcmp(sim->sim_name, "xpt") != 0) {
4063 sim->path_id =
4064 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4065 }
4066
4067 TAILQ_INIT(&new_bus->et_entries);
4068 new_bus->path_id = sim->path_id;
4069 new_bus->sim = sim;
4070	++sim->refcount;
4071 timevalclear(&new_bus->last_reset);
4072 new_bus->flags = 0;
4073 new_bus->refcount = 1; /* Held until a bus_deregister event */
4074 new_bus->generation = 0;
4075 s = splcam();
4076 old_bus = TAILQ_FIRST(&xpt_busses);
4077 while (old_bus != NULL
4078 && old_bus->path_id < new_bus->path_id)