Bring in some CAM bug fixes from FreeBSD.
[dragonfly.git] sys/bus/cam/cam_xpt.c
/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.23 2005/03/15 20:42:12 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>
#include <machine/ipl.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t event_enable;         /* Async Event enables */
        void (*callback)(void *arg, u_int32_t code,
                         struct cam_path *path, void *args);
        void *callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
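
/*
 * CAM_MAX_HIGHPOWER is only a compile-time default; since "opt_cam.h" is
 * included above, a kernel config is expected to be able to override it
 * (e.g. "options CAM_MAX_HIGHPOWER=8"), though that depends on the option
 * being plumbed through the build's option files.
 */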

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct cam_ed_qinfo alloc_ccb_entry;
        struct cam_ed_qinfo send_ccb_entry;
        struct cam_et *target;
        lun_id_t lun_id;
        struct camq drvq;               /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct cam_ccbq ccbq;           /* Queue of pending ccbs */
        struct async_list asyncs;       /* Async callback info for this B/T/L */
        struct periph_list periphs;     /* All attached devices */
        u_int generation;               /* Generation number */
        struct cam_periph *owner;       /* Peripheral driver's ownership tag */
        struct xpt_quirk_entry *quirk;  /* Oddities about this device */
                                        /* Storage for the inquiry data */
        struct scsi_inquiry_data inq_data;
        u_int8_t inq_flags;             /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t queue_flags;           /* Queue flags from the control page */
        u_int8_t serial_num_len;
        u_int8_t *serial_num;
        u_int32_t qfrozen_cnt;
        u_int32_t flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
        u_int32_t tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t refcount;
        struct callout c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct cam_eb *bus;
        target_id_t target_id;
        u_int32_t refcount;
        u_int generation;
        struct timeval last_reset;      /* uptime of last reset */
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb) links;
        path_id_t path_id;
        struct cam_sim *sim;
        struct timeval last_reset;      /* uptime of last reset */
        u_int32_t flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t refcount;
        u_int generation;
};

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb *bus;
        struct cam_et *target;
        struct cam_ed *device;
};

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
        u_int mintags;
        u_int maxtags;
};
#define CAM_SCSI2_MAXLUN        8
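
/*
 * SCSI-2 LUN addressing is only 3 bits wide, hence the 8-LUN-per-target
 * limit above.  CAM_QUIRK_NOLUNS marks devices that should not be probed
 * for multiple LUNs at all, while CAM_QUIRK_HILUNS (e.g. the VERITAS entry
 * in the quirk table below) marks devices expected to expose LUNs beyond
 * the SCSI-2 limit.
 */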

typedef enum {
        XPT_FLAG_OPEN = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags flags;
        u_int32_t generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        /*
         * The Seagate Medalist Pro drives have very poor write
         * performance with anything more than 2 tags.
         *
         * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
         * Drive: <SEAGATE ST36530N 1444>
         *
         * Reported by: Jeremy Lea <reg@shale.csir.co.za>
         * Drive: <SEAGATE ST34520W 1281>
         *
         * No one has actually reported that the 9G version
         * (ST39140*) of the Medalist Pro has the same problem, but
         * we're assuming that it does because the 4G and 6.5G
         * versions of the drive are broken.
         */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR: kern/10398
                 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive: DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 *      NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by: Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR: misc/18793
                 * Submitted by: Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

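/*
 * Quirk lookup is first-match: the table is searched from the top and the
 * first entry whose inquiry pattern matches the device is used, which is
 * why the catch-all "default tagged queuing" entry must remain last.  A
 * direct-access "QUANTUM XP39100*" disk, for example, matches the first
 * entry and is limited to between 24 and 32 tags.
 */
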
typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;
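
/*
 * A dev_match_ret packs two independent fields: the low nibble
 * (DM_RET_FLAG_MASK) carries flags -- currently just DM_RET_COPY -- and
 * the high nibble (DM_RET_ACTION_MASK) carries the traversal action.
 * DM_RET_DESCEND | DM_RET_COPY (0x21), for example, tells the caller to
 * copy this node out to the user and keep descending the EDT.
 */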

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth depth;
        void *tr_func;
        void *tr_arg;
};
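
/*
 * tr_func is deliberately a bare void pointer; the traversal code is
 * expected to cast it back to whichever xpt_*func_t typedef (below)
 * corresponds to the recorded depth before invoking it.
 */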

typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);
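
/*
 * DATA_SET() places each driver in the "periphdriver_set" linker set;
 * xptioctl() below walks that set with SET_FOREACH() to look a peripheral
 * driver up by name.
 */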

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
        /* name */      "xpt",
        /* maj */       XPT_CDEV_MAJOR,
        /* flags */     0,
        /* port */      NULL,
        /* clone */     NULL,

        /* open */      xptopen,
        /* close */     xptclose,
        /* read */      noread,
        /* write */     nowrite,
        /* ioctl */     xptioctl,
        /* poll */      nopoll,
        /* mmap */      nommap,
        /* strategy */  nostrategy,
        /* dump */      nodump,
        /* psize */     nopsize
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static void xpt_init(void *);
SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);

static cam_status       xpt_compile_path(struct cam_path *new_path,
                                         struct cam_periph *perph,
                                         path_id_t path_id,
                                         target_id_t target_id,
                                         lun_id_t lun_id);

static void             xpt_release_path(struct cam_path *path);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static void             xpt_dev_async(u_int32_t async_code,
                                      struct cam_eb *bus,
                                      struct cam_et *target,
                                      struct cam_ed *device,
                                      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                  u_int32_t new_priority);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                         int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
                 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                                  lun_id_t lun_id);
static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                                    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void      xpt_scan_lun(struct cam_periph *periph,
                              struct cam_path *path, cam_flags flags,
                              union ccb *ccb);
static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t    xptconfigbuscountfunc;
static xpt_busfunc_t    xptconfigfunc;
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static inthand2_t swi_camnet;
static inthand2_t swi_cambio;
static void      camisr(cam_isrq_t *queue);
#if 0
static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void      xptasync(struct cam_periph *periph,
                          u_int32_t code, cam_path *path);
#endif
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                                            void *arg);
#endif
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
#ifdef notusedyet
static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                                            void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static cam_status       proberegister(struct cam_periph *periph,
                                      void *arg);
static void      probeschedule(struct cam_periph *probe_periph);
static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
static void      proberequestdefaultnegotiation(struct cam_periph *periph);
static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
static void      probecleanup(struct cam_periph *periph);
static void      xpt_find_quirk(struct cam_ed *device);
static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                           struct cam_ed *device,
                                           int async_update);
static void      xpt_toggle_tags(struct cam_path *path);
static void      xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
        cdevsw_add(&xpt_cdevsw, 0, 0);
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)addr;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;

                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
         * input, with the peripheral driver name and unit name filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass"), and unit number are passed back in the ccb.
         * The current device generation number, and the index into the
         * device peripheral driver list, and the status are also passed
         * back.  Note that since we do everything in one pass, unlike the
         * XPT_GDEVLIST ccb, we never return a status of
         * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
         * impossible for the device peripheral driver list to change since
         * we look at the whole thing in one pass, and we do it with splcam
         * protection.
         */
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char *name;
                int unit;
                int cur_generation;
                int base_periph_found;
                int splbreaknum;
                int s;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                /*
                 * Every 100 devices, we want to drop our spl protection to
                 * give the software interrupt handler a chance to run.
                 * Most systems won't run into this check, but this should
                 * avoid starvation in the software interrupt handler in
                 * large systems.
                 */
                splbreaknum = 100;

                ccb = (union ccb *)addr;

                base_periph_found = 0;

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                s = splcam();
ptstartover:
                cur_generation = xsoftc.generation;

                /* first find our driver in the list of drivers */
                SET_FOREACH(p_drv, periphdriver_set) {
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;
                }

                if (*p_drv == NULL) {
                        splx(s);
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {

                        if (periph->unit_number == unit) {
                                break;
                        } else if (--splbreaknum == 0) {
                                splx(s);
                                s = splcam();
                                splbreaknum = 100;
                                if (cur_generation != xsoftc.generation)
                                        goto ptstartover;
                        }
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = 1;
                        device = periph->path->device;
                        for (i = 0, periph = device->periphs.slh_first;
                             periph != NULL;
                             periph = periph->periph_links.sle_next, i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strcpy(ccb->cgdl.periph_name,
                                               periph->periph_name);
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (periph->periph_links.sle_next)
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                       CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  e.g. the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        /*
                         * It is unfortunate that this is even necessary,
                         * but there are many, many clueless users out there.
                         * If this is true, the user is looking for the
                         * passthrough driver, but doesn't have one in his
                         * kernel.
                         */
                        if (base_periph_found == 1) {
                                printf("xptioctl: pass driver is not in the "
                                       "kernel\n");
                                printf("xptioctl: put \"device pass0\" in "
                                       "your kernel config file\n");
                        }
                }
                splx(s);
                break;
        }
        default:
                error = ENOTTY;
                break;
        }

        return(error);
}

1302
1303/* Functions accessed by the peripheral drivers */
1304static void
1305xpt_init(dummy)
1306 void *dummy;
1307{
1308 struct cam_sim *xpt_sim;
1309 struct cam_path *path;
1310 struct cam_devq *devq;
1311 cam_status status;
1312
1313 TAILQ_INIT(&xpt_busses);
1314 TAILQ_INIT(&cam_bioq);
1315 TAILQ_INIT(&cam_netq);
1316 SLIST_INIT(&ccb_freeq);
1317 STAILQ_INIT(&highpowerq);
1318
1319 /*
1320 * The xpt layer is, itself, the equivelent of a SIM.
1321 * Allow 16 ccbs in the ccb pool for it. This should
1322 * give decent parallelism when we probe busses and
1323 * perform other XPT functions.
1324 */
1325 devq = cam_simq_alloc(16);
1326 xpt_sim = cam_sim_alloc(xptaction,
1327 xptpoll,
1328 "xpt",
1329 /*softc*/NULL,
1330 /*unit*/0,
1331 /*max_dev_transactions*/0,
1332 /*max_tagged_dev_transactions*/0,
1333 devq);
3aed1355 1334 cam_simq_release(devq);
984263bc
MD
1335 xpt_max_ccbs = 16;
1336
1337 xpt_bus_register(xpt_sim, /*bus #*/0);
1338
1339 /*
1340 * Looking at the XPT from the SIM layer, the XPT is
1341 * the equivelent of a peripheral driver. Allocate
1342 * a peripheral driver entry for us.
1343 */
1344 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1345 CAM_TARGET_WILDCARD,
1346 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1347 printf("xpt_init: xpt_create_path failed with status %#x,"
1348 " failing attach\n", status);
1349 return;
1350 }
1351
1352 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1353 path, NULL, 0, NULL);
1354 xpt_free_path(path);
1355
1356 xpt_sim->softc = xpt_periph;
1357
1358 /*
1359 * Register a callback for when interrupts are enabled.
1360 */
898d961b
MD
1361 xpt_config_hook = malloc(sizeof(struct intr_config_hook),
1362 M_TEMP, M_INTWAIT | M_ZERO);
984263bc 1363 xpt_config_hook->ich_func = xpt_config;
a1e26a0c 1364 xpt_config_hook->ich_desc = "xpt";
984263bc
MD
1365 if (config_intrhook_establish(xpt_config_hook) != 0) {
1366 free (xpt_config_hook, M_TEMP);
1367 printf("xpt_init: config_intrhook_establish failed "
1368 "- failing attach\n");
1369 }
1370
1371 /* Install our software interrupt handlers */
45d76888
MD
1372 register_swi(SWI_CAMNET, swi_camnet, NULL, "swi_camnet", NULL);
1373 register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
984263bc
MD
1374}
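
/*
 * Ordering note: xpt_init() runs from the SYSINIT above, before interrupts
 * are enabled, so bus probing is deferred; the config_intrhook registered
 * here fires xpt_config() once interrupts become available.
 */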

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
        if (periph == NULL) {
                printf("xptregister: periph was NULL!!\n");
                return(CAM_REQ_CMP_ERR);
        }

        periph->softc = NULL;

        xpt_periph = periph;

        return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
        struct cam_ed *device;
        int32_t status;
        struct periph_list *periph_head;

        device = periph->path->device;

        periph_head = &device->periphs;

        status = CAM_REQ_CMP;

        if (device != NULL) {
                int s;

                /*
                 * Make room for this peripheral
                 * so it will fit in the queue
                 * when it's scheduled to run
                 */
                s = splsoftcam();
                status = camq_resize(&device->drvq,
                                     device->drvq.array_size + 1);

                device->generation++;

                SLIST_INSERT_HEAD(periph_head, periph, periph_links);

                splx(s);
        }

        xsoftc.generation++;

        return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
        struct cam_ed *device;

        device = periph->path->device;

        if (device != NULL) {
                int s;
                struct periph_list *periph_head;

                periph_head = &device->periphs;

                /* Release the slot for this peripheral */
                s = splsoftcam();
                camq_resize(&device->drvq, device->drvq.array_size - 1);

                device->generation++;

                SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

                splx(s);
        }

        xsoftc.generation++;
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
        int s;
        u_int mb;
        struct cam_path *path;
        struct ccb_trans_settings cts;

        path = periph->path;
        /*
         * To ensure that this is printed in one piece,
         * mask out CAM interrupts.
         */
        s = splsoftcam();
        printf("%s%d at %s%d bus %d target %d lun %d\n",
               periph->periph_name, periph->unit_number,
               path->bus->sim->sim_name,
               path->bus->sim->unit_number,
               path->bus->sim->bus_id,
               path->target->target_id,
               path->device->lun_id);
        printf("%s%d: ", periph->periph_name, periph->unit_number);
        scsi_print_inquiry(&path->device->inq_data);
        if ((bootverbose)
         && (path->device->serial_num_len > 0)) {
                /* Don't wrap the screen - print only the first 60 chars */
                printf("%s%d: Serial Number %.60s\n", periph->periph_name,
                       periph->unit_number, path->device->serial_num);
        }
        xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
        cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
        cts.flags = CCB_TRANS_CURRENT_SETTINGS;
        xpt_action((union ccb*)&cts);
        if (cts.ccb_h.status == CAM_REQ_CMP) {
                u_int speed;
                u_int freq;

                if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
                 && cts.sync_offset != 0) {
                        freq = scsi_calc_syncsrate(cts.sync_period);
                        speed = freq;
                } else {
                        struct ccb_pathinq cpi;

                        /* Ask the SIM for its base transfer speed */
                        xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
                        cpi.ccb_h.func_code = XPT_PATH_INQ;
                        xpt_action((union ccb *)&cpi);

                        speed = cpi.base_transfer_speed;
                        freq = 0;
                }
                if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
                        speed *= (0x01 << cts.bus_width);
                mb = speed / 1000;
                if (mb > 0)
                        printf("%s%d: %d.%03dMB/s transfers",
                               periph->periph_name, periph->unit_number,
                               mb, speed % 1000);
                else
                        printf("%s%d: %dKB/s transfers", periph->periph_name,
                               periph->unit_number, speed);
                if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
                 && cts.sync_offset != 0) {
                        printf(" (%d.%03dMHz, offset %d", freq / 1000,
                               freq % 1000, cts.sync_offset);
                }
                if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
                 && cts.bus_width > 0) {
                        if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
                         && cts.sync_offset != 0) {
                                printf(", ");
                        } else {
                                printf(" (");
                        }
                        printf("%dbit)", 8 * (0x01 << cts.bus_width));
                } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
                        && cts.sync_offset != 0) {
                        printf(")");
                }

                if (path->device->inq_flags & SID_CmdQue
                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                        printf(", Tagged Queueing Enabled");
                }

                printf("\n");
        } else if (path->device->inq_flags & SID_CmdQue
                || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                printf("%s%d: Tagged Queueing Enabled\n",
                       periph->periph_name, periph->unit_number);
        }

        /*
         * We only want to print the caller's announce string if they've
         * passed one in.
         */
        if (announce_string != NULL)
                printf("%s%d: %s\n", periph->periph_name,
                       periph->unit_number, announce_string);
        splx(s);
}

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
            struct cam_eb *bus)
{
        dev_match_ret retval;
        int i;

        retval = DM_RET_NONE;

        /*
         * If we aren't given something to match against, that's an error.
         */
        if (bus == NULL)
                return(DM_RET_ERROR);

        /*
         * If there are no match entries, then this bus matches no
         * matter what.
         */
        if ((patterns == NULL) || (num_patterns == 0))
                return(DM_RET_DESCEND | DM_RET_COPY);

        for (i = 0; i < num_patterns; i++) {
                struct bus_match_pattern *cur_pattern;

                /*
                 * If the pattern in question isn't for a bus node, we
                 * aren't interested.  However, we do indicate to the
                 * calling routine that we should continue descending the
                 * tree, since the user wants to match against lower-level
                 * EDT elements.
                 */
                if (patterns[i].type != DEV_MATCH_BUS) {
                        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                                retval |= DM_RET_DESCEND;
                        continue;
                }

                cur_pattern = &patterns[i].pattern.bus_pattern;

                /*
                 * If they want to match any bus node, we give them any
                 * bus node.
                 */
                if (cur_pattern->flags == BUS_MATCH_ANY) {
                        /* set the copy flag */
                        retval |= DM_RET_COPY;

                        /*
                         * If we've already decided on an action, go ahead
                         * and return.
                         */
                        if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
                                return(retval);
                }

                /*
                 * Not sure why someone would do this...
                 */
                if (cur_pattern->flags == BUS_MATCH_NONE)
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
                 && (cur_pattern->path_id != bus->path_id))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
                 && (cur_pattern->bus_id != bus->sim->bus_id))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
                 && (cur_pattern->unit_number != bus->sim->unit_number))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
                 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
                             DEV_IDLEN) != 0))
                        continue;

                /*
                 * If we get to this point, the user definitely wants
                 * information on this bus.  So tell the caller to copy the
                 * data out.
                 */
                retval |= DM_RET_COPY;

                /*
                 * If the return action has been set to descend, then we
                 * know that we've already seen a non-bus matching
                 * expression, therefore we need to further descend the tree.
                 * This won't change by continuing around the loop, so we
                 * go ahead and return.  If we haven't seen a non-bus
                 * matching expression, we keep going around the loop until
                 * we exhaust the matching expressions.  We'll set the stop
                 * flag once we fall out of the loop.
                 */
                if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
                        return(retval);
        }

        /*
         * If the return action hasn't been set to descend yet, that means
         * we haven't seen anything other than bus matching patterns.  So
         * tell the caller to stop descending the tree -- the user doesn't
         * want to match against lower level tree elements.
         */
        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                retval |= DM_RET_STOP;

        return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
               struct cam_ed *device)
{
        dev_match_ret retval;
        int i;

        retval = DM_RET_NONE;

        /*
         * If we aren't given something to match against, that's an error.
         */
        if (device == NULL)
                return(DM_RET_ERROR);

        /*
         * If there are no match entries, then this device matches no
         * matter what.
         */
        if ((patterns == NULL) || (num_patterns == 0))
                return(DM_RET_DESCEND | DM_RET_COPY);

        for (i = 0; i < num_patterns; i++) {
                struct device_match_pattern *cur_pattern;

                /*
                 * If the pattern in question isn't for a device node, we
                 * aren't interested.
                 */
                if (patterns[i].type != DEV_MATCH_DEVICE) {
                        if ((patterns[i].type == DEV_MATCH_PERIPH)
                         && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
                                retval |= DM_RET_DESCEND;
                        continue;
                }

                cur_pattern = &patterns[i].pattern.device_pattern;

                /*
                 * If they want to match any device node, we give them any
                 * device node.
                 */
                if (cur_pattern->flags == DEV_MATCH_ANY) {
                        /* set the copy flag */
                        retval |= DM_RET_COPY;

                        /*
                         * If we've already decided on an action, go ahead
                         * and return.
                         */
                        if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
                                return(retval);
                }

                /*
                 * Not sure why someone would do this...
                 */
                if (cur_pattern->flags == DEV_MATCH_NONE)
                        continue;

                if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
                 && (cur_pattern->path_id != device->target->bus->path_id))
                        continue;

                if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
                 && (cur_pattern->target_id != device->target->target_id))
                        continue;

                if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
                 && (cur_pattern->target_lun != device->lun_id))
                        continue;

                if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
                 && (cam_quirkmatch((caddr_t)&device->inq_data,
                                    (caddr_t)&cur_pattern->inq_pat,
                                    1, sizeof(cur_pattern->inq_pat),
                                    scsi_static_inquiry_match) == NULL))
                        continue;

                /*
                 * If we get to this point, the user definitely wants
                 * information on this device.  So tell the caller to copy
                 * the data out.
                 */
                retval |= DM_RET_COPY;

                /*
                 * If the return action has been set to descend, then we
                 * know that we've already seen a peripheral matching
                 * expression, therefore we need to further descend the tree.
                 * This won't change by continuing around the loop, so we
                 * go ahead and return.  If we haven't seen a peripheral
                 * matching expression, we keep going around the loop until
                 * we exhaust the matching expressions.  We'll set the stop
                 * flag once we fall out of the loop.
                 */
                if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
                        return(retval);
        }

        /*
         * If the return action hasn't been set to descend yet, that means
         * we haven't seen any peripheral matching patterns.  So tell the
         * caller to stop descending the tree -- the user doesn't want to
         * match against lower level tree elements.
         */
        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                retval |= DM_RET_STOP;

        return(retval);
}
1784
1785/*
1786 * Match a single peripheral against any number of match patterns.
1787 */
1788static dev_match_ret
1789xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1790 struct cam_periph *periph)
1791{
1792 dev_match_ret retval;
1793 int i;
1794
1795 /*
1796 * If we aren't given something to match against, that's an error.
1797 */
1798 if (periph == NULL)
1799 return(DM_RET_ERROR);
1800
1801 /*
1802 * If there are no match entries, then this peripheral matches no
1803 * matter what.
1804 */
1805 if ((patterns == NULL) || (num_patterns == 0))
1806 return(DM_RET_STOP | DM_RET_COPY);
1807
1808 /*
1809 * There aren't any nodes below a peripheral node, so there's no
1810 * reason to descend the tree any further.
1811 */
1812 retval = DM_RET_STOP;
1813
1814 for (i = 0; i < num_patterns; i++) {
1815 struct periph_match_pattern *cur_pattern;
1816
1817 /*
1818 * If the pattern in question isn't for a peripheral, we
1819 * aren't interested.
1820 */
1821 if (patterns[i].type != DEV_MATCH_PERIPH)
1822 continue;
1823
1824 cur_pattern = &patterns[i].pattern.periph_pattern;
1825
1826 /*
1827 * If they want to match on anything, then we will do so.
1828 */
1829 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1830 /* set the copy flag */
1831 retval |= DM_RET_COPY;
1832
1833 /*
1834 * We've already set the return action to stop,
1835 * since there are no nodes below peripherals in
1836 * the tree.
1837 */
1838 return(retval);
1839 }
1840
1841 /*
1842 * Not sure why someone would do this...
1843 */
1844 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1845 continue;
1846
1847 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1848 && (cur_pattern->path_id != periph->path->bus->path_id))
1849 continue;
1850
1851 /*
1852 * For the target and lun id's, we have to make sure the
1853 * target and lun pointers aren't NULL. The xpt peripheral
1854 * has a wildcard target and device.
1855 */
1856 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1857 && ((periph->path->target == NULL)
1858 ||(cur_pattern->target_id != periph->path->target->target_id)))
1859 continue;
1860
1861 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1862 && ((periph->path->device == NULL)
1863 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1864 continue;
1865
1866 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1867 && (cur_pattern->unit_number != periph->unit_number))
1868 continue;
1869
1870 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1871 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1872 DEV_IDLEN) != 0))
1873 continue;
1874
1875 /*
1876 * If we get to this point, the user definitely wants
1877 * information on this peripheral. So tell the caller to
1878 * copy the data out.
1879 */
1880 retval |= DM_RET_COPY;
1881
1882 /*
1883 * The return action has already been set to stop, since
1884 * peripherals don't have any nodes below them in the EDT.
1885 */
1886 return(retval);
1887 }
1888
1889 /*
1890 * If we get to this point, the peripheral that was passed in
1891 * doesn't match any of the patterns.
1892 */
1893 return(retval);
1894}
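
/*
 * Example (illustrative sketch, not part of the original source): a
 * caller matching every instance of a hypothetical "da" peripheral
 * driver would fill in a single pattern before handing it to
 * xptperiphmatch():
 *
 *	struct dev_match_pattern pattern;
 *
 *	bzero(&pattern, sizeof(pattern));
 *	pattern.type = DEV_MATCH_PERIPH;
 *	pattern.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strncpy(pattern.pattern.periph_pattern.periph_name, "da",
 *		DEV_IDLEN);
 *
 * With only PERIPH_MATCH_NAME set, the name strncmp() above is the
 * only test applied, so any unit of the named driver yields
 * DM_RET_STOP | DM_RET_COPY.
 */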
1895
1896static int
1897xptedtbusfunc(struct cam_eb *bus, void *arg)
1898{
1899 struct ccb_dev_match *cdm;
1900 dev_match_ret retval;
1901
1902 cdm = (struct ccb_dev_match *)arg;
1903
1904 /*
1905 * If our position is for something deeper in the tree, that means
1906 * that we've already seen this node. So, we keep going down.
1907 */
1908 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1909 && (cdm->pos.cookie.bus == bus)
1910 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1911 && (cdm->pos.cookie.target != NULL))
1912 retval = DM_RET_DESCEND;
1913 else
1914 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1915
1916 /*
1917 * If we got an error, bail out of the search.
1918 */
1919 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1920 cdm->status = CAM_DEV_MATCH_ERROR;
1921 return(0);
1922 }
1923
1924 /*
1925 * If the copy flag is set, copy this bus out.
1926 */
1927 if (retval & DM_RET_COPY) {
1928 int spaceleft, j;
1929
1930 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1931 sizeof(struct dev_match_result));
1932
1933 /*
1934 * If we don't have enough space to put in another
1935 * match result, save our position and tell the
1936 * user there are more devices to check.
1937 */
1938 if (spaceleft < sizeof(struct dev_match_result)) {
1939 bzero(&cdm->pos, sizeof(cdm->pos));
1940 cdm->pos.position_type =
1941 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1942
1943 cdm->pos.cookie.bus = bus;
1944 cdm->pos.generations[CAM_BUS_GENERATION]=
1945 bus_generation;
1946 cdm->status = CAM_DEV_MATCH_MORE;
1947 return(0);
1948 }
1949 j = cdm->num_matches;
1950 cdm->num_matches++;
1951 cdm->matches[j].type = DEV_MATCH_BUS;
1952 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1953 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1954 cdm->matches[j].result.bus_result.unit_number =
1955 bus->sim->unit_number;
1956 strncpy(cdm->matches[j].result.bus_result.dev_name,
1957 bus->sim->sim_name, DEV_IDLEN);
1958 }
1959
1960 /*
1961 * If the user is only interested in busses, there's no
1962 * reason to descend to the next level in the tree.
1963 */
1964 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1965 return(1);
1966
1967 /*
1968 * If there is a target generation recorded, check it to
1969 * make sure the target list hasn't changed.
1970 */
1971 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1972 && (bus == cdm->pos.cookie.bus)
1973 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1974 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1975 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1976 bus->generation)) {
1977 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1978 return(0);
1979 }
1980
1981 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1982 && (cdm->pos.cookie.bus == bus)
1983 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1984 && (cdm->pos.cookie.target != NULL))
1985 return(xpttargettraverse(bus,
1986 (struct cam_et *)cdm->pos.cookie.target,
1987 xptedttargetfunc, arg));
1988 else
1989 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1990}
1991
1992static int
1993xptedttargetfunc(struct cam_et *target, void *arg)
1994{
1995 struct ccb_dev_match *cdm;
1996
1997 cdm = (struct ccb_dev_match *)arg;
1998
1999 /*
2000 * If there is a device list generation recorded, check it to
2001 * make sure the device list hasn't changed.
2002 */
2003 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2004 && (cdm->pos.cookie.bus == target->bus)
2005 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2006 && (cdm->pos.cookie.target == target)
2007 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2008 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2009 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2010 target->generation)) {
2011 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2012 return(0);
2013 }
2014
2015 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2016 && (cdm->pos.cookie.bus == target->bus)
2017 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2018 && (cdm->pos.cookie.target == target)
2019 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2020 && (cdm->pos.cookie.device != NULL))
2021 return(xptdevicetraverse(target,
2022 (struct cam_ed *)cdm->pos.cookie.device,
2023 xptedtdevicefunc, arg));
2024 else
2025 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2026}
2027
2028static int
2029xptedtdevicefunc(struct cam_ed *device, void *arg)
2030{
2031
2032 struct ccb_dev_match *cdm;
2033 dev_match_ret retval;
2034
2035 cdm = (struct ccb_dev_match *)arg;
2036
2037 /*
2038 * If our position is for something deeper in the tree, that means
2039 * that we've already seen this node. So, we keep going down.
2040 */
2041 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2042 && (cdm->pos.cookie.device == device)
2043 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2044 && (cdm->pos.cookie.periph != NULL))
2045 retval = DM_RET_DESCEND;
2046 else
2047 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2048 device);
2049
2050 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2051 cdm->status = CAM_DEV_MATCH_ERROR;
2052 return(0);
2053 }
2054
2055 /*
2056 * If the copy flag is set, copy this device out.
2057 */
2058 if (retval & DM_RET_COPY) {
2059 int spaceleft, j;
2060
2061 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2062 sizeof(struct dev_match_result));
2063
2064 /*
2065 * If we don't have enough space to put in another
2066 * match result, save our position and tell the
2067 * user there are more devices to check.
2068 */
2069 if (spaceleft < sizeof(struct dev_match_result)) {
2070 bzero(&cdm->pos, sizeof(cdm->pos));
2071 cdm->pos.position_type =
2072 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2073 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2074
2075 cdm->pos.cookie.bus = device->target->bus;
2076 cdm->pos.generations[CAM_BUS_GENERATION]=
2077 bus_generation;
2078 cdm->pos.cookie.target = device->target;
2079 cdm->pos.generations[CAM_TARGET_GENERATION] =
2080 device->target->bus->generation;
2081 cdm->pos.cookie.device = device;
2082 cdm->pos.generations[CAM_DEV_GENERATION] =
2083 device->target->generation;
2084 cdm->status = CAM_DEV_MATCH_MORE;
2085 return(0);
2086 }
2087 j = cdm->num_matches;
2088 cdm->num_matches++;
2089 cdm->matches[j].type = DEV_MATCH_DEVICE;
2090 cdm->matches[j].result.device_result.path_id =
2091 device->target->bus->path_id;
2092 cdm->matches[j].result.device_result.target_id =
2093 device->target->target_id;
2094 cdm->matches[j].result.device_result.target_lun =
2095 device->lun_id;
2096 bcopy(&device->inq_data,
2097 &cdm->matches[j].result.device_result.inq_data,
2098 sizeof(struct scsi_inquiry_data));
2099
2100 /* Let the user know whether this device is unconfigured */
2101 if (device->flags & CAM_DEV_UNCONFIGURED)
2102 cdm->matches[j].result.device_result.flags =
2103 DEV_RESULT_UNCONFIGURED;
2104 else
2105 cdm->matches[j].result.device_result.flags =
2106 DEV_RESULT_NOFLAG;
2107 }
2108
2109 /*
2110 * If the user isn't interested in peripherals, don't descend
2111 * the tree any further.
2112 */
2113 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2114 return(1);
2115
2116 /*
2117 * If there is a peripheral list generation recorded, make sure
2118 * it hasn't changed.
2119 */
2120 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2121 && (device->target->bus == cdm->pos.cookie.bus)
2122 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2123 && (device->target == cdm->pos.cookie.target)
2124 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2125 && (device == cdm->pos.cookie.device)
2126 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2127 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2128 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2129	    device->generation)) {
2130 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2131 return(0);
2132 }
2133
2134 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2135 && (cdm->pos.cookie.bus == device->target->bus)
2136 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2137 && (cdm->pos.cookie.target == device->target)
2138 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2139 && (cdm->pos.cookie.device == device)
2140 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2141 && (cdm->pos.cookie.periph != NULL))
2142 return(xptperiphtraverse(device,
2143 (struct cam_periph *)cdm->pos.cookie.periph,
2144 xptedtperiphfunc, arg));
2145 else
2146 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2147}
2148
2149static int
2150xptedtperiphfunc(struct cam_periph *periph, void *arg)
2151{
2152 struct ccb_dev_match *cdm;
2153 dev_match_ret retval;
2154
2155 cdm = (struct ccb_dev_match *)arg;
2156
2157 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2158
2159 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2160 cdm->status = CAM_DEV_MATCH_ERROR;
2161 return(0);
2162 }
2163
2164 /*
2165 * If the copy flag is set, copy this peripheral out.
2166 */
2167 if (retval & DM_RET_COPY) {
2168 int spaceleft, j;
2169
2170 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2171 sizeof(struct dev_match_result));
2172
2173 /*
2174 * If we don't have enough space to put in another
2175 * match result, save our position and tell the
2176 * user there are more devices to check.
2177 */
2178 if (spaceleft < sizeof(struct dev_match_result)) {
2179 bzero(&cdm->pos, sizeof(cdm->pos));
2180 cdm->pos.position_type =
2181 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2182 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2183 CAM_DEV_POS_PERIPH;
2184
2185 cdm->pos.cookie.bus = periph->path->bus;
2186 cdm->pos.generations[CAM_BUS_GENERATION]=
2187 bus_generation;
2188 cdm->pos.cookie.target = periph->path->target;
2189 cdm->pos.generations[CAM_TARGET_GENERATION] =
2190 periph->path->bus->generation;
2191 cdm->pos.cookie.device = periph->path->device;
2192 cdm->pos.generations[CAM_DEV_GENERATION] =
2193 periph->path->target->generation;
2194 cdm->pos.cookie.periph = periph;
2195 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2196 periph->path->device->generation;
2197 cdm->status = CAM_DEV_MATCH_MORE;
2198 return(0);
2199 }
2200
2201 j = cdm->num_matches;
2202 cdm->num_matches++;
2203 cdm->matches[j].type = DEV_MATCH_PERIPH;
2204 cdm->matches[j].result.periph_result.path_id =
2205 periph->path->bus->path_id;
2206 cdm->matches[j].result.periph_result.target_id =
2207 periph->path->target->target_id;
2208 cdm->matches[j].result.periph_result.target_lun =
2209 periph->path->device->lun_id;
2210 cdm->matches[j].result.periph_result.unit_number =
2211 periph->unit_number;
2212 strncpy(cdm->matches[j].result.periph_result.periph_name,
2213 periph->periph_name, DEV_IDLEN);
2214 }
2215
2216 return(1);
2217}
2218
2219static int
2220xptedtmatch(struct ccb_dev_match *cdm)
2221{
2222 int ret;
2223
2224 cdm->num_matches = 0;
2225
2226 /*
2227 * Check the bus list generation. If it has changed, the user
2228 * needs to reset everything and start over.
2229 */
2230 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2231 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2232 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2233 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2234 return(0);
2235 }
2236
2237 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2238 && (cdm->pos.cookie.bus != NULL))
2239 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2240 xptedtbusfunc, cdm);
2241 else
2242 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2243
2244 /*
2245 * If we get back 0, that means that we had to stop before fully
2246 * traversing the EDT. It also means that one of the subroutines
2247 * has set the status field to the proper value. If we get back 1,
2248 * we've fully traversed the EDT and copied out any matching entries.
2249 */
2250 if (ret == 1)
2251 cdm->status = CAM_DEV_MATCH_LAST;
2252
2253 return(ret);
2254}
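
/*
 * Example (hypothetical consumer sketch): the CAM_DEV_MATCH_MORE /
 * saved-position protocol implemented above lets a caller with a
 * fixed-size result buffer iterate in batches. Assuming "matches"
 * points at MAX_MATCHES results (both names are illustrative):
 *
 *	cdm->match_buf_len = MAX_MATCHES * sizeof(struct dev_match_result);
 *	cdm->matches = matches;
 *	cdm->pos.position_type = CAM_DEV_POS_NONE;
 *	do {
 *		xptedtmatch(cdm);
 *		... consume cdm->num_matches results ...
 *	} while (cdm->status == CAM_DEV_MATCH_MORE);
 *
 * Each CAM_DEV_MATCH_MORE return leaves cdm->pos pointing at the next
 * unvisited node, so the subsequent call resumes where the last one
 * stopped.
 */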
2255
2256static int
2257xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2258{
2259 struct ccb_dev_match *cdm;
2260
2261 cdm = (struct ccb_dev_match *)arg;
2262
2263 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2264 && (cdm->pos.cookie.pdrv == pdrv)
2265 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2266 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2267 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2268 (*pdrv)->generation)) {
2269 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2270 return(0);
2271 }
2272
2273 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2274 && (cdm->pos.cookie.pdrv == pdrv)
2275 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2276 && (cdm->pos.cookie.periph != NULL))
2277 return(xptpdperiphtraverse(pdrv,
2278 (struct cam_periph *)cdm->pos.cookie.periph,
2279 xptplistperiphfunc, arg));
2280 else
 2281		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2282}
2283
2284static int
2285xptplistperiphfunc(struct cam_periph *periph, void *arg)
2286{
2287 struct ccb_dev_match *cdm;
2288 dev_match_ret retval;
2289
2290 cdm = (struct ccb_dev_match *)arg;
2291
2292 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2293
2294 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2295 cdm->status = CAM_DEV_MATCH_ERROR;
2296 return(0);
2297 }
2298
2299 /*
2300 * If the copy flag is set, copy this peripheral out.
2301 */
2302 if (retval & DM_RET_COPY) {
2303 int spaceleft, j;
2304
2305 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2306 sizeof(struct dev_match_result));
2307
2308 /*
2309 * If we don't have enough space to put in another
2310 * match result, save our position and tell the
2311 * user there are more devices to check.
2312 */
2313 if (spaceleft < sizeof(struct dev_match_result)) {
2314 struct periph_driver **pdrv;
2315
2316 pdrv = NULL;
2317 bzero(&cdm->pos, sizeof(cdm->pos));
2318 cdm->pos.position_type =
2319 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2320 CAM_DEV_POS_PERIPH;
2321
2322 /*
 2323			 * This may look a bit nonsensical, but it is
2324 * actually quite logical. There are very few
2325 * peripheral drivers, and bloating every peripheral
2326 * structure with a pointer back to its parent
2327 * peripheral driver linker set entry would cost
2328 * more in the long run than doing this quick lookup.
2329 */
 2330			SET_FOREACH(pdrv, periphdriver_set) {
2331 if (strcmp((*pdrv)->driver_name,
2332 periph->periph_name) == 0)
2333 break;
2334 }
2335
 2336			if (*pdrv == NULL) {
2337 cdm->status = CAM_DEV_MATCH_ERROR;
2338 return(0);
2339 }
2340
2341 cdm->pos.cookie.pdrv = pdrv;
2342 /*
2343 * The periph generation slot does double duty, as
2344 * does the periph pointer slot. They are used for
2345 * both edt and pdrv lookups and positioning.
2346 */
2347 cdm->pos.cookie.periph = periph;
2348 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2349 (*pdrv)->generation;
2350 cdm->status = CAM_DEV_MATCH_MORE;
2351 return(0);
2352 }
2353
2354 j = cdm->num_matches;
2355 cdm->num_matches++;
2356 cdm->matches[j].type = DEV_MATCH_PERIPH;
2357 cdm->matches[j].result.periph_result.path_id =
2358 periph->path->bus->path_id;
2359
2360 /*
2361 * The transport layer peripheral doesn't have a target or
2362 * lun.
2363 */
2364 if (periph->path->target)
2365 cdm->matches[j].result.periph_result.target_id =
2366 periph->path->target->target_id;
2367 else
2368 cdm->matches[j].result.periph_result.target_id = -1;
2369
2370 if (periph->path->device)
2371 cdm->matches[j].result.periph_result.target_lun =
2372 periph->path->device->lun_id;
2373 else
2374 cdm->matches[j].result.periph_result.target_lun = -1;
2375
2376 cdm->matches[j].result.periph_result.unit_number =
2377 periph->unit_number;
2378 strncpy(cdm->matches[j].result.periph_result.periph_name,
2379 periph->periph_name, DEV_IDLEN);
2380 }
2381
2382 return(1);
2383}
2384
2385static int
2386xptperiphlistmatch(struct ccb_dev_match *cdm)
2387{
2388 int ret;
2389
2390 cdm->num_matches = 0;
2391
2392 /*
2393 * At this point in the edt traversal function, we check the bus
2394 * list generation to make sure that no busses have been added or
 2395	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2396 * For the peripheral driver list traversal function, however, we
2397 * don't have to worry about new peripheral driver types coming or
2398 * going; they're in a linker set, and therefore can't change
2399 * without a recompile.
2400 */
2401
2402 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2403 && (cdm->pos.cookie.pdrv != NULL))
2404 ret = xptpdrvtraverse(
2405 (struct periph_driver **)cdm->pos.cookie.pdrv,
2406 xptplistpdrvfunc, cdm);
2407 else
2408 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2409
2410 /*
2411 * If we get back 0, that means that we had to stop before fully
2412 * traversing the peripheral driver tree. It also means that one of
2413 * the subroutines has set the status field to the proper value. If
 2414	 * we get back 1, we've fully traversed the peripheral driver lists
 2415	 * and copied out any matching entries.
2416 */
2417 if (ret == 1)
2418 cdm->status = CAM_DEV_MATCH_LAST;
2419
2420 return(ret);
2421}
2422
2423static int
2424xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2425{
2426 struct cam_eb *bus, *next_bus;
2427 int retval;
2428
2429 retval = 1;
2430
2431 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2432 bus != NULL;
2433 bus = next_bus) {
2434 next_bus = TAILQ_NEXT(bus, links);
2435
2436 retval = tr_func(bus, arg);
2437 if (retval == 0)
2438 return(retval);
2439 }
2440
2441 return(retval);
2442}
2443
2444static int
2445xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2446 xpt_targetfunc_t *tr_func, void *arg)
2447{
2448 struct cam_et *target, *next_target;
2449 int retval;
2450
2451 retval = 1;
2452 for (target = (start_target ? start_target :
2453 TAILQ_FIRST(&bus->et_entries));
2454 target != NULL; target = next_target) {
2455
2456 next_target = TAILQ_NEXT(target, links);
2457
2458 retval = tr_func(target, arg);
2459
2460 if (retval == 0)
2461 return(retval);
2462 }
2463
2464 return(retval);
2465}
2466
2467static int
2468xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2469 xpt_devicefunc_t *tr_func, void *arg)
2470{
2471 struct cam_ed *device, *next_device;
2472 int retval;
2473
2474 retval = 1;
2475 for (device = (start_device ? start_device :
2476 TAILQ_FIRST(&target->ed_entries));
2477 device != NULL;
2478 device = next_device) {
2479
2480 next_device = TAILQ_NEXT(device, links);
2481
2482 retval = tr_func(device, arg);
2483
2484 if (retval == 0)
2485 return(retval);
2486 }
2487
2488 return(retval);
2489}
2490
2491static int
2492xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2493 xpt_periphfunc_t *tr_func, void *arg)
2494{
2495 struct cam_periph *periph, *next_periph;
2496 int retval;
2497
2498 retval = 1;
2499
2500 for (periph = (start_periph ? start_periph :
2501 SLIST_FIRST(&device->periphs));
2502 periph != NULL;
2503 periph = next_periph) {
2504
2505 next_periph = SLIST_NEXT(periph, periph_links);
2506
2507 retval = tr_func(periph, arg);
2508 if (retval == 0)
2509 return(retval);
2510 }
2511
2512 return(retval);
2513}
2514
2515static int
2516xptpdrvtraverse(struct periph_driver **start_pdrv,
2517 xpt_pdrvfunc_t *tr_func, void *arg)
2518{
2519 struct periph_driver **pdrv;
2520 int retval;
2521
2522 retval = 1;
2523
2524 /*
2525 * We don't traverse the peripheral driver list like we do the
2526 * other lists, because it is a linker set, and therefore cannot be
2527 * changed during runtime. If the peripheral driver list is ever
2528 * re-done to be something other than a linker set (i.e. it can
2529 * change while the system is running), the list traversal should
2530 * be modified to work like the other traversal functions.
2531 */
2532 SET_FOREACH(pdrv, periphdriver_set) {
2533 if (start_pdrv == NULL || start_pdrv == pdrv) {
2534 retval = tr_func(pdrv, arg);
2535 if (retval == 0)
2536 return(retval);
 2537			start_pdrv = NULL; /* traverse remainder */
 2538		}
 2539	}
2540 return(retval);
2541}
2542
2543static int
2544xptpdperiphtraverse(struct periph_driver **pdrv,
2545 struct cam_periph *start_periph,
2546 xpt_periphfunc_t *tr_func, void *arg)
2547{
2548 struct cam_periph *periph, *next_periph;
2549 int retval;
2550
2551 retval = 1;
2552
2553 for (periph = (start_periph ? start_periph :
2554 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2555 periph = next_periph) {
2556
2557 next_periph = TAILQ_NEXT(periph, unit_links);
2558
2559 retval = tr_func(periph, arg);
2560 if (retval == 0)
2561 return(retval);
2562 }
2563 return(retval);
2564}
2565
2566static int
2567xptdefbusfunc(struct cam_eb *bus, void *arg)
2568{
2569 struct xpt_traverse_config *tr_config;
2570
2571 tr_config = (struct xpt_traverse_config *)arg;
2572
2573 if (tr_config->depth == XPT_DEPTH_BUS) {
2574 xpt_busfunc_t *tr_func;
2575
2576 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2577
2578 return(tr_func(bus, tr_config->tr_arg));
2579 } else
2580 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2581}
2582
2583static int
2584xptdeftargetfunc(struct cam_et *target, void *arg)
2585{
2586 struct xpt_traverse_config *tr_config;
2587
2588 tr_config = (struct xpt_traverse_config *)arg;
2589
2590 if (tr_config->depth == XPT_DEPTH_TARGET) {
2591 xpt_targetfunc_t *tr_func;
2592
2593 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2594
2595 return(tr_func(target, tr_config->tr_arg));
2596 } else
2597 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2598}
2599
2600static int
2601xptdefdevicefunc(struct cam_ed *device, void *arg)
2602{
2603 struct xpt_traverse_config *tr_config;
2604
2605 tr_config = (struct xpt_traverse_config *)arg;
2606
2607 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2608 xpt_devicefunc_t *tr_func;
2609
2610 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2611
2612 return(tr_func(device, tr_config->tr_arg));
2613 } else
2614 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2615}
2616
2617static int
2618xptdefperiphfunc(struct cam_periph *periph, void *arg)
2619{
2620 struct xpt_traverse_config *tr_config;
2621 xpt_periphfunc_t *tr_func;
2622
2623 tr_config = (struct xpt_traverse_config *)arg;
2624
2625 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2626
2627 /*
2628 * Unlike the other default functions, we don't check for depth
2629 * here. The peripheral driver level is the last level in the EDT,
2630 * so if we're here, we should execute the function in question.
2631 */
2632 return(tr_func(periph, tr_config->tr_arg));
2633}
2634
2635/*
2636 * Execute the given function for every bus in the EDT.
2637 */
2638static int
2639xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2640{
2641 struct xpt_traverse_config tr_config;
2642
2643 tr_config.depth = XPT_DEPTH_BUS;
2644 tr_config.tr_func = tr_func;
2645 tr_config.tr_arg = arg;
2646
2647 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2648}
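
/*
 * Example (hypothetical): traversal callbacks return nonzero to keep
 * walking and 0 to abort, so counting the busses in the EDT is just:
 *
 *	static int
 *	xptcountbusfunc(struct cam_eb *bus, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_busses(xptcountbusfunc, &count);
 *
 * Both the function and variable names are illustrative.
 */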
2649
2650#ifdef notusedyet
2651/*
2652 * Execute the given function for every target in the EDT.
2653 */
2654static int
2655xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2656{
2657 struct xpt_traverse_config tr_config;
2658
2659 tr_config.depth = XPT_DEPTH_TARGET;
2660 tr_config.tr_func = tr_func;
2661 tr_config.tr_arg = arg;
2662
2663 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2664}
2665#endif /* notusedyet */
2666
2667/*
2668 * Execute the given function for every device in the EDT.
2669 */
2670static int
2671xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2672{
2673 struct xpt_traverse_config tr_config;
2674
2675 tr_config.depth = XPT_DEPTH_DEVICE;
2676 tr_config.tr_func = tr_func;
2677 tr_config.tr_arg = arg;
2678
2679 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2680}
2681
2682#ifdef notusedyet
2683/*
2684 * Execute the given function for every peripheral in the EDT.
2685 */
2686static int
2687xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2688{
2689 struct xpt_traverse_config tr_config;
2690
2691 tr_config.depth = XPT_DEPTH_PERIPH;
2692 tr_config.tr_func = tr_func;
2693 tr_config.tr_arg = arg;
2694
2695 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2696}
2697#endif /* notusedyet */
2698
2699static int
2700xptsetasyncfunc(struct cam_ed *device, void *arg)
2701{
2702 struct cam_path path;
2703 struct ccb_getdev cgd;
2704 struct async_node *cur_entry;
2705
2706 cur_entry = (struct async_node *)arg;
2707
2708 /*
2709 * Don't report unconfigured devices (Wildcard devs,
2710 * devices only for target mode, device instances
2711 * that have been invalidated but are waiting for
2712 * their last reference count to be released).
2713 */
2714 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2715 return (1);
2716
2717 xpt_compile_path(&path,
2718 NULL,
2719 device->target->bus->path_id,
2720 device->target->target_id,
2721 device->lun_id);
2722 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2723 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2724 xpt_action((union ccb *)&cgd);
2725 cur_entry->callback(cur_entry->callback_arg,
2726 AC_FOUND_DEVICE,
2727 &path, &cgd);
2728 xpt_release_path(&path);
2729
2730 return(1);
2731}
2732
2733static int
2734xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2735{
2736 struct cam_path path;
2737 struct ccb_pathinq cpi;
2738 struct async_node *cur_entry;
2739
2740 cur_entry = (struct async_node *)arg;
2741
2742 xpt_compile_path(&path, /*periph*/NULL,
2743 bus->sim->path_id,
2744 CAM_TARGET_WILDCARD,
2745 CAM_LUN_WILDCARD);
2746 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2747 cpi.ccb_h.func_code = XPT_PATH_INQ;
2748 xpt_action((union ccb *)&cpi);
2749 cur_entry->callback(cur_entry->callback_arg,
2750 AC_PATH_REGISTERED,
2751 &path, &cpi);
2752 xpt_release_path(&path);
2753
2754 return(1);
2755}
2756
2757void
2758xpt_action(union ccb *start_ccb)
2759{
2760 int iopl;
2761
2762 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2763
2764 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2765
2766 iopl = splsoftcam();
2767 switch (start_ccb->ccb_h.func_code) {
2768 case XPT_SCSI_IO:
2769 {
2770#ifdef CAMDEBUG
2771 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2772 struct cam_path *path;
2773
2774 path = start_ccb->ccb_h.path;
2775#endif
2776
2777 /*
2778 * For the sake of compatibility with SCSI-1
2779 * devices that may not understand the identify
2780 * message, we include lun information in the
2781 * second byte of all commands. SCSI-1 specifies
2782 * that luns are a 3 bit value and reserves only 3
2783 * bits for lun information in the CDB. Later
2784 * revisions of the SCSI spec allow for more than 8
2785 * luns, but have deprecated lun information in the
2786 * CDB. So, if the lun won't fit, we must omit.
 2787		 * CDB. So, if the lun won't fit, we must omit it.
2788 * Also be aware that during initial probing for devices,
2789 * the inquiry information is unknown but initialized to 0.
2790 * This means that this code will be exercised while probing
2791 * devices with an ANSI revision greater than 2.
2792 */
2793 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2794 && start_ccb->ccb_h.target_lun < 8
2795 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2796
2797 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2798 start_ccb->ccb_h.target_lun << 5;
2799 }
2800 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2801 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2802 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2803 &path->device->inq_data),
2804 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2805 cdb_str, sizeof(cdb_str))));
2806 /* FALLTHROUGH */
2807 }
2808 case XPT_TARGET_IO:
2809 case XPT_CONT_TARGET_IO:
2810 start_ccb->csio.sense_resid = 0;
2811 start_ccb->csio.resid = 0;
2812 /* FALLTHROUGH */
2813 case XPT_RESET_DEV:
2814 case XPT_ENG_EXEC:
2815 {
2816 struct cam_path *path;
2817 int s;
2818 int runq;
2819
2820 path = start_ccb->ccb_h.path;
2821 s = splsoftcam();
2822
2823 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2824 if (path->device->qfrozen_cnt == 0)
2825 runq = xpt_schedule_dev_sendq(path->bus, path->device);
2826 else
2827 runq = 0;
2828 splx(s);
2829 if (runq != 0)
2830 xpt_run_dev_sendq(path->bus);
2831 break;
2832 }
2833 case XPT_SET_TRAN_SETTINGS:
2834 {
2835 xpt_set_transfer_settings(&start_ccb->cts,
2836 start_ccb->ccb_h.path->device,
2837 /*async_update*/FALSE);
2838 break;
2839 }
2840 case XPT_CALC_GEOMETRY:
2841 {
2842 struct cam_sim *sim;
2843
2844 /* Filter out garbage */
2845 if (start_ccb->ccg.block_size == 0
2846 || start_ccb->ccg.volume_size == 0) {
2847 start_ccb->ccg.cylinders = 0;
2848 start_ccb->ccg.heads = 0;
2849 start_ccb->ccg.secs_per_track = 0;
2850 start_ccb->ccb_h.status = CAM_REQ_CMP;
2851 break;
2852 }
2853 sim = start_ccb->ccb_h.path->bus->sim;
2854 (*(sim->sim_action))(sim, start_ccb);
2855 break;
2856 }
2857 case XPT_ABORT:
2858 {
2859 union ccb* abort_ccb;
2860 int s;
2861
2862 abort_ccb = start_ccb->cab.abort_ccb;
2863 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2864
2865 if (abort_ccb->ccb_h.pinfo.index >= 0) {
2866 struct cam_ccbq *ccbq;
2867
2868 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2869 cam_ccbq_remove_ccb(ccbq, abort_ccb);
2870 abort_ccb->ccb_h.status =
2871 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2872 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2873 s = splcam();
2874 xpt_done(abort_ccb);
2875 splx(s);
2876 start_ccb->ccb_h.status = CAM_REQ_CMP;
2877 break;
2878 }
2879 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2880 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2881 /*
2882 * We've caught this ccb en route to
2883 * the SIM. Flag it for abort and the
2884 * SIM will do so just before starting
2885 * real work on the CCB.
2886 */
2887 abort_ccb->ccb_h.status =
2888 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2889 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2890 start_ccb->ccb_h.status = CAM_REQ_CMP;
2891 break;
2892 }
2893 }
2894 if (XPT_FC_IS_QUEUED(abort_ccb)
2895 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2896 /*
2897 * It's already completed but waiting
2898 * for our SWI to get to it.
2899 */
2900 start_ccb->ccb_h.status = CAM_UA_ABORT;
2901 break;
2902 }
2903 /*
2904 * If we weren't able to take care of the abort request
2905 * in the XPT, pass the request down to the SIM for processing.
2906 */
2907 /* FALLTHROUGH */
2908 }
2909 case XPT_ACCEPT_TARGET_IO:
2910 case XPT_EN_LUN:
2911 case XPT_IMMED_NOTIFY:
2912 case XPT_NOTIFY_ACK:
2913 case XPT_GET_TRAN_SETTINGS:
2914 case XPT_RESET_BUS:
2915 {
2916 struct cam_sim *sim;
2917
2918 sim = start_ccb->ccb_h.path->bus->sim;
2919 (*(sim->sim_action))(sim, start_ccb);
2920 break;
2921 }
2922 case XPT_PATH_INQ:
2923 {
2924 struct cam_sim *sim;
2925
2926 sim = start_ccb->ccb_h.path->bus->sim;
2927 (*(sim->sim_action))(sim, start_ccb);
2928 break;
2929 }
2930 case XPT_PATH_STATS:
2931 start_ccb->cpis.last_reset =
2932 start_ccb->ccb_h.path->bus->last_reset;
2933 start_ccb->ccb_h.status = CAM_REQ_CMP;
2934 break;
2935 case XPT_GDEV_TYPE:
2936 {
2937 struct cam_ed *dev;
2938 int s;
2939
2940 dev = start_ccb->ccb_h.path->device;
2941 s = splcam();
2942 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2943 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2944 } else {
2945 struct ccb_getdev *cgd;
2946 struct cam_eb *bus;
2947 struct cam_et *tar;
2948
2949 cgd = &start_ccb->cgd;
2950 bus = cgd->ccb_h.path->bus;
2951 tar = cgd->ccb_h.path->target;
2952 cgd->inq_data = dev->inq_data;
2953 cgd->ccb_h.status = CAM_REQ_CMP;
2954 cgd->serial_num_len = dev->serial_num_len;
2955 if ((dev->serial_num_len > 0)
2956 && (dev->serial_num != NULL))
2957 bcopy(dev->serial_num, cgd->serial_num,
2958 dev->serial_num_len);
2959 }
2960 splx(s);
2961 break;
2962 }
2963 case XPT_GDEV_STATS:
2964 {
2965 struct cam_ed *dev;
2966 int s;
2967
2968 dev = start_ccb->ccb_h.path->device;
2969 s = splcam();
2970 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2971 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2972 } else {
2973 struct ccb_getdevstats *cgds;
2974 struct cam_eb *bus;
2975 struct cam_et *tar;
2976
2977 cgds = &start_ccb->cgds;
2978 bus = cgds->ccb_h.path->bus;
2979 tar = cgds->ccb_h.path->target;
2980 cgds->dev_openings = dev->ccbq.dev_openings;
2981 cgds->dev_active = dev->ccbq.dev_active;
2982 cgds->devq_openings = dev->ccbq.devq_openings;
2983 cgds->devq_queued = dev->ccbq.queue.entries;
2984 cgds->held = dev->ccbq.held;
2985 cgds->last_reset = tar->last_reset;
2986 cgds->maxtags = dev->quirk->maxtags;
2987 cgds->mintags = dev->quirk->mintags;
2988 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2989 cgds->last_reset = bus->last_reset;
2990 cgds->ccb_h.status = CAM_REQ_CMP;
2991 }
2992 splx(s);
2993 break;
2994 }
2995 case XPT_GDEVLIST:
2996 {
2997 struct cam_periph *nperiph;
2998 struct periph_list *periph_head;
2999 struct ccb_getdevlist *cgdl;
3000 int i;
3001 int s;
3002 struct cam_ed *device;
3003 int found;
3004
3005
3006 found = 0;
3007
3008 /*
3009 * Don't want anyone mucking with our data.
3010 */
3011 s = splcam();
3012 device = start_ccb->ccb_h.path->device;
3013 periph_head = &device->periphs;
3014 cgdl = &start_ccb->cgdl;
3015
3016 /*
3017 * Check and see if the list has changed since the user
3018 * last requested a list member. If so, tell them that the
3019 * list has changed, and therefore they need to start over
3020 * from the beginning.
3021 */
3022 if ((cgdl->index != 0) &&
3023 (cgdl->generation != device->generation)) {
3024 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3025 splx(s);
3026 break;
3027 }
3028
3029 /*
3030 * Traverse the list of peripherals and attempt to find
3031 * the requested peripheral.
3032 */
3033 for (nperiph = periph_head->slh_first, i = 0;
3034 (nperiph != NULL) && (i <= cgdl->index);
3035 nperiph = nperiph->periph_links.sle_next, i++) {
3036 if (i == cgdl->index) {
3037 strncpy(cgdl->periph_name,
3038 nperiph->periph_name,
3039 DEV_IDLEN);
3040 cgdl->unit_number = nperiph->unit_number;
3041 found = 1;
3042 }
3043 }
3044 if (found == 0) {
3045 cgdl->status = CAM_GDEVLIST_ERROR;
3046 splx(s);
3047 break;
3048 }
3049
3050 if (nperiph == NULL)
3051 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3052 else
3053 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3054
3055 cgdl->index++;
3056 cgdl->generation = device->generation;
3057
3058 splx(s);
3059 cgdl->ccb_h.status = CAM_REQ_CMP;
3060 break;
3061 }
3062 case XPT_DEV_MATCH:
3063 {
3064 int s;
3065 dev_pos_type position_type;
3066 struct ccb_dev_match *cdm;
3067 int ret;
3068
3069 cdm = &start_ccb->cdm;
3070
3071 /*
3072 * Prevent EDT changes while we traverse it.
3073 */
3074 s = splcam();
3075 /*
3076 * There are two ways of getting at information in the EDT.
3077 * The first way is via the primary EDT tree. It starts
3078 * with a list of busses, then a list of targets on a bus,
3079 * then devices/luns on a target, and then peripherals on a
3080 * device/lun. The "other" way is by the peripheral driver
 3081		 * lists. The peripheral driver lists are, obviously,
 3082		 * organized by peripheral driver, so it makes sense to
3083 * use the peripheral driver list if the user is looking
3084 * for something like "da1", or all "da" devices. If the
3085 * user is looking for something on a particular bus/target
3086 * or lun, it's generally better to go through the EDT tree.
3087 */
3088
3089 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3090 position_type = cdm->pos.position_type;
3091 else {
3092 int i;
3093
3094 position_type = CAM_DEV_POS_NONE;
3095
3096 for (i = 0; i < cdm->num_patterns; i++) {
3097 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3098 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3099 position_type = CAM_DEV_POS_EDT;
3100 break;
3101 }
3102 }
3103
3104 if (cdm->num_patterns == 0)
3105 position_type = CAM_DEV_POS_EDT;
3106 else if (position_type == CAM_DEV_POS_NONE)
3107 position_type = CAM_DEV_POS_PDRV;
3108 }
3109
3110 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3111 case CAM_DEV_POS_EDT:
3112 ret = xptedtmatch(cdm);
3113 break;
3114 case CAM_DEV_POS_PDRV:
3115 ret = xptperiphlistmatch(cdm);
3116 break;
3117 default:
3118 cdm->status = CAM_DEV_MATCH_ERROR;
3119 break;
3120 }
3121
3122 splx(s);
3123
3124 if (cdm->status == CAM_DEV_MATCH_ERROR)
3125 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3126 else
3127 start_ccb->ccb_h.status = CAM_REQ_CMP;
3128
3129 break;
3130 }
3131 case XPT_SASYNC_CB:
3132 {
3133 struct ccb_setasync *csa;
3134 struct async_node *cur_entry;
3135 struct async_list *async_head;
3136 u_int32_t added;
3137 int s;
3138
3139 csa = &start_ccb->csa;
3140 added = csa->event_enable;
3141 async_head = &csa->ccb_h.path->device->asyncs;
3142
3143 /*
3144 * If there is already an entry for us, simply
3145 * update it.
3146 */
3147 s = splcam();
3148 cur_entry = SLIST_FIRST(async_head);
3149 while (cur_entry != NULL) {
3150 if ((cur_entry->callback_arg == csa->callback_arg)
3151 && (cur_entry->callback == csa->callback))
3152 break;
3153 cur_entry = SLIST_NEXT(cur_entry, links);
3154 }
3155
3156 if (cur_entry != NULL) {
3157 /*
3158 * If the request has no flags set,
3159 * remove the entry.
3160 */
3161 added &= ~cur_entry->event_enable;
3162 if (csa->event_enable == 0) {
3163 SLIST_REMOVE(async_head, cur_entry,
3164 async_node, links);
3165 csa->ccb_h.path->device->refcount--;
3166 free(cur_entry, M_DEVBUF);
3167 } else {
3168 cur_entry->event_enable = csa->event_enable;
3169 }
3170 } else {
3171 cur_entry = malloc(sizeof(*cur_entry),
3172 M_DEVBUF, M_INTWAIT);
3173 cur_entry->event_enable = csa->event_enable;
3174 cur_entry->callback_arg = csa->callback_arg;
3175 cur_entry->callback = csa->callback;
3176 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3177 csa->ccb_h.path->device->refcount++;
3178 }
3179
3180 if ((added & AC_FOUND_DEVICE) != 0) {
3181 /*
3182 * Get this peripheral up to date with all
3183 * the currently existing devices.
3184 */
3185 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3186 }
3187 if ((added & AC_PATH_REGISTERED) != 0) {
3188 /*
3189 * Get this peripheral up to date with all
3190 * the currently existing busses.
3191 */
3192 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3193 }
3194 splx(s);
3195 start_ccb->ccb_h.status = CAM_REQ_CMP;
3196 break;
3197 }
3198 case XPT_REL_SIMQ:
3199 {
3200 struct ccb_relsim *crs;
3201 struct cam_ed *dev;
3202 int s;
3203
3204 crs = &start_ccb->crs;
3205 dev = crs->ccb_h.path->device;
3206 if (dev == NULL) {
3207
3208 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3209 break;
3210 }
3211
3212 s = splcam();
3213
3214 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3215
3216 if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3217
3218 /* Don't ever go below one opening */
3219 if (crs->openings > 0) {
3220 xpt_dev_ccbq_resize(crs->ccb_h.path,
3221 crs->openings);
3222
3223 if (bootverbose) {
3224 xpt_print_path(crs->ccb_h.path);
3225 printf("tagged openings "
3226 "now %d\n",
3227 crs->openings);
3228 }
3229 }
3230 }
3231 }
3232
3233 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3234
3235 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3236
3237 /*
3238 * Just extend the old timeout and decrement
3239 * the freeze count so that a single timeout
3240 * is sufficient for releasing the queue.
3241 */
3242 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3243				callout_stop(&dev->c_handle);
3244 } else {
3245
3246 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3247 }
3248
3249 callout_reset(&dev->c_handle,
3250 (crs->release_timeout * hz) / 1000,
3251 xpt_release_devq_timeout, dev);
3252
3253 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3254
3255 }
3256
3257 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3258
3259 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3260 /*
3261 * Decrement the freeze count so that a single
3262 * completion is still sufficient to unfreeze
3263 * the queue.
3264 */
3265 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3266 } else {
3267
3268 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3269 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3270 }
3271 }
3272
3273 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3274
3275 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3276 || (dev->ccbq.dev_active == 0)) {
3277
3278 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3279 } else {
3280
3281 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3282 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3283 }
3284 }
3285 splx(s);
3286
3287 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3288
3289 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3290 /*run_queue*/TRUE);
3291 }
3292 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3293 start_ccb->ccb_h.status = CAM_REQ_CMP;
3294 break;
3295 }
3296 case XPT_SCAN_BUS:
3297 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3298 break;
3299 case XPT_SCAN_LUN:
3300 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3301 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3302 start_ccb);
3303 break;
3304 case XPT_DEBUG: {
3305#ifdef CAMDEBUG
3306 int s;
3307
3308 s = splcam();
3309#ifdef CAM_DEBUG_DELAY
3310 cam_debug_delay = CAM_DEBUG_DELAY;
3311#endif
3312 cam_dflags = start_ccb->cdbg.flags;
3313 if (cam_dpath != NULL) {
3314 xpt_free_path(cam_dpath);
3315 cam_dpath = NULL;
3316 }
3317
3318 if (cam_dflags != CAM_DEBUG_NONE) {
3319 if (xpt_create_path(&cam_dpath, xpt_periph,
3320 start_ccb->ccb_h.path_id,
3321 start_ccb->ccb_h.target_id,
3322 start_ccb->ccb_h.target_lun) !=
3323 CAM_REQ_CMP) {
3324 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3325 cam_dflags = CAM_DEBUG_NONE;
3326 } else {
3327 start_ccb->ccb_h.status = CAM_REQ_CMP;
3328 xpt_print_path(cam_dpath);
3329 printf("debugging flags now %x\n", cam_dflags);
3330 }
3331 } else {
3332 cam_dpath = NULL;
3333 start_ccb->ccb_h.status = CAM_REQ_CMP;
3334 }
3335 splx(s);
3336#else /* !CAMDEBUG */
3337 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3338#endif /* CAMDEBUG */
3339 break;
3340 }
3341 case XPT_NOOP:
3342 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3343 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3344 start_ccb->ccb_h.status = CAM_REQ_CMP;
3345 break;
3346 default:
3347 case XPT_SDEV_TYPE:
3348 case XPT_TERM_IO:
3349 case XPT_ENG_INQ:
3350 /* XXX Implement */
3351 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3352 break;
3353 }
3354 splx(iopl);
3355}
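
/*
 * Example: the calling convention for xpt_action(), as exercised by
 * xptsetasyncbusfunc() above -- initialize the header for a path,
 * select a function code, dispatch, and inspect the status:
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	if (cpi.ccb_h.status == CAM_REQ_CMP)
 *		... cpi now holds the SIM's path inquiry data ...
 *
 * XPT_PATH_INQ is handed directly to the SIM's action routine above,
 * while queued function codes such as XPT_SCSI_IO are inserted on the
 * device queue and complete later.
 */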
3356
3357void
3358xpt_polled_action(union ccb *start_ccb)
3359{
3360 int s;
3361 u_int32_t timeout;
3362 struct cam_sim *sim;
3363 struct cam_devq *devq;
3364 struct cam_ed *dev;
3365
3366 timeout = start_ccb->ccb_h.timeout;
3367 sim = start_ccb->ccb_h.path->bus->sim;
3368 devq = sim->devq;
3369 dev = start_ccb->ccb_h.path->device;
3370
3371 s = splcam();
3372
3373 /*
3374 * Steal an opening so that no other queued requests
3375 * can get it before us while we simulate interrupts.
3376 */
3377 dev->ccbq.devq_openings--;
3378 dev->ccbq.dev_openings--;
3379
 3380	while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3381 && (--timeout > 0)) {
3382 DELAY(1000);
3383 (*(sim->sim_poll))(sim);
3384 swi_camnet(NULL);
3385 swi_cambio(NULL);
3386 }
3387
3388 dev->ccbq.devq_openings++;
3389 dev->ccbq.dev_openings++;
3390
3391 if (timeout != 0) {
3392 xpt_action(start_ccb);
 3393		while (--timeout > 0) {
3394 (*(sim->sim_poll))(sim);
3395 swi_camnet(NULL);
3396 swi_cambio(NULL);
3397 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3398 != CAM_REQ_INPROG)
3399 break;
3400 DELAY(1000);
3401 }
3402 if (timeout == 0) {
3403 /*
3404 * XXX Is it worth adding a sim_timeout entry
3405 * point so we can attempt recovery? If
3406 * this is only used for dumps, I don't think
3407 * it is.
3408 */
3409 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3410 }
3411 } else {
3412 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3413 }
3414 splx(s);
3415}
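
/*
 * Example (hypothetical sketch): polled dispatch is meant for contexts
 * such as crash dumps where interrupts are not serviced. The timeout
 * comes from the ccb header in milliseconds and is consumed roughly
 * 1ms per poll iteration, so a five second limit looks like:
 *
 *	union ccb ccb;
 *
 *	xpt_setup_ccb(&ccb.ccb_h, path, 1);
 *	ccb.ccb_h.func_code = XPT_RESET_DEV;
 *	ccb.ccb_h.timeout = 5000;
 *	xpt_polled_action(&ccb);
 *	if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT)
 *		... the SIM never completed the request ...
 */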
3416
3417/*
 3418 * Schedule a peripheral driver to receive a ccb when its
3419 * target device has space for more transactions.
3420 */
3421void
3422xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3423{
3424 struct cam_ed *device;
3425 int s;
3426 int runq;
3427
3428 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3429 device = perph->path->device;
3430 s = splsoftcam();
3431 if (periph_is_queued(perph)) {
3432 /* Simply reorder based on new priority */
3433 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3434 (" change priority to %d\n", new_priority));
3435 if (new_priority < perph->pinfo.priority) {
3436 camq_change_priority(&device->drvq,
3437 perph->pinfo.index,
3438 new_priority);
3439 }
3440 runq = 0;
3441 } else {
3442 /* New entry on the queue */
3443 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3444 (" added periph to queue\n"));
3445 perph->pinfo.priority = new_priority;
3446 perph->pinfo.generation = ++device->drvq.generation;
3447 camq_insert(&device->drvq, &perph->pinfo);
3448 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3449 }
3450 splx(s);
3451 if (runq != 0) {
3452 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3453 (" calling xpt_run_devq\n"));
3454 xpt_run_dev_allocq(perph->path->bus);
3455 }
3456}
3457
3458
3459/*
3460 * Schedule a device to run on a given queue.
3461 * If the device was inserted as a new entry on the queue,
3462 * return 1 meaning the device queue should be run. If we
3463 * were already queued, implying someone else has already
3464 * started the queue, return 0 so the caller doesn't attempt
 3465 * to run the queue. Must be run at splsoftcam (or splcam,
 3466 * since that encompasses splsoftcam).
3467 */
3468static int
3469xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3470 u_int32_t new_priority)
3471{
3472 int retval;
3473 u_int32_t old_priority;
3474
3475 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3476
3477 old_priority = pinfo->priority;
3478
3479 /*
3480 * Are we already queued?
3481 */
3482 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3483 /* Simply reorder based on new priority */
3484 if (new_priority < old_priority) {
3485 camq_change_priority(queue, pinfo->index,
3486 new_priority);
3487 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3488 ("changed priority to %d\n",
3489 new_priority));
3490 }
3491 retval = 0;
3492 } else {
3493 /* New entry on the queue */
3494 if (new_priority < old_priority)
3495 pinfo->priority = new_priority;
3496
3497 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3498 ("Inserting onto queue\n"));
3499 pinfo->generation = ++queue->generation;
3500 camq_insert(queue, pinfo);
3501 retval = 1;
3502 }
3503 return (retval);
3504}
3505
3506static void
3507xpt_run_dev_allocq(struct cam_eb *bus)
3508{
3509 struct cam_devq *devq;
3510 int s;
3511
3512 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3513 devq = bus->sim->devq;
3514
3515 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3516 (" qfrozen_cnt == 0x%x, entries == %d, "
3517 "openings == %d, active == %d\n",
3518 devq->alloc_queue.qfrozen_cnt,
3519 devq->alloc_queue.entries,
3520 devq->alloc_openings,
3521 devq->alloc_active));
3522
3523 s = splsoftcam();
3524 devq->alloc_queue.qfrozen_cnt++;
3525 while ((devq->alloc_queue.entries > 0)
3526 && (devq->alloc_openings > 0)
3527 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3528 struct cam_ed_qinfo *qinfo;
3529 struct cam_ed *device;
3530 union ccb *work_ccb;
3531 struct cam_periph *drv;
3532 struct camq *drvq;
3533
3534 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3535 CAMQ_HEAD);
3536 device = qinfo->device;
3537
3538 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3539 ("running device %p\n", device));
3540
3541 drvq = &device->drvq;
3542
3543#ifdef CAMDEBUG
3544 if (drvq->entries <= 0) {
3545 panic("xpt_run_dev_allocq: "
3546 "Device on queue without any work to do");
3547 }
3548#endif
3549 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3550 devq->alloc_openings--;
3551 devq->alloc_active++;
3552 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3553 splx(s);
3554 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3555 drv->pinfo.priority);
3556 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3557 ("calling periph start\n"));
3558 drv->periph_start(drv, work_ccb);
3559 } else {
3560 /*
3561 * Malloc failure in alloc_ccb
3562 */
3563 /*
3564 * XXX add us to a list to be run from free_ccb
3565 * if we don't have any ccbs active on this
3566 * device queue otherwise we may never get run
3567 * again.
3568 */
3569 break;
3570 }
3571
3572 /* Raise IPL for possible insertion and test at top of loop */
3573 s = splsoftcam();
3574
3575 if (drvq->entries > 0) {
3576 /* We have more work. Attempt to reschedule */
3577 xpt_schedule_dev_allocq(bus, device);
3578 }
3579 }
3580 devq->alloc_queue.qfrozen_cnt--;
3581 splx(s);
3582}
3583
3584static void
3585xpt_run_dev_sendq(struct cam_eb *bus)
3586{
3587 struct cam_devq *devq;
3588 int s;
3589
3590 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3591
3592 devq = bus->sim->devq;
3593
3594 s = splcam();
3595 devq->send_queue.qfrozen_cnt++;
3596 splx(s);
3597 s = splsoftcam();
3598 while ((devq->send_queue.entries > 0)
3599 && (devq->send_openings > 0)) {
3600 struct cam_ed_qinfo *qinfo;
3601 struct cam_ed *device;
3602 union ccb *work_ccb;
3603 struct cam_sim *sim;
3604 int ospl;
3605
3606 ospl = splcam();
3607 if (devq->send_queue.qfrozen_cnt > 1) {
3608 splx(ospl);
3609 break;
3610 }
3611
3612 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3613 CAMQ_HEAD);
3614 device = qinfo->device;
3615
3616 /*
3617 * If the device has been "frozen", don't attempt
3618 * to run it.
3619 */
3620 if (device->qfrozen_cnt > 0) {
3621 splx(ospl);
3622 continue;
3623 }
3624
3625 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3626 ("running device %p\n", device));
3627
3628 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3629 if (work_ccb == NULL) {
3630 printf("device on run queue with no ccbs???\n");
3631 splx(ospl);
3632 continue;
3633 }
3634
3635 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3636
3637 if (num_highpower <= 0) {
3638 /*
3639 * We got a high power command, but we
3640 * don't have any available slots. Freeze
3641 * the device queue until we have a slot
3642 * available.
3643 */
3644 device->qfrozen_cnt++;
3645 STAILQ_INSERT_TAIL(&highpowerq,
3646 &work_ccb->ccb_h,
3647 xpt_links.stqe);
3648
3649 splx(ospl);
3650 continue;
3651 } else {
3652 /*
3653 * Consume a high power slot while
3654 * this ccb runs.
3655 */
3656 num_highpower--;
3657 }
3658 }
3659 devq->active_dev = device;
3660 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3661
3662 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3663 splx(ospl);
3664
3665 devq->send_openings--;
3666 devq->send_active++;
3667
3668 if (device->ccbq.queue.entries > 0)
3669 xpt_schedule_dev_sendq(bus, device);
3670
 3671		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3672 /*
3673 * The client wants to freeze the queue
3674 * after this CCB is sent.
3675 */
3676 ospl = splcam();
3677 device->qfrozen_cnt++;
3678 splx(ospl);
3679 }
3680
3681 splx(s);
3682
3683 /* In Target mode, the peripheral driver knows best... */
3684 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3685 if ((device->inq_flags & SID_CmdQue) != 0
3686 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3687 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3688 else
3689 /*
3690 * Clear this in case of a retried CCB that
3691 * failed due to a rejected tag.
3692 */
3693 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3694 }
3695
3696 /*
3697 * Device queues can be shared among multiple sim instances
3698 * that reside on different busses. Use the SIM in the queue
3699 * CCB's path, rather than the one in the bus that was passed
3700 * into this function.
3701 */
3702 sim = work_ccb->ccb_h.path->bus->sim;
3703 (*(sim->sim_action))(sim, work_ccb);
3704
3705 ospl = splcam();
3706 devq->active_dev = NULL;
3707 splx(ospl);
3708 /* Raise IPL for possible insertion and test at top of loop */
3709 s = splsoftcam();
3710 }
3711 splx(s);
3712 s = splcam();
3713 devq->send_queue.qfrozen_cnt--;
3714 splx(s);
3715}
3716
3717/*
3718 * This function merges stuff from the slave ccb into the master ccb, while
3719 * keeping important fields in the master ccb constant.
3720 */
3721void
3722xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3723{
3724 /*
3725 * Pull fields that are valid for peripheral drivers to set
3726 * into the master CCB along with the CCB "payload".
3727 */
3728 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3729 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3730 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3731 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3732 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3733 sizeof(union ccb) - sizeof(struct ccb_hdr));
3734}
3735
3736void
3737xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3738{
3739 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3740	callout_init(&ccb_h->timeout_ch);
3741 ccb_h->pinfo.priority = priority;
3742 ccb_h->path = path;
3743 ccb_h->path_id = path->bus->path_id;
3744 if (path->target)
3745 ccb_h->target_id = path->target->target_id;
3746 else
3747 ccb_h->target_id = CAM_TARGET_WILDCARD;
3748 if (path->device) {
3749 ccb_h->target_lun = path->device->lun_id;
3750 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3751 } else {
3752 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3753 }
3754 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3755 ccb_h->flags = 0;
3756}
3757
3758/* Path manipulation functions */
3759cam_status
3760xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3761 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3762{
3763 struct cam_path *path;
3764 cam_status status;
3765
 3766	path = malloc(sizeof(*path), M_DEVBUF, M_INTWAIT);
3767 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3768 if (status != CAM_REQ_CMP) {
3769 free(path, M_DEVBUF);
3770 path = NULL;
3771 }
3772 *new_path_ptr = path;
3773 return (status);
3774}
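
/*
 * Example (hypothetical): the usual create/use/free lifecycle for a
 * path, e.g. from a peripheral driver ("periph" and the id values are
 * illustrative):
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, periph, path_id, target_id,
 *			    lun_id) != CAM_REQ_CMP)
 *		... bail out; no path was allocated ...
 *	... use the path to build ccbs or register async callbacks ...
 *	xpt_free_path(path);
 */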
3775
3776static cam_status
3777xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3778 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3779{
3780 struct cam_eb *bus;
3781 struct cam_et *target;
3782 struct cam_ed *device;
3783 cam_status status;
3784 int s;
3785
3786 status = CAM_REQ_CMP; /* Completed without error */
3787 target = NULL; /* Wildcarded */
3788 device = NULL; /* Wildcarded */
3789
3790 /*
3791 * We will potentially modify the EDT, so block interrupts
3792 * that may attempt to create cam paths.
3793 */
3794 s = splcam();
3795 bus = xpt_find_bus(path_id);
3796 if (bus == NULL) {
3797 status = CAM_PATH_INVALID;
3798 } else {
3799 target = xpt_find_target(bus, target_id);
3800 if (target == NULL) {
3801 /* Create one */
3802 struct cam_et *new_target;
3803
3804 new_target = xpt_alloc_target(bus, target_id);
3805 if (new_target == NULL) {
3806 status = CAM_RESRC_UNAVAIL;
3807 } else {
3808 target = new_target;
3809 }
3810 }
3811 if (target != NULL) {
3812 device = xpt_find_device(target, lun_id);
3813 if (device == NULL) {
3814 /* Create one */
3815 struct cam_ed *new_device;
3816
3817 new_device = xpt_alloc_device(bus,
3818 target,
3819 lun_id);
3820 if (new_device == NULL) {
3821 status = CAM_RESRC_UNAVAIL;
3822 } else {
3823 device = new_device;
3824 }
3825 }
3826 }
3827 }
3828 splx(s);
3829
3830 /*
3831 * Only touch the user's data if we are successful.
3832 */
3833 if (status == CAM_REQ_CMP) {
3834 new_path->periph = perph;
3835 new_path->bus = bus;
3836 new_path->target = target;
3837 new_path->device = device;
3838 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3839 } else {
3840 if (device != NULL)
3841 xpt_release_device(bus, target, device);
3842 if (target != NULL)
3843 xpt_release_target(bus, target);
3844 if (bus != NULL)
3845 xpt_release_bus(bus);
3846 }
3847 return (status);
3848}
3849
3850static void
3851xpt_release_path(struct cam_path *path)
3852{
3853 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3854 if (path->device != NULL) {
3855 xpt_release_device(path->bus, path->target, path->device);
3856 path->device = NULL;
3857 }
3858 if (path->target != NULL) {
3859 xpt_release_target(path->bus, path->target);
3860 path->target = NULL;
3861 }
3862 if (path->bus != NULL) {
3863 xpt_release_bus(path->bus);
3864 path->bus = NULL;
3865 }
3866}
3867
3868void
3869xpt_free_path(struct cam_path *path)
3870{
3871 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3872 xpt_release_path(path);
3873 free(path, M_DEVBUF);
3874}
3875
3876
3877/*
3878 * Return -1 if the paths do not match, 0 for an exact match, 1 for a
3879 * match with wildcards in path1, and 2 for a match with wildcards in path2.
3880 */
3881int
3882xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3883{
3884 int retval = 0;
3885
3886 if (path1->bus != path2->bus) {
3887 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3888 retval = 1;
3889 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3890 retval = 2;
3891 else
3892 return (-1);
3893 }
3894 if (path1->target != path2->target) {
3895 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3896 if (retval == 0)
3897 retval = 1;
3898 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3899 retval = 2;
3900 else
3901 return (-1);
3902 }
3903 if (path1->device != path2->device) {
3904 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3905 if (retval == 0)
3906 retval = 1;
3907 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3908 retval = 2;
3909 else
3910 return (-1);
3911 }
3912 return (retval);
3913}
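/*
 * Example (illustrative values): comparing a fully specified path
 * (bus 0, target 3, lun 0) against a wildcard path on the same bus
 * (target CAM_TARGET_WILDCARD, lun CAM_LUN_WILDCARD) yields 2, since
 * the wildcards are in path2; swapping the arguments yields 1.
 */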
3914
3915void
3916xpt_print_path(struct cam_path *path)
3917{
3918 if (path == NULL)
3919 printf("(nopath): ");
3920 else {
3921 if (path->periph != NULL)
3922 printf("(%s%d:", path->periph->periph_name,
3923 path->periph->unit_number);
3924 else
3925 printf("(noperiph:");
3926
3927 if (path->bus != NULL)
3928 printf("%s%d:%d:", path->bus->sim->sim_name,
3929 path->bus->sim->unit_number,
3930 path->bus->sim->bus_id);
3931 else
3932 printf("nobus:");
3933
3934 if (path->target != NULL)
3935 printf("%d:", path->target->target_id);
3936 else
3937 printf("X:");
3938
3939 if (path->device != NULL)
3940 printf("%d): ", path->device->lun_id);
3941 else
3942 printf("X): ");
3943 }
3944}
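/*
 * For example (illustrative device names), a fully specified path
 * prints as "(da0:ahc0:0:3:0): ", while a path with no periph, target,
 * or device prints as "(noperiph:ahc0:0:X:X): ".
 */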
3945
3946path_id_t
3947xpt_path_path_id(struct cam_path *path)
3948{
3949 return(path->bus->path_id);
3950}
3951
3952target_id_t
3953xpt_path_target_id(struct cam_path *path)
3954{
3955 if (path->target != NULL)
3956 return (path->target->target_id);
3957 else
3958 return (CAM_TARGET_WILDCARD);
3959}
3960
3961lun_id_t
3962xpt_path_lun_id(struct cam_path *path)
3963{
3964 if (path->device != NULL)
3965 return (path->device->lun_id);
3966 else
3967 return (CAM_LUN_WILDCARD);
3968}
3969
3970struct cam_sim *
3971xpt_path_sim(struct cam_path *path)
3972{
3973 return (path->bus->sim);
3974}
3975
3976struct cam_periph*
3977xpt_path_periph(struct cam_path *path)
3978{
3979 return (path->periph);
3980}
3981
3982/*
3983 * Release a CAM control block for the caller. Remit the cost of the structure
3984 * to the device referenced by the path.  If this device had no 'credits'
3985 * and peripheral drivers have registered async callbacks for this
3986 * notification, call them now.
3987 */
3988void
3989xpt_release_ccb(union ccb *free_ccb)
3990{
3991 int s;
3992 struct cam_path *path;
3993 struct cam_ed *device;
3994 struct cam_eb *bus;
3995
3996 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3997 path = free_ccb->ccb_h.path;
3998 device = path->device;
3999 bus = path->bus;
4000 s = splsoftcam();
4001 cam_ccbq_release_opening(&device->ccbq);
4002 if (xpt_ccb_count > xpt_max_ccbs) {
4003 xpt_free_ccb(free_ccb);
4004 xpt_ccb_count--;
4005 } else {
4006 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4007 }
4008 bus->sim->devq->alloc_openings++;
4009 bus->sim->devq->alloc_active--;
4010 /* XXX Turn this into an inline function - xpt_run_device?? */
4011 if ((device_is_alloc_queued(device) == 0)
4012 && (device->drvq.entries > 0)) {
4013 xpt_schedule_dev_allocq(bus, device);
4014 }
4015 splx(s);
4016 if (dev_allocq_is_runnable(bus->sim->devq))
4017 xpt_run_dev_allocq(bus);
4018}
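/*
 * Illustrative sketch (not compiled in): the usual CCB lifetime in a
 * peripheral driver -- obtained with cam_periph_getccb(), run to
 * completion, and returned here via xpt_release_ccb().  The priority
 * value is a placeholder.
 */
#if 0
static void
example_ccb_lifetime(struct cam_periph *periph)
{
	union ccb *ccb;

	ccb = cam_periph_getccb(periph, /*priority*/1);
	/* ... fill in the request and run it to completion ... */
	xpt_release_ccb(ccb);
}
#endif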
4019
4020/* Functions accessed by SIM drivers */
4021
4022/*
4023 * A sim structure, listing the SIM entry points and instance
4024 * identification info, is passed to xpt_bus_register to hook the SIM
4025 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4026 * for this new bus, places it in the list of busses, and assigns
4027 * it a path_id.  The path_id may be influenced by "hard wiring"
4028 * information specified by the user.  Once interrupt services are
4029 * available, the bus will be probed.
4030 */
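/*
 * Illustrative sketch (not compiled in): the registration sequence a
 * SIM driver would typically follow.  foo_action, foo_poll, softc,
 * unit, and the queue depths are placeholders for the driver's own
 * entry points and state.
 */
#if 0
static int
example_attach(void *softc, int unit)
{
	struct cam_devq *devq;
	struct cam_sim *sim;

	devq = cam_simq_alloc(/*max_sim_transactions*/32);
	if (devq == NULL)
		return (ENOMEM);
	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc, unit,
			    /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/0, devq);
	if (sim == NULL)
		return (ENOMEM);
	return (xpt_bus_register(sim, /*bus*/0) == CAM_SUCCESS ? 0 : ENXIO);
}
#endif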
4031int32_t
4032xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4033{
4034 struct cam_eb *new_bus;
4035 struct cam_eb *old_bus;
4036 struct ccb_pathinq cpi;
4037 int s;
4038
4039 sim->bus_id = bus;
4040	new_bus = malloc(sizeof(*new_bus), M_DEVBUF, M_INTWAIT);
4041
4042 if (strcmp(sim->sim_name, "xpt") != 0) {
4043 sim->path_id =
4044 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4045 }
4046
4047 TAILQ_INIT(&new_bus->et_entries);
4048 new_bus->path_id = sim->path_id;
4049 new_bus->sim = sim;
4050	++sim->refcount;
4051 timevalclear(&new_bus->last_reset);
4052 new_bus->flags = 0;
4053 new_bus->refcount = 1; /* Held until a bus_deregister event */
4054 new_bus->generation = 0;
4055 s = splcam();
4056 old_bus = TAILQ_FIRST(&xpt_busses);
4057 while (old_bus != NULL
4058 && old_bus->path_id < new_bus->path_id)
4059 old_bus = TAILQ_NEXT(old_bus, links);
4060 if (old_bus != NULL)
4061 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4062 else
4063 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4064 bus_generation++;
4065 splx(s);
4066
4067 /* Notify interested parties */
4068 if (sim->path_id != CAM_XPT_PATH_ID) {
4069 struct cam_path path;
4070