/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/bus.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t       event_enable;   /* Async Event enables */
        void            (*callback)(void *arg, u_int32_t code,
                                    struct cam_path *path, void *args);
        void            *callback_arg;
};
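
/*
 * Usage sketch (not a function in this file): a peripheral driver or
 * SIM typically ends up on one of these lists by filling out a
 * ccb_setasync CCB and handing it to xpt_action().  Roughly (handler
 * name and priority here are hypothetical):
 *
 *      struct ccb_setasync csa;
 *
 *      xpt_setup_ccb(&csa.ccb_h, path, 5);
 *      csa.ccb_h.func_code = XPT_SASYNC_CB;
 *      csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *      csa.callback = mydriver_async;
 *      csa.callback_arg = softc;
 *      xpt_action((union ccb *)&csa);
 *
 * The XPT then allocates an async_node and links it onto the async
 * list for the addressed path.
 */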

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
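
/*
 * Because of the #ifndef guard above, the limit can be overridden at
 * compile time.  If the option is wired into the build's options file
 * (it would be picked up here through opt_cam.h), a kernel config line
 * along these lines would do it (sketch only):
 *
 *      options CAM_MAX_HIGHPOWER=8
 */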

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct    cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct  cam_ed_qinfo alloc_ccb_entry;
        struct  cam_ed_qinfo send_ccb_entry;
        struct  cam_et   *target;
        lun_id_t         lun_id;
        struct  camq drvq;              /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
        struct  async_list asyncs;      /* Async callback info for this B/T/L */
        struct  periph_list periphs;    /* All attached devices */
        u_int   generation;             /* Generation number */
        struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
        struct  xpt_quirk_entry *quirk; /* Oddities about this device */
                                        /* Storage for the inquiry data */
        struct  scsi_inquiry_data inq_data;
        u_int8_t         inq_flags;     /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t         queue_flags;   /* Queue flags from the control page */
        u_int8_t         serial_num_len;
        u_int8_t         *serial_num;
        u_int32_t        qfrozen_cnt;
        u_int32_t        flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
        u_int32_t        tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t        refcount;
        struct           callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct  cam_eb  *bus;
        target_id_t     target_id;
        u_int32_t       refcount;
        u_int           generation;
        struct          timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb)  links;
        path_id_t            path_id;
        struct cam_sim       *sim;
        struct timeval       last_reset;
        u_int32_t            flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t            refcount;
        u_int                generation;
};
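
/*
 * Taken together, cam_eb -> cam_et -> cam_ed form the EDT hierarchy.
 * A full walk of the table, which is what the xpt*traverse() helpers
 * further down implement, amounts to this sketch (the visit() callback
 * is hypothetical; xpt_busses is declared below):
 *
 *      struct cam_eb *bus;
 *      struct cam_et *target;
 *      struct cam_ed *device;
 *
 *      for (bus = TAILQ_FIRST(&xpt_busses); bus != NULL;
 *           bus = TAILQ_NEXT(bus, links))
 *              for (target = TAILQ_FIRST(&bus->et_entries); target != NULL;
 *                   target = TAILQ_NEXT(target, links))
 *                      for (device = TAILQ_FIRST(&target->ed_entries);
 *                           device != NULL;
 *                           device = TAILQ_NEXT(device, links))
 *                              visit(device);
 *
 * The real traversal code additionally takes references and rechecks
 * generation counts so that a walk survives concurrent insertions and
 * removals.
 */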

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb     *bus;
        struct cam_et     *target;
        struct cam_ed     *device;
};

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
        u_int mintags;
        u_int maxtags;
};
#define CAM_SCSI2_MAXLUN        8
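
/*
 * Quirk lookup is a pattern match against a device's inquiry data.
 * xpt_find_quirk() (below) resolves a device's entry with, in essence,
 * the generic matcher from cam.c:
 *
 *      quirk = (struct xpt_quirk_entry *)cam_quirkmatch(
 *          (caddr_t)&device->inq_data, (caddr_t)xpt_quirk_table,
 *          xpt_quirk_table_size, sizeof(*xpt_quirk_table),
 *          scsi_inquiry_match);
 *
 * where scsi_inquiry_match() compares the device type, the removable
 * flag, and the shell-glob vendor/product/revision strings in inq_pat.
 */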

typedef enum {
        XPT_FLAG_OPEN           = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags       flags;
        u_int32_t       generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
                /*
                 * The Seagate Medalist Pro drives have very poor write
                 * performance with anything more than 2 tags.
                 *
                 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
                 * Drive:  <SEAGATE ST36530N 1444>
                 *
                 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
                 * Drive:  <SEAGATE ST34520W 1281>
                 *
                 * No one has actually reported that the 9G version
                 * (ST39140*) of the Medalist Pro has the same problem, but
                 * we're assuming that it does because the 4G and 6.5G
                 * versions of the drive are broken.
                 */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR:  kern/10398
                 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive:  DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by:  Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR:            misc/18793
                 * Submitted by:  Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                  /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
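
/*
 * The matcher returns the first entry that fits, so more specific
 * entries must precede the catch-all "*"/"*"/"*" default at the end of
 * the table; new quirks belong above it.
 */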

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;
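
/*
 * The low nibble of a dev_match_ret carries flags (e.g. DM_RET_COPY,
 * "copy this node into the user's buffer") and the high nibble one of
 * the mutually exclusive actions, so the match functions below can
 * return combinations such as:
 *
 *      return (DM_RET_DESCEND | DM_RET_COPY);
 *
 * meaning "copy this entry and keep walking into its children".
 */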

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;
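
/*
 * Completed CCBs are queued onto cam_bioq or cam_netq according to the
 * class of the owning peripheral and drained at software interrupt
 * time; the swi_cambio()/swi_camnet() handlers declared below hand the
 * corresponding queue to camisr().
 */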

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
        /* open */      xptopen,
        /* close */     xptclose,
        /* read */      noread,
        /* write */     nowrite,
        /* ioctl */     xptioctl,
        /* poll */      nopoll,
        /* mmap */      nommap,
        /* strategy */  nostrategy,
        /* name */      "xpt",
        /* maj */       XPT_CDEV_MAJOR,
        /* dump */      nodump,
        /* psize */     nopsize,
        /* flags */     0,
        /* bmaj */      -1
};
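
/*
 * This cdevsw backs the /dev/xpt0 node created in xpt_periph_init().
 * Userland tools (e.g. camcontrol, by way of libcam) reach the
 * transport layer through the CAMIOCOMMAND and CAMGETPASSTHRU ioctls
 * implemented in xptioctl() below.
 */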

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef  CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
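
/*
 * Concretely, a kernel config that turns the debugging code on would
 * contain lines along these lines (a sketch patterned after LINT; -1
 * acts as a wildcard):
 *
 *      options CAMDEBUG
 *      options CAM_DEBUG_BUS=-1
 *      options CAM_DEBUG_TARGET=-1
 *      options CAM_DEBUG_LUN=-1
 *      options CAM_DEBUG_FLAGS="CAM_DEBUG_INFO|CAM_DEBUG_CDB"
 */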

/* Our boot-time initialization hook */
static void     xpt_init(void *);
SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);

static cam_status       xpt_compile_path(struct cam_path *new_path,
                                         struct cam_periph *perph,
                                         path_id_t path_id,
                                         target_id_t target_id,
                                         lun_id_t lun_id);

static void             xpt_release_path(struct cam_path *path);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static void             xpt_dev_async(u_int32_t async_code,
                                      struct cam_eb *bus,
                                      struct cam_et *target,
                                      struct cam_ed *device,
                                      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                  u_int32_t new_priority);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                         int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
                 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                                  lun_id_t lun_id);
static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                                    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void      xpt_scan_lun(struct cam_periph *periph,
                              struct cam_path *path, cam_flags flags,
                              union ccb *ccb);
static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t    xptconfigbuscountfunc;
static xpt_busfunc_t    xptconfigfunc;
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static swihand_t swi_camnet;
static swihand_t swi_cambio;
static void      camisr(cam_isrq_t *queue);
#if 0
static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void      xptasync(struct cam_periph *periph,
                          u_int32_t code, cam_path *path);
#endif
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       int num_patterns, struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                                            void *arg);
#endif
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
#ifdef notusedyet
static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                                            void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static cam_status       proberegister(struct cam_periph *periph,
                                      void *arg);
static void      probeschedule(struct cam_periph *probe_periph);
static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
static void      proberequestdefaultnegotiation(struct cam_periph *periph);
static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
static void      probecleanup(struct cam_periph *periph);
static void      xpt_find_quirk(struct cam_ed *device);
static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                           struct cam_ed *device,
                                           int async_update);
static void      xpt_toggle_tags(struct cam_path *path);
static void      xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int     retval;

        if (dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)addr;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;

                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
         * input, with the peripheral driver name and unit name filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass"), and unit number are passed back in the ccb.
         * The current device generation number, the index into the device
         * peripheral driver list, and the status are also passed back.  Note
         * that since we do everything in one pass, unlike the XPT_GDEVLIST
         * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it with splcam protection.
         */
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                int unit;
                int cur_generation;
                int base_periph_found;
                int splbreaknum;
                int s;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                /*
                 * Every 100 devices, we want to drop our spl protection to
                 * give the software interrupt handler a chance to run.
                 * Most systems won't run into this check, but this should
                 * avoid starvation in the software interrupt handler in
                 * large systems.
                 */
                splbreaknum = 100;

                ccb = (union ccb *)addr;

                base_periph_found = 0;

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                s = splcam();
ptstartover:
                cur_generation = xsoftc.generation;

                /* first find our driver in the list of drivers */
                for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
                     *p_drv != NULL; p_drv++)
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;

                if (*p_drv == NULL) {
                        splx(s);
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {

                        if (periph->unit_number == unit) {
                                break;
                        } else if (--splbreaknum == 0) {
                                splx(s);
                                s = splcam();
                                splbreaknum = 100;
                                if (cur_generation != xsoftc.generation)
                                       goto ptstartover;
                        }
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = 1;
                        device = periph->path->device;
                        for (i = 0, periph = device->periphs.slh_first;
                             periph != NULL;
                             periph = periph->periph_links.sle_next, i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strcpy(ccb->cgdl.periph_name,
                                               periph->periph_name);
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (periph->periph_links.sle_next)
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                       CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  e.g. the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        /*
                         * It is unfortunate that this is even necessary,
                         * but there are many, many clueless users out there.
                         * If this is true, the user is looking for the
                         * passthrough driver, but doesn't have one in his
                         * kernel.
                         */
                        if (base_periph_found == 1) {
                                printf("xptioctl: pass driver is not in the "
                                       "kernel\n");
                                printf("xptioctl: put \"device pass0\" in "
                                       "your kernel config file\n");
                        }
                }
                splx(s);
                break;
                }
        default:
                error = ENOTTY;
                break;
        }

        return(error);
}
1301
1302 /* Functions accessed by the peripheral drivers */
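/*
 * xpt_init() runs early in boot, before interrupts are enabled.  It
 * initializes the global bus list, the bio/net CCB queues, the free
 * CCB list and the high-power queue, registers the XPT itself as both
 * a SIM and a peripheral, and defers the actual bus probe (xpt_config)
 * until interrupts are available via an intr_config_hook.
 */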
1303 static void
1304 xpt_init(void *dummy)
1305 {
1307         struct cam_sim *xpt_sim;
1308         struct cam_path *path;
1309         struct cam_devq *devq;
1310         cam_status status;
1311
1312         TAILQ_INIT(&xpt_busses);
1313         TAILQ_INIT(&cam_bioq);
1314         TAILQ_INIT(&cam_netq);
1315         SLIST_INIT(&ccb_freeq);
1316         STAILQ_INIT(&highpowerq);
1317
1318         /*
1319          * The xpt layer is, itself, the equivalent of a SIM.
1320          * Allow 16 ccbs in the ccb pool for it.  This should
1321          * give decent parallelism when we probe busses and
1322          * perform other XPT functions.
1323          */
1324         devq = cam_simq_alloc(16);
1325         xpt_sim = cam_sim_alloc(xptaction,
1326                                 xptpoll,
1327                                 "xpt",
1328                                 /*softc*/NULL,
1329                                 /*unit*/0,
1330                                 /*max_dev_transactions*/0,
1331                                 /*max_tagged_dev_transactions*/0,
1332                                 devq);
1333         xpt_max_ccbs = 16;
1334                                 
1335         xpt_bus_register(xpt_sim, /*bus #*/0);
1336
1337         /*
1338          * Looking at the XPT from the SIM layer, the XPT is
1339          * the equivalent of a peripheral driver.  Allocate
1340          * a peripheral driver entry for us.
1341          */
1342         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1343                                       CAM_TARGET_WILDCARD,
1344                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1345                 printf("xpt_init: xpt_create_path failed with status %#x,"
1346                        " failing attach\n", status);
1347                 return;
1348         }
1349
1350         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1351                          path, NULL, 0, NULL);
1352         xpt_free_path(path);
1353
1354         xpt_sim->softc = xpt_periph;
1355
1356         /*
1357          * Register a callback for when interrupts are enabled.
1358          */
1359         xpt_config_hook =
1360             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1361                                               M_TEMP, M_NOWAIT | M_ZERO);
1362         if (xpt_config_hook == NULL) {
1363                 printf("xpt_init: Cannot malloc config hook "
1364                        "- failing attach\n");
1365                 return;
1366         }
1367
1368         xpt_config_hook->ich_func = xpt_config;
1369         if (config_intrhook_establish(xpt_config_hook) != 0) {
1370                 free(xpt_config_hook, M_TEMP);
1371                 printf("xpt_init: config_intrhook_establish failed "
1372                        "- failing attach\n");
                     return;
1373         }
1374
1375         /* Install our software interrupt handlers */
1376         register_swi(SWI_CAMNET, swi_camnet);
1377         register_swi(SWI_CAMBIO, swi_cambio);
1378 }
1379
1380 static cam_status
1381 xptregister(struct cam_periph *periph, void *arg)
1382 {
1383         if (periph == NULL) {
1384                 printf("xptregister: periph was NULL!!\n");
1385                 return(CAM_REQ_CMP_ERR);
1386         }
1387
1388         periph->softc = NULL;
1389
1390         xpt_periph = periph;
1391
1392         return(CAM_REQ_CMP);
1393 }
1394
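/*
 * Called by a peripheral driver once it has attached to a device.
 * Grows the device's driver queue by one slot so the new peripheral
 * can be scheduled, links it onto the device's peripheral list, and
 * bumps the generation counts so in-progress XPT_DEV_MATCH scans can
 * notice the change.
 */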
1395 int32_t
1396 xpt_add_periph(struct cam_periph *periph)
1397 {
1398         struct cam_ed *device;
1399         int32_t  status;
1400
1401         device = periph->path->device;
1402
1403         status = CAM_REQ_CMP;
1404
1405         if (device != NULL) {
1406                 int s;
1407                 struct periph_list *periph_head;
1408
1409                 periph_head = &device->periphs;
1410
1411                 /*
1412                  * Make room for this peripheral
1413                  * so it will fit in the queue
1414                  * when it's scheduled to run
1415                  */
1416                 s = splsoftcam();
1417                 status = camq_resize(&device->drvq,
1418                                      device->drvq.array_size + 1);
1419
1420                 device->generation++;
1421
1422                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1423
1424                 splx(s);
1425         }
1426
1427         xsoftc.generation++;
1428
1429         return (status);
1430 }
1431
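/*
 * The inverse of xpt_add_periph(): shrink the device's driver queue,
 * unlink the peripheral from the device's list, and bump the
 * generation counts.
 */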
1432 void
1433 xpt_remove_periph(struct cam_periph *periph)
1434 {
1435         struct cam_ed *device;
1436
1437         device = periph->path->device;
1438
1439         if (device != NULL) {
1440                 int s;
1441                 struct periph_list *periph_head;
1442
1443                 periph_head = &device->periphs;
1444                 
1445                 /* Release the slot for this peripheral */
1446                 s = splsoftcam();
1447                 camq_resize(&device->drvq, device->drvq.array_size - 1);
1448
1449                 device->generation++;
1450
1451                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1452
1453                 splx(s);
1454         }
1455
1456         xsoftc.generation++;
1457
1458 }
1459
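/*
 * Print the standard attach-time announcement for a peripheral.  The
 * output looks roughly like the following (illustrative only; the
 * exact fields depend on the inquiry data and negotiated settings):
 *
 *      da0 at ahc0 bus 0 target 0 lun 0
 *      da0: <SEAGATE ST39102LW 0004> Fixed Direct Access SCSI-2 device
 *      da0: 40.000MB/s transfers (20.000MHz, offset 15, 16bit), Tagged Queueing Enabled
 */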
1460 void
1461 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1462 {
1463         int s;
1464         u_int mb;
1465         struct cam_path *path;
1466         struct ccb_trans_settings cts;
1467
1468         path = periph->path;
1469         /*
1470          * To ensure that this is printed in one piece,
1471          * mask out CAM interrupts.
1472          */
1473         s = splsoftcam();
1474         printf("%s%d at %s%d bus %d target %d lun %d\n",
1475                periph->periph_name, periph->unit_number,
1476                path->bus->sim->sim_name,
1477                path->bus->sim->unit_number,
1478                path->bus->sim->bus_id,
1479                path->target->target_id,
1480                path->device->lun_id);
1481         printf("%s%d: ", periph->periph_name, periph->unit_number);
1482         scsi_print_inquiry(&path->device->inq_data);
1483         if ((bootverbose)
1484          && (path->device->serial_num_len > 0)) {
1485                 /* Don't wrap the screen  - print only the first 60 chars */
1486                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1487                        periph->unit_number, path->device->serial_num);
1488         }
1489         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1490         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1491         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1492         xpt_action((union ccb*)&cts);
1493         if (cts.ccb_h.status == CAM_REQ_CMP) {
1494                 u_int speed;
1495                 u_int freq;
1496
1497                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1498                   && cts.sync_offset != 0) {
1499                         freq = scsi_calc_syncsrate(cts.sync_period);
1500                         speed = freq;
1501                 } else {
1502                         struct ccb_pathinq cpi;
1503
1504                         /* Ask the SIM for its base transfer speed */
1505                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1506                         cpi.ccb_h.func_code = XPT_PATH_INQ;
1507                         xpt_action((union ccb *)&cpi);
1508
1509                         speed = cpi.base_transfer_speed;
1510                         freq = 0;
1511                 }
1512                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1513                         speed *= (0x01 << cts.bus_width);
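                /*
                 * Example: a 10.000MHz sync rate on a wide bus
                 * (cts.bus_width == 1) gives freq == 10000, so speed
                 * doubles to 20000 KB/s and prints as "20.000MB/s".
                 */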
1514                 mb = speed / 1000;
1515                 if (mb > 0)
1516                         printf("%s%d: %d.%03dMB/s transfers",
1517                                periph->periph_name, periph->unit_number,
1518                                mb, speed % 1000);
1519                 else
1520                         printf("%s%d: %dKB/s transfers", periph->periph_name,
1521                                periph->unit_number, speed);
1522                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1523                  && cts.sync_offset != 0) {
1524                         printf(" (%d.%03dMHz, offset %d", freq / 1000,
1525                                freq % 1000, cts.sync_offset);
1526                 }
1527                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1528                  && cts.bus_width > 0) {
1529                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1530                          && cts.sync_offset != 0) {
1531                                 printf(", ");
1532                         } else {
1533                                 printf(" (");
1534                         }
1535                         printf("%dbit)", 8 * (0x01 << cts.bus_width));
1536                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1537                         && cts.sync_offset != 0) {
1538                         printf(")");
1539                 }
1540
1541                 if (path->device->inq_flags & SID_CmdQue
1542                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1543                         printf(", Tagged Queueing Enabled");
1544                 }
1545
1546                 printf("\n");
1547         } else if (path->device->inq_flags & SID_CmdQue
1548                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1549                 printf("%s%d: Tagged Queueing Enabled\n",
1550                        periph->periph_name, periph->unit_number);
1551         }
1552
1553         /*
1554          * We only want to print the caller's announce string if they've
1555          * passed one in.
1556          */
1557         if (announce_string != NULL)
1558                 printf("%s%d: %s\n", periph->periph_name,
1559                        periph->unit_number, announce_string);
1560         splx(s);
1561 }
1562
1563
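/*
 * The xpt*match() routines below compare a single EDT node against the
 * user's list of match patterns and return a dev_match_ret bitmask:
 * DM_RET_COPY asks the caller to copy the node into the result buffer,
 * while the action bits (DM_RET_NONE/STOP/DESCEND/ERROR, extracted
 * with DM_RET_ACTION_MASK) tell the traversal code whether to keep
 * descending the tree.
 *
 * Match a single bus against any number of match patterns.
 */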
1564 static dev_match_ret
1565 xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
1566             struct cam_eb *bus)
1567 {
1568         dev_match_ret retval;
1569         int i;
1570
1571         retval = DM_RET_NONE;
1572
1573         /*
1574          * If we aren't given something to match against, that's an error.
1575          */
1576         if (bus == NULL)
1577                 return(DM_RET_ERROR);
1578
1579         /*
1580          * If there are no match entries, then this bus matches no
1581          * matter what.
1582          */
1583         if ((patterns == NULL) || (num_patterns == 0))
1584                 return(DM_RET_DESCEND | DM_RET_COPY);
1585
1586         for (i = 0; i < num_patterns; i++) {
1587                 struct bus_match_pattern *cur_pattern;
1588
1589                 /*
1590                  * If the pattern in question isn't for a bus node, we
1591                  * aren't interested.  However, we do indicate to the
1592                  * calling routine that we should continue descending the
1593                  * tree, since the user wants to match against lower-level
1594                  * EDT elements.
1595                  */
1596                 if (patterns[i].type != DEV_MATCH_BUS) {
1597                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1598                                 retval |= DM_RET_DESCEND;
1599                         continue;
1600                 }
1601
1602                 cur_pattern = &patterns[i].pattern.bus_pattern;
1603
1604                 /*
1605                  * If they want to match any bus node, we give them any
1606                  * bus node.
1607                  */
1608                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1609                         /* set the copy flag */
1610                         retval |= DM_RET_COPY;
1611
1612                         /*
1613                          * If we've already decided on an action, go ahead
1614                          * and return.
1615                          */
1616                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1617                                 return(retval);
1618                 }
1619
1620                 /*
1621                  * Not sure why someone would do this...
1622                  */
1623                 if (cur_pattern->flags == BUS_MATCH_NONE)
1624                         continue;
1625
1626                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1627                  && (cur_pattern->path_id != bus->path_id))
1628                         continue;
1629
1630                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1631                  && (cur_pattern->bus_id != bus->sim->bus_id))
1632                         continue;
1633
1634                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1635                  && (cur_pattern->unit_number != bus->sim->unit_number))
1636                         continue;
1637
1638                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1639                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1640                              DEV_IDLEN) != 0))
1641                         continue;
1642
1643                 /*
1644                  * If we get to this point, the user definitely wants 
1645                  * information on this bus.  So tell the caller to copy the
1646                  * data out.
1647                  */
1648                 retval |= DM_RET_COPY;
1649
1650                 /*
1651                  * If the return action has been set to descend, then we
1652                  * know that we've already seen a non-bus matching
1653                  * expression, therefore we need to further descend the tree.
1654                  * This won't change by continuing around the loop, so we
1655                  * go ahead and return.  If we haven't seen a non-bus
1656                  * matching expression, we keep going around the loop until
1657                  * we exhaust the matching expressions.  We'll set the stop
1658                  * flag once we fall out of the loop.
1659                  */
1660                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1661                         return(retval);
1662         }
1663
1664         /*
1665          * If the return action hasn't been set to descend yet, that means
1666          * we haven't seen anything other than bus matching patterns.  So
1667          * tell the caller to stop descending the tree -- the user doesn't
1668          * want to match against lower level tree elements.
1669          */
1670         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1671                 retval |= DM_RET_STOP;
1672
1673         return(retval);
1674 }
1675
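/*
 * Match a single device against any number of match patterns.
 */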
1676 static dev_match_ret
1677 xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
1678                struct cam_ed *device)
1679 {
1680         dev_match_ret retval;
1681         int i;
1682
1683         retval = DM_RET_NONE;
1684
1685         /*
1686          * If we aren't given something to match against, that's an error.
1687          */
1688         if (device == NULL)
1689                 return(DM_RET_ERROR);
1690
1691         /*
1692          * If there are no match entries, then this device matches no
1693          * matter what.
1694          */
1695         if ((patterns == NULL) || (num_patterns == 0))
1696                 return(DM_RET_DESCEND | DM_RET_COPY);
1697
1698         for (i = 0; i < num_patterns; i++) {
1699                 struct device_match_pattern *cur_pattern;
1700
1701                 /*
1702                  * If the pattern in question isn't for a device node, we
1703                  * aren't interested.
1704                  */
1705                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1706                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1707                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1708                                 retval |= DM_RET_DESCEND;
1709                         continue;
1710                 }
1711
1712                 cur_pattern = &patterns[i].pattern.device_pattern;
1713
1714                 /*
1715                  * If they want to match any device node, we give them any
1716                  * device node.
1717                  */
1718                 if (cur_pattern->flags == DEV_MATCH_ANY) {
1719                         /* set the copy flag */
1720                         retval |= DM_RET_COPY;
1721
1723                         /*
1724                          * If we've already decided on an action, go ahead
1725                          * and return.
1726                          */
1727                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1728                                 return(retval);
1729                 }
1730
1731                 /*
1732                  * Not sure why someone would do this...
1733                  */
1734                 if (cur_pattern->flags == DEV_MATCH_NONE)
1735                         continue;
1736
1737                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1738                  && (cur_pattern->path_id != device->target->bus->path_id))
1739                         continue;
1740
1741                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1742                  && (cur_pattern->target_id != device->target->target_id))
1743                         continue;
1744
1745                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1746                  && (cur_pattern->target_lun != device->lun_id))
1747                         continue;
1748
1749                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1750                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1751                                     (caddr_t)&cur_pattern->inq_pat,
1752                                     1, sizeof(cur_pattern->inq_pat),
1753                                     scsi_static_inquiry_match) == NULL))
1754                         continue;
1755
1756                 /*
1757                  * If we get to this point, the user definitely wants 
1758                  * information on this device.  So tell the caller to copy
1759                  * the data out.
1760                  */
1761                 retval |= DM_RET_COPY;
1762
1763                 /*
1764                  * If the return action has been set to descend, then we
1765                  * know that we've already seen a peripheral matching
1766                  * expression, therefore we need to further descend the tree.
1767                  * This won't change by continuing around the loop, so we
1768                  * go ahead and return.  If we haven't seen a peripheral
1769                  * matching expression, we keep going around the loop until
1770                  * we exhaust the matching expressions.  We'll set the stop
1771                  * flag once we fall out of the loop.
1772                  */
1773                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1774                         return(retval);
1775         }
1776
1777         /*
1778          * If the return action hasn't been set to descend yet, that means
1779          * we haven't seen any peripheral matching patterns.  So tell the
1780          * caller to stop descending the tree -- the user doesn't want to
1781          * match against lower level tree elements.
1782          */
1783         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1784                 retval |= DM_RET_STOP;
1785
1786         return(retval);
1787 }
1788
1789 /*
1790  * Match a single peripheral against any number of match patterns.
1791  */
1792 static dev_match_ret
1793 xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1794                struct cam_periph *periph)
1795 {
1796         dev_match_ret retval;
1797         int i;
1798
1799         /*
1800          * If we aren't given something to match against, that's an error.
1801          */
1802         if (periph == NULL)
1803                 return(DM_RET_ERROR);
1804
1805         /*
1806          * If there are no match entries, then this peripheral matches no
1807          * matter what.
1808          */
1809         if ((patterns == NULL) || (num_patterns == 0))
1810                 return(DM_RET_STOP | DM_RET_COPY);
1811
1812         /*
1813          * There aren't any nodes below a peripheral node, so there's no
1814          * reason to descend the tree any further.
1815          */
1816         retval = DM_RET_STOP;
1817
1818         for (i = 0; i < num_patterns; i++) {
1819                 struct periph_match_pattern *cur_pattern;
1820
1821                 /*
1822                  * If the pattern in question isn't for a peripheral, we
1823                  * aren't interested.
1824                  */
1825                 if (patterns[i].type != DEV_MATCH_PERIPH)
1826                         continue;
1827
1828                 cur_pattern = &patterns[i].pattern.periph_pattern;
1829
1830                 /*
1831                  * If they want to match on anything, then we will do so.
1832                  */
1833                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1834                         /* set the copy flag */
1835                         retval |= DM_RET_COPY;
1836
1837                         /*
1838                          * We've already set the return action to stop,
1839                          * since there are no nodes below peripherals in
1840                          * the tree.
1841                          */
1842                         return(retval);
1843                 }
1844
1845                 /*
1846                  * Not sure why someone would do this...
1847                  */
1848                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1849                         continue;
1850
1851                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1852                  && (cur_pattern->path_id != periph->path->bus->path_id))
1853                         continue;
1854
1855                 /*
1856                  * For the target and lun IDs, we have to make sure the
1857                  * target and lun pointers aren't NULL.  The xpt peripheral
1858                  * has a wildcard target and device.
1859                  */
1860                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1861                  && ((periph->path->target == NULL)
1862                  ||(cur_pattern->target_id != periph->path->target->target_id)))
1863                         continue;
1864
1865                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1866                  && ((periph->path->device == NULL)
1867                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
1868                         continue;
1869
1870                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1871                  && (cur_pattern->unit_number != periph->unit_number))
1872                         continue;
1873
1874                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1875                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
1876                              DEV_IDLEN) != 0))
1877                         continue;
1878
1879                 /*
1880                  * If we get to this point, the user definitely wants 
1881                  * information on this peripheral.  So tell the caller to
1882                  * copy the data out.
1883                  */
1884                 retval |= DM_RET_COPY;
1885
1886                 /*
1887                  * The return action has already been set to stop, since
1888                  * peripherals don't have any nodes below them in the EDT.
1889                  */
1890                 return(retval);
1891         }
1892
1893         /*
1894          * If we get to this point, the peripheral that was passed in
1895          * doesn't match any of the patterns.
1896          */
1897         return(retval);
1898 }
1899
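/*
 * The xptedt*func() callbacks below walk the EDT's bus -> target ->
 * device -> peripheral hierarchy on behalf of an XPT_DEV_MATCH
 * request.  Each one checks whether a saved position (cdm->pos) says
 * to resume deeper in the tree, copies matching nodes into the user's
 * buffer, saves the position with status CAM_DEV_MATCH_MORE when the
 * buffer fills, and reports CAM_DEV_MATCH_LIST_CHANGED if a generation
 * count shows the tree changed between calls.
 */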
1900 static int
1901 xptedtbusfunc(struct cam_eb *bus, void *arg)
1902 {
1903         struct ccb_dev_match *cdm;
1904         dev_match_ret retval;
1905
1906         cdm = (struct ccb_dev_match *)arg;
1907
1908         /*
1909          * If our position is for something deeper in the tree, that means
1910          * that we've already seen this node.  So, we keep going down.
1911          */
1912         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1913          && (cdm->pos.cookie.bus == bus)
1914          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1915          && (cdm->pos.cookie.target != NULL))
1916                 retval = DM_RET_DESCEND;
1917         else
1918                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1919
1920         /*
1921          * If we got an error, bail out of the search.
1922          */
1923         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1924                 cdm->status = CAM_DEV_MATCH_ERROR;
1925                 return(0);
1926         }
1927
1928         /*
1929          * If the copy flag is set, copy this bus out.
1930          */
1931         if (retval & DM_RET_COPY) {
1932                 int spaceleft, j;
1933
1934                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1935                         sizeof(struct dev_match_result));
1936
1937                 /*
1938                  * If we don't have enough space to put in another
1939                  * match result, save our position and tell the
1940                  * user there are more devices to check.
1941                  */
1942                 if (spaceleft < sizeof(struct dev_match_result)) {
1943                         bzero(&cdm->pos, sizeof(cdm->pos));
1944                         cdm->pos.position_type = 
1945                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1946
1947                         cdm->pos.cookie.bus = bus;
1948                         cdm->pos.generations[CAM_BUS_GENERATION]=
1949                                 bus_generation;
1950                         cdm->status = CAM_DEV_MATCH_MORE;
1951                         return(0);
1952                 }
1953                 j = cdm->num_matches;
1954                 cdm->num_matches++;
1955                 cdm->matches[j].type = DEV_MATCH_BUS;
1956                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1957                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1958                 cdm->matches[j].result.bus_result.unit_number =
1959                         bus->sim->unit_number;
1960                 strncpy(cdm->matches[j].result.bus_result.dev_name,
1961                         bus->sim->sim_name, DEV_IDLEN);
1962         }
1963
1964         /*
1965          * If the user is only interested in busses, there's no
1966          * reason to descend to the next level in the tree.
1967          */
1968         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1969                 return(1);
1970
1971         /*
1972          * If there is a target generation recorded, check it to
1973          * make sure the target list hasn't changed.
1974          */
1975         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1976          && (bus == cdm->pos.cookie.bus)
1977          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1978          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1979          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1980              bus->generation)) {
1981                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1982                 return(0);
1983         }
1984
1985         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1986          && (cdm->pos.cookie.bus == bus)
1987          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1988          && (cdm->pos.cookie.target != NULL))
1989                 return(xpttargettraverse(bus,
1990                                         (struct cam_et *)cdm->pos.cookie.target,
1991                                          xptedttargetfunc, arg));
1992         else
1993                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1994 }
1995
1996 static int
1997 xptedttargetfunc(struct cam_et *target, void *arg)
1998 {
1999         struct ccb_dev_match *cdm;
2000
2001         cdm = (struct ccb_dev_match *)arg;
2002
2003         /*
2004          * If there is a device list generation recorded, check it to
2005          * make sure the device list hasn't changed.
2006          */
2007         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2008          && (cdm->pos.cookie.bus == target->bus)
2009          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2010          && (cdm->pos.cookie.target == target)
2011          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2012          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2013          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2014              target->generation)) {
2015                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2016                 return(0);
2017         }
2018
2019         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2020          && (cdm->pos.cookie.bus == target->bus)
2021          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2022          && (cdm->pos.cookie.target == target)
2023          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2024          && (cdm->pos.cookie.device != NULL))
2025                 return(xptdevicetraverse(target,
2026                                         (struct cam_ed *)cdm->pos.cookie.device,
2027                                          xptedtdevicefunc, arg));
2028         else
2029                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2030 }
2031
2032 static int
2033 xptedtdevicefunc(struct cam_ed *device, void *arg)
2034 {
2036         struct ccb_dev_match *cdm;
2037         dev_match_ret retval;
2038
2039         cdm = (struct ccb_dev_match *)arg;
2040
2041         /*
2042          * If our position is for something deeper in the tree, that means
2043          * that we've already seen this node.  So, we keep going down.
2044          */
2045         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2046          && (cdm->pos.cookie.device == device)
2047          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2048          && (cdm->pos.cookie.periph != NULL))
2049                 retval = DM_RET_DESCEND;
2050         else
2051                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2052                                         device);
2053
2054         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2055                 cdm->status = CAM_DEV_MATCH_ERROR;
2056                 return(0);
2057         }
2058
2059         /*
2060          * If the copy flag is set, copy this device out.
2061          */
2062         if (retval & DM_RET_COPY) {
2063                 int spaceleft, j;
2064
2065                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2066                         sizeof(struct dev_match_result));
2067
2068                 /*
2069                  * If we don't have enough space to put in another
2070                  * match result, save our position and tell the
2071                  * user there are more devices to check.
2072                  */
2073                 if (spaceleft < sizeof(struct dev_match_result)) {
2074                         bzero(&cdm->pos, sizeof(cdm->pos));
2075                         cdm->pos.position_type = 
2076                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2077                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2078
2079                         cdm->pos.cookie.bus = device->target->bus;
2080                         cdm->pos.generations[CAM_BUS_GENERATION]=
2081                                 bus_generation;
2082                         cdm->pos.cookie.target = device->target;
2083                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2084                                 device->target->bus->generation;
2085                         cdm->pos.cookie.device = device;
2086                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2087                                 device->target->generation;
2088                         cdm->status = CAM_DEV_MATCH_MORE;
2089                         return(0);
2090                 }
2091                 j = cdm->num_matches;
2092                 cdm->num_matches++;
2093                 cdm->matches[j].type = DEV_MATCH_DEVICE;
2094                 cdm->matches[j].result.device_result.path_id =
2095                         device->target->bus->path_id;
2096                 cdm->matches[j].result.device_result.target_id =
2097                         device->target->target_id;
2098                 cdm->matches[j].result.device_result.target_lun =
2099                         device->lun_id;
2100                 bcopy(&device->inq_data,
2101                       &cdm->matches[j].result.device_result.inq_data,
2102                       sizeof(struct scsi_inquiry_data));
2103
2104                 /* Let the user know whether this device is unconfigured */
2105                 if (device->flags & CAM_DEV_UNCONFIGURED)
2106                         cdm->matches[j].result.device_result.flags =
2107                                 DEV_RESULT_UNCONFIGURED;
2108                 else
2109                         cdm->matches[j].result.device_result.flags =
2110                                 DEV_RESULT_NOFLAG;
2111         }
2112
2113         /*
2114          * If the user isn't interested in peripherals, don't descend
2115          * the tree any further.
2116          */
2117         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2118                 return(1);
2119
2120         /*
2121          * If there is a peripheral list generation recorded, make sure
2122          * it hasn't changed.
2123          */
2124         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2125          && (device->target->bus == cdm->pos.cookie.bus)
2126          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2127          && (device->target == cdm->pos.cookie.target)
2128          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2129          && (device == cdm->pos.cookie.device)
2130          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2131          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2132          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2133              device->generation)){
2134                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2135                 return(0);
2136         }
2137
2138         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2139          && (cdm->pos.cookie.bus == device->target->bus)
2140          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2141          && (cdm->pos.cookie.target == device->target)
2142          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2143          && (cdm->pos.cookie.device == device)
2144          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2145          && (cdm->pos.cookie.periph != NULL))
2146                 return(xptperiphtraverse(device,
2147                                 (struct cam_periph *)cdm->pos.cookie.periph,
2148                                 xptedtperiphfunc, arg));
2149         else
2150                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2151 }
2152
2153 static int
2154 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2155 {
2156         struct ccb_dev_match *cdm;
2157         dev_match_ret retval;
2158
2159         cdm = (struct ccb_dev_match *)arg;
2160
2161         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2162
2163         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2164                 cdm->status = CAM_DEV_MATCH_ERROR;
2165                 return(0);
2166         }
2167
2168         /*
2169          * If the copy flag is set, copy this peripheral out.
2170          */
2171         if (retval & DM_RET_COPY) {
2172                 int spaceleft, j;
2173
2174                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2175                         sizeof(struct dev_match_result));
2176
2177                 /*
2178                  * If we don't have enough space to put in another
2179                  * match result, save our position and tell the
2180                  * user there are more devices to check.
2181                  */
2182                 if (spaceleft < sizeof(struct dev_match_result)) {
2183                         bzero(&cdm->pos, sizeof(cdm->pos));
2184                         cdm->pos.position_type = 
2185                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2186                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2187                                 CAM_DEV_POS_PERIPH;
2188
2189                         cdm->pos.cookie.bus = periph->path->bus;
2190                         cdm->pos.generations[CAM_BUS_GENERATION]=
2191                                 bus_generation;
2192                         cdm->pos.cookie.target = periph->path->target;
2193                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2194                                 periph->path->bus->generation;
2195                         cdm->pos.cookie.device = periph->path->device;
2196                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2197                                 periph->path->target->generation;
2198                         cdm->pos.cookie.periph = periph;
2199                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2200                                 periph->path->device->generation;
2201                         cdm->status = CAM_DEV_MATCH_MORE;
2202                         return(0);
2203                 }
2204
2205                 j = cdm->num_matches;
2206                 cdm->num_matches++;
2207                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2208                 cdm->matches[j].result.periph_result.path_id =
2209                         periph->path->bus->path_id;
2210                 cdm->matches[j].result.periph_result.target_id =
2211                         periph->path->target->target_id;
2212                 cdm->matches[j].result.periph_result.target_lun =
2213                         periph->path->device->lun_id;
2214                 cdm->matches[j].result.periph_result.unit_number =
2215                         periph->unit_number;
2216                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2217                         periph->periph_name, DEV_IDLEN);
2218         }
2219
2220         return(1);
2221 }
2222
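/*
 * Top-level dispatcher for an XPT_DEV_MATCH traversal of the EDT.
 */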
2223 static int
2224 xptedtmatch(struct ccb_dev_match *cdm)
2225 {
2226         int ret;
2227
2228         cdm->num_matches = 0;
2229
2230         /*
2231          * Check the bus list generation.  If it has changed, the user
2232          * needs to reset everything and start over.
2233          */
2234         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2235          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2236          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2237                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2238                 return(0);
2239         }
2240
2241         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2242          && (cdm->pos.cookie.bus != NULL))
2243                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2244                                      xptedtbusfunc, cdm);
2245         else
2246                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2247
2248         /*
2249          * If we get back 0, that means that we had to stop before fully
2250          * traversing the EDT.  It also means that one of the subroutines
2251          * has set the status field to the proper value.  If we get back 1,
2252          * we've fully traversed the EDT and copied out any matching entries.
2253          */
2254         if (ret == 1)
2255                 cdm->status = CAM_DEV_MATCH_LAST;
2256
2257         return(ret);
2258 }
2259
2260 static int
2261 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2262 {
2263         struct ccb_dev_match *cdm;
2264
2265         cdm = (struct ccb_dev_match *)arg;
2266
2267         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2268          && (cdm->pos.cookie.pdrv == pdrv)
2269          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2270          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2271          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2272              (*pdrv)->generation)) {
2273                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2274                 return(0);
2275         }
2276
2277         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2278          && (cdm->pos.cookie.pdrv == pdrv)
2279          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2280          && (cdm->pos.cookie.periph != NULL))
2281                 return(xptpdperiphtraverse(pdrv,
2282                                 (struct cam_periph *)cdm->pos.cookie.periph,
2283                                 xptplistperiphfunc, arg));
2284         else
2285                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2286 }
2287
2288 static int
2289 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2290 {
2291         struct ccb_dev_match *cdm;
2292         dev_match_ret retval;
2293
2294         cdm = (struct ccb_dev_match *)arg;
2295
2296         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2297
2298         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2299                 cdm->status = CAM_DEV_MATCH_ERROR;
2300                 return(0);
2301         }
2302
2303         /*
2304          * If the copy flag is set, copy this peripheral out.
2305          */
2306         if (retval & DM_RET_COPY) {
2307                 int spaceleft, j;
2308
2309                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2310                         sizeof(struct dev_match_result));
2311
2312                 /*
2313                  * If we don't have enough space to put in another
2314                  * match result, save our position and tell the
2315                  * user there are more devices to check.
2316                  */
2317                 if (spaceleft < sizeof(struct dev_match_result)) {
2318                         struct periph_driver **pdrv;
2319
2320                         pdrv = NULL;
2321                         bzero(&cdm->pos, sizeof(cdm->pos));
2322                         cdm->pos.position_type = 
2323                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2324                                 CAM_DEV_POS_PERIPH;
2325
2326                         /*
2327                          * This may look a bit nonsensical, but it is
2328                          * actually quite logical.  There are very few
2329                          * peripheral drivers, and bloating every peripheral
2330                          * structure with a pointer back to its parent
2331                          * peripheral driver linker set entry would cost
2332                          * more in the long run than doing this quick lookup.
2333                          */
2334                         for (pdrv =
2335                              (struct periph_driver **)periphdriver_set.ls_items;
2336                              *pdrv != NULL; pdrv++) {
2337                                 if (strcmp((*pdrv)->driver_name,
2338                                     periph->periph_name) == 0)
2339                                         break;
2340                         }
2341
2342                         if (*pdrv == NULL) {
2343                                 cdm->status = CAM_DEV_MATCH_ERROR;
2344                                 return(0);
2345                         }
2346
2347                         cdm->pos.cookie.pdrv = pdrv;
2348                         /*
2349                          * The periph generation slot does double duty, as
2350                          * does the periph pointer slot.  They are used for
2351                          * both edt and pdrv lookups and positioning.
2352                          */
2353                         cdm->pos.cookie.periph = periph;
2354                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2355                                 (*pdrv)->generation;
2356                         cdm->status = CAM_DEV_MATCH_MORE;
2357                         return(0);
2358                 }
2359
2360                 j = cdm->num_matches;
2361                 cdm->num_matches++;
2362                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2363                 cdm->matches[j].result.periph_result.path_id =
2364                         periph->path->bus->path_id;
2365
2366                 /*
2367                  * The transport layer peripheral doesn't have a target or
2368                  * lun.
2369                  */
2370                 if (periph->path->target)
2371                         cdm->matches[j].result.periph_result.target_id =
2372                                 periph->path->target->target_id;
2373                 else
2374                         cdm->matches[j].result.periph_result.target_id = -1;
2375
2376                 if (periph->path->device)
2377                         cdm->matches[j].result.periph_result.target_lun =
2378                                 periph->path->device->lun_id;
2379                 else
2380                         cdm->matches[j].result.periph_result.target_lun = -1;
2381
2382                 cdm->matches[j].result.periph_result.unit_number =
2383                         periph->unit_number;
2384                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2385                         periph->periph_name, DEV_IDLEN);
2386         }
2387
2388         return(1);
2389 }
2390
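/*
 * Top-level dispatcher for an XPT_DEV_MATCH traversal of the
 * peripheral driver list rather than the EDT.
 */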
2391 static int
2392 xptperiphlistmatch(struct ccb_dev_match *cdm)
2393 {
2394         int ret;
2395
2396         cdm->num_matches = 0;
2397
2398         /*
2399          * At this point in the EDT traversal function, we check the bus
2400          * list generation to make sure that no busses have been added or
2401          * removed since the user last sent an XPT_DEV_MATCH ccb through.
2402          * For the peripheral driver list traversal function, however, we
2403          * don't have to worry about new peripheral driver types coming or
2404          * going; they're in a linker set, and therefore can't change
2405          * without a recompile.
2406          */
2407
2408         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2409          && (cdm->pos.cookie.pdrv != NULL))
2410                 ret = xptpdrvtraverse(
2411                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2412                                 xptplistpdrvfunc, cdm);
2413         else
2414                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2415
2416         /*
2417          * If we get back 0, that means that we had to stop before fully
2418          * traversing the peripheral driver tree.  It also means that one of
2419          * the subroutines has set the status field to the proper value.  If
2420          * we get back 1, we've fully traversed the peripheral driver list
2421          * and copied out any matching entries.
2422          */
2423         if (ret == 1)
2424                 cdm->status = CAM_DEV_MATCH_LAST;
2425
2426         return(ret);
2427 }
2428
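/*
 * The traversal functions below share a common contract: start at the
 * given node, or at the head of the relevant list if handed NULL (so a
 * traversal can be resumed mid-list), call tr_func on each node, and
 * stop early if tr_func returns 0.  The result is 0 for an aborted
 * traversal and 1 for a complete one.
 */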
2429 static int
2430 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2431 {
2432         struct cam_eb *bus, *next_bus;
2433         int retval;
2434
2435         retval = 1;
2436
2437         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2438              bus != NULL;
2439              bus = next_bus) {
2440                 next_bus = TAILQ_NEXT(bus, links);
2441
2442                 retval = tr_func(bus, arg);
2443                 if (retval == 0)
2444                         return(retval);
2445         }
2446
2447         return(retval);
2448 }
2449
2450 static int
2451 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2452                   xpt_targetfunc_t *tr_func, void *arg)
2453 {
2454         struct cam_et *target, *next_target;
2455         int retval;
2456
2457         retval = 1;
2458         for (target = (start_target ? start_target :
2459                        TAILQ_FIRST(&bus->et_entries));
2460              target != NULL; target = next_target) {
2461
2462                 next_target = TAILQ_NEXT(target, links);
2463
2464                 retval = tr_func(target, arg);
2465
2466                 if (retval == 0)
2467                         return(retval);
2468         }
2469
2470         return(retval);
2471 }
2472
2473 static int
2474 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2475                   xpt_devicefunc_t *tr_func, void *arg)
2476 {
2477         struct cam_ed *device, *next_device;
2478         int retval;
2479
2480         retval = 1;
2481         for (device = (start_device ? start_device :
2482                        TAILQ_FIRST(&target->ed_entries));
2483              device != NULL;
2484              device = next_device) {
2485
2486                 next_device = TAILQ_NEXT(device, links);
2487
2488                 retval = tr_func(device, arg);
2489
2490                 if (retval == 0)
2491                         return(retval);
2492         }
2493
2494         return(retval);
2495 }
2496
2497 static int
2498 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2499                   xpt_periphfunc_t *tr_func, void *arg)
2500 {
2501         struct cam_periph *periph, *next_periph;
2502         int retval;
2503
2504         retval = 1;
2505
2506         for (periph = (start_periph ? start_periph :
2507                        SLIST_FIRST(&device->periphs));
2508              periph != NULL;
2509              periph = next_periph) {
2510
2511                 next_periph = SLIST_NEXT(periph, periph_links);
2512
2513                 retval = tr_func(periph, arg);
2514                 if (retval == 0)
2515                         return(retval);
2516         }
2517
2518         return(retval);
2519 }
2520
2521 static int
2522 xptpdrvtraverse(struct periph_driver **start_pdrv,
2523                 xpt_pdrvfunc_t *tr_func, void *arg)
2524 {
2525         struct periph_driver **pdrv;
2526         int retval;
2527
2528         retval = 1;
2529
2530         /*
2531          * We don't traverse the peripheral driver list like we do the
2532          * other lists, because it is a linker set, and therefore cannot be
2533          * changed during runtime.  If the peripheral driver list is ever
2534          * re-done to be something other than a linker set (i.e. it can
2535          * change while the system is running), the list traversal should
2536          * be modified to work like the other traversal functions.
2537          */
2538         for (pdrv = (start_pdrv ? start_pdrv :
2539              (struct periph_driver **)periphdriver_set.ls_items);
2540              *pdrv != NULL; pdrv++) {
2541                 retval = tr_func(pdrv, arg);
2542
2543                 if (retval == 0)
2544                         return(retval);
2545         }
2546
2547         return(retval);
2548 }
2549
2550 static int
2551 xptpdperiphtraverse(struct periph_driver **pdrv,
2552                     struct cam_periph *start_periph,
2553                     xpt_periphfunc_t *tr_func, void *arg)
2554 {
2555         struct cam_periph *periph, *next_periph;
2556         int retval;
2557
2558         retval = 1;
2559
2560         for (periph = (start_periph ? start_periph :
2561              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2562              periph = next_periph) {
2563
2564                 next_periph = TAILQ_NEXT(periph, unit_links);
2565
2566                 retval = tr_func(periph, arg);
2567                 if (retval == 0)
2568                         return(retval);
2569         }
2570         return(retval);
2571 }
2572
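/*
 * The xptdef*func() helpers implement the xpt_for_all_*() entry points
 * below: they descend the EDT and invoke the user's tr_func once the
 * depth recorded in the xpt_traverse_config is reached.
 */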
2573 static int
2574 xptdefbusfunc(struct cam_eb *bus, void *arg)
2575 {
2576         struct xpt_traverse_config *tr_config;
2577
2578         tr_config = (struct xpt_traverse_config *)arg;
2579
2580         if (tr_config->depth == XPT_DEPTH_BUS) {
2581                 xpt_busfunc_t *tr_func;
2582
2583                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2584
2585                 return(tr_func(bus, tr_config->tr_arg));
2586         } else
2587                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2588 }
2589
2590 static int
2591 xptdeftargetfunc(struct cam_et *target, void *arg)
2592 {
2593         struct xpt_traverse_config *tr_config;
2594
2595         tr_config = (struct xpt_traverse_config *)arg;
2596
2597         if (tr_config->depth == XPT_DEPTH_TARGET) {
2598                 xpt_targetfunc_t *tr_func;
2599
2600                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2601
2602                 return(tr_func(target, tr_config->tr_arg));
2603         } else
2604                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2605 }
2606
2607 static int
2608 xptdefdevicefunc(struct cam_ed *device, void *arg)
2609 {
2610         struct xpt_traverse_config *tr_config;
2611
2612         tr_config = (struct xpt_traverse_config *)arg;
2613
2614         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2615                 xpt_devicefunc_t *tr_func;
2616
2617                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2618
2619                 return(tr_func(device, tr_config->tr_arg));
2620         } else
2621                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2622 }
2623
2624 static int
2625 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2626 {
2627         struct xpt_traverse_config *tr_config;
2628         xpt_periphfunc_t *tr_func;
2629
2630         tr_config = (struct xpt_traverse_config *)arg;
2631
2632         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2633
2634         /*
2635          * Unlike the other default functions, we don't check for depth
2636          * here.  The peripheral driver level is the last level in the EDT,
2637          * so if we're here, we should execute the function in question.
2638          */
2639         return(tr_func(periph, tr_config->tr_arg));
2640 }
2641
2642 /*
2643  * Execute the given function for every bus in the EDT.
2644  */
2645 static int
2646 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2647 {
2648         struct xpt_traverse_config tr_config;
2649
2650         tr_config.depth = XPT_DEPTH_BUS;
2651         tr_config.tr_func = tr_func;
2652         tr_config.tr_arg = arg;
2653
2654         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2655 }
2656
2657 #ifdef notusedyet
2658 /*
2659  * Execute the given function for every target in the EDT.
2660  */
2661 static int
2662 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2663 {
2664         struct xpt_traverse_config tr_config;
2665
2666         tr_config.depth = XPT_DEPTH_TARGET;
2667         tr_config.tr_func = tr_func;
2668         tr_config.tr_arg = arg;
2669
2670         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2671 }
2672 #endif /* notusedyet */
2673
2674 /*
2675  * Execute the given function for every device in the EDT.
2676  */
2677 static int
2678 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2679 {
2680         struct xpt_traverse_config tr_config;
2681
2682         tr_config.depth = XPT_DEPTH_DEVICE;
2683         tr_config.tr_func = tr_func;
2684         tr_config.tr_arg = arg;
2685
2686         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2687 }
2688
2689 #ifdef notusedyet
2690 /*
2691  * Execute the given function for every peripheral in the EDT.
2692  */
2693 static int
2694 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2695 {
2696         struct xpt_traverse_config tr_config;
2697
2698         tr_config.depth = XPT_DEPTH_PERIPH;
2699         tr_config.tr_func = tr_func;
2700         tr_config.tr_arg = arg;
2701
2702         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2703 }
2704 #endif /* notusedyet */
2705
2706 static int
2707 xptsetasyncfunc(struct cam_ed *device, void *arg)
2708 {
2709         struct cam_path path;
2710         struct ccb_getdev cgd;
2711         struct async_node *cur_entry;
2712
2713         cur_entry = (struct async_node *)arg;
2714
2715         /*
2716          * Don't report unconfigured devices (Wildcard devs,
2717          * devices only for target mode, device instances
2718          * that have been invalidated but are waiting for
2719          * their last reference count to be released).
2720          */
2721         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2722                 return (1);
2723
2724         xpt_compile_path(&path,
2725                          NULL,
2726                          device->target->bus->path_id,
2727                          device->target->target_id,
2728                          device->lun_id);
2729         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2730         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2731         xpt_action((union ccb *)&cgd);
2732         cur_entry->callback(cur_entry->callback_arg,
2733                             AC_FOUND_DEVICE,
2734                             &path, &cgd);
2735         xpt_release_path(&path);
2736
2737         return(1);
2738 }
2739
2740 static int
2741 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2742 {
2743         struct cam_path path;
2744         struct ccb_pathinq cpi;
2745         struct async_node *cur_entry;
2746
2747         cur_entry = (struct async_node *)arg;
2748
2749         xpt_compile_path(&path, /*periph*/NULL,
2750                          bus->sim->path_id,
2751                          CAM_TARGET_WILDCARD,
2752                          CAM_LUN_WILDCARD);
2753         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2754         cpi.ccb_h.func_code = XPT_PATH_INQ;
2755         xpt_action((union ccb *)&cpi);
2756         cur_entry->callback(cur_entry->callback_arg,
2757                             AC_PATH_REGISTERED,
2758                             &path, &cpi);
2759         xpt_release_path(&path);
2760
2761         return(1);
2762 }
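/*
 * A registration sketch (editorial illustration, assuming a caller-held
 * "path" and a hypothetical handler): a peripheral driver enables async
 * events with an XPT_SASYNC_CB ccb; the handler in xpt_action() below
 * then replays existing devices and busses through xptsetasyncfunc()
 * and xptsetasyncbusfunc().
 */
#if 0
struct ccb_setasync csa;

xpt_setup_ccb(&csa.ccb_h, path, /*priority*/1);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_FOUND_DEVICE | AC_PATH_REGISTERED;
csa.callback = example_async_cb;        /* hypothetical handler */
csa.callback_arg = NULL;
xpt_action((union ccb *)&csa);
#endif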
2763
2764 void
2765 xpt_action(union ccb *start_ccb)
2766 {
2767         int iopl;
2768
2769         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2770
2771         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2772
2773         iopl = splsoftcam();
2774         switch (start_ccb->ccb_h.func_code) {
2775         case XPT_SCSI_IO:
2776         {
2777 #ifdef CAMDEBUG
2778                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2779                 struct cam_path *path;
2780
2781                 path = start_ccb->ccb_h.path;
2782 #endif
2783
2784                 /*
2785                  * For the sake of compatibility with SCSI-1
2786                  * devices that may not understand the identify
2787                  * message, we include lun information in the
2788                  * second byte of all commands.  SCSI-1 specifies
2789                  * that luns are a 3 bit value and reserves only 3
2790                  * bits for lun information in the CDB.  Later
2791                  * revisions of the SCSI spec allow for more than 8
2792                  * luns, but have deprecated lun information in the
2793                  * CDB.  So, if the lun won't fit, we must omit it.
2794                  *
2795                  * Also be aware that during initial probing for devices,
2796                  * the inquiry information is unknown but initialized to 0.
2797                  * This means that this code will be exercised while probing
2798                  * devices with an ANSI revision greater than 2.
2799                  */
2800                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2801                  && start_ccb->ccb_h.target_lun < 8
2802                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2803
2804                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2805                             start_ccb->ccb_h.target_lun << 5;
2806                 }
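                /*
                 * Worked example (editorial): for target_lun 3 on an
                 * ANSI rev <= 2 device, 3 << 5 == 0x60, so the top three
                 * bits of the second CDB byte carry the lun, as SCSI-1
                 * targets that ignore the identify message expect.
                 */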
2807                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2808                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2809                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2810                                        &path->device->inq_data),
2811                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2812                                           cdb_str, sizeof(cdb_str))));
2813                 /* FALLTHROUGH */
2814         }
2815         case XPT_TARGET_IO:
2816         case XPT_CONT_TARGET_IO:
2817                 start_ccb->csio.sense_resid = 0;
2818                 start_ccb->csio.resid = 0;
2819                 /* FALLTHROUGH */
2820         case XPT_RESET_DEV:
2821         case XPT_ENG_EXEC:
2822         {
2823                 struct cam_path *path;
2824                 int s;
2825                 int runq;
2826
2827                 path = start_ccb->ccb_h.path;
2828                 s = splsoftcam();
2829
2830                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2831                 if (path->device->qfrozen_cnt == 0)
2832                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
2833                 else
2834                         runq = 0;
2835                 splx(s);
2836                 if (runq != 0)
2837                         xpt_run_dev_sendq(path->bus);
2838                 break;
2839         }
2840         case XPT_SET_TRAN_SETTINGS:
2841         {
2842                 xpt_set_transfer_settings(&start_ccb->cts,
2843                                           start_ccb->ccb_h.path->device,
2844                                           /*async_update*/FALSE);
2845                 break;
2846         }
2847         case XPT_CALC_GEOMETRY:
2848         {
2849                 struct cam_sim *sim;
2850
2851                 /* Filter out garbage */
2852                 if (start_ccb->ccg.block_size == 0
2853                  || start_ccb->ccg.volume_size == 0) {
2854                         start_ccb->ccg.cylinders = 0;
2855                         start_ccb->ccg.heads = 0;
2856                         start_ccb->ccg.secs_per_track = 0;
2857                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2858                         break;
2859                 }
2860 #ifdef PC98
2861                 /*
2862                  * In a PC-98 system, geometry translation depends on
2863                  * the "real" device geometry obtained from mode page 4.
2864                  * SCSI geometry translation is performed in the
2865                  * initialization routine of the SCSI BIOS and the result
2866                  * is stored in host memory.  If the translation is available
2867                  * in host memory, use it.  If not, rely on the default
2868                  * translation the device driver performs.
2869                  */
2870                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2871                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2872                         break;
2873                 }
2874 #endif
2875                 sim = start_ccb->ccb_h.path->bus->sim;
2876                 (*(sim->sim_action))(sim, start_ccb);
2877                 break;
2878         }
2879         case XPT_ABORT:
2880         {
2881                 union ccb* abort_ccb;
2882                 int s;                          
2883
2884                 abort_ccb = start_ccb->cab.abort_ccb;
2885                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2886
2887                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
2888                                 struct cam_ccbq *ccbq;
2889
2890                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2891                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
2892                                 abort_ccb->ccb_h.status =
2893                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2894                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2895                                 s = splcam();
2896                                 xpt_done(abort_ccb);
2897                                 splx(s);
2898                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2899                                 break;
2900                         }
2901                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2902                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2903                                 /*
2904                                  * We've caught this ccb en route to
2905                                  * the SIM.  Flag it for abort and the
2906                                  * SIM will do so just before starting
2907                                  * real work on the CCB.
2908                                  */
2909                                 abort_ccb->ccb_h.status =
2910                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2911                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2912                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2913                                 break;
2914                         }
2915                 } 
2916                 if (XPT_FC_IS_QUEUED(abort_ccb)
2917                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2918                         /*
2919                          * It's already completed but waiting
2920                          * for our SWI to get to it.
2921                          */
2922                         start_ccb->ccb_h.status = CAM_UA_ABORT;
2923                         break;
2924                 }
2925                 /*
2926                  * If we weren't able to take care of the abort request
2927                  * in the XPT, pass the request down to the SIM for processing.
2928                  */
2929                 /* FALLTHROUGH */
2930         }
2931         case XPT_ACCEPT_TARGET_IO:
2932         case XPT_EN_LUN:
2933         case XPT_IMMED_NOTIFY:
2934         case XPT_NOTIFY_ACK:
2935         case XPT_GET_TRAN_SETTINGS:
2936         case XPT_RESET_BUS:
2937         {
2938                 struct cam_sim *sim;
2939
2940                 sim = start_ccb->ccb_h.path->bus->sim;
2941                 (*(sim->sim_action))(sim, start_ccb);
2942                 break;
2943         }
2944         case XPT_PATH_INQ:
2945         {
2946                 struct cam_sim *sim;
2947
2948                 sim = start_ccb->ccb_h.path->bus->sim;
2949                 (*(sim->sim_action))(sim, start_ccb);
2950                 break;
2951         }
2952         case XPT_PATH_STATS:
2953                 start_ccb->cpis.last_reset =
2954                         start_ccb->ccb_h.path->bus->last_reset;
2955                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2956                 break;
2957         case XPT_GDEV_TYPE:
2958         {
2959                 struct cam_ed *dev;
2960                 int s;
2961
2962                 dev = start_ccb->ccb_h.path->device;
2963                 s = splcam();
2964                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2965                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2966                 } else {
2967                         struct ccb_getdev *cgd;
2968                         struct cam_eb *bus;
2969                         struct cam_et *tar;
2970
2971                         cgd = &start_ccb->cgd;
2972                         bus = cgd->ccb_h.path->bus;
2973                         tar = cgd->ccb_h.path->target;
2974                         cgd->inq_data = dev->inq_data;
2975                         cgd->ccb_h.status = CAM_REQ_CMP;
2976                         cgd->serial_num_len = dev->serial_num_len;
2977                         if ((dev->serial_num_len > 0)
2978                          && (dev->serial_num != NULL))
2979                                 bcopy(dev->serial_num, cgd->serial_num,
2980                                       dev->serial_num_len);
2981                 }
2982                 splx(s);
2983                 break; 
2984         }
2985         case XPT_GDEV_STATS:
2986         {
2987                 struct cam_ed *dev;
2988                 int s;
2989
2990                 dev = start_ccb->ccb_h.path->device;
2991                 s = splcam();
2992                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2993                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2994                 } else {
2995                         struct ccb_getdevstats *cgds;
2996                         struct cam_eb *bus;
2997                         struct cam_et *tar;
2998
2999                         cgds = &start_ccb->cgds;
3000                         bus = cgds->ccb_h.path->bus;
3001                         tar = cgds->ccb_h.path->target;
3002                         cgds->dev_openings = dev->ccbq.dev_openings;
3003                         cgds->dev_active = dev->ccbq.dev_active;
3004                         cgds->devq_openings = dev->ccbq.devq_openings;
3005                         cgds->devq_queued = dev->ccbq.queue.entries;
3006                         cgds->held = dev->ccbq.held;
3007                         cgds->last_reset = tar->last_reset;
3008                         cgds->maxtags = dev->quirk->maxtags;
3009                         cgds->mintags = dev->quirk->mintags;
3010                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3011                                 cgds->last_reset = bus->last_reset;
3012                         cgds->ccb_h.status = CAM_REQ_CMP;
3013                 }
3014                 splx(s);
3015                 break;
3016         }
3017         case XPT_GDEVLIST:
3018         {
3019                 struct cam_periph       *nperiph;
3020                 struct periph_list      *periph_head;
3021                 struct ccb_getdevlist   *cgdl;
3022                 int                     i;
3023                 int                     s;
3024                 struct cam_ed           *device;
3025                 int                     found;
3026
3027
3028                 found = 0;
3029
3030                 /*
3031                  * Don't want anyone mucking with our data.
3032                  */
3033                 s = splcam();
3034                 device = start_ccb->ccb_h.path->device;
3035                 periph_head = &device->periphs;
3036                 cgdl = &start_ccb->cgdl;
3037
3038                 /*
3039                  * Check and see if the list has changed since the user
3040                  * last requested a list member.  If so, tell them that the
3041                  * list has changed, and therefore they need to start over 
3042                  * from the beginning.
3043                  */
3044                 if ((cgdl->index != 0) && 
3045                     (cgdl->generation != device->generation)) {
3046                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3047                         splx(s);
3048                         break;
3049                 }
3050
3051                 /*
3052                  * Traverse the list of peripherals and attempt to find 
3053                  * the requested peripheral.
3054                  */
3055                 for (nperiph = periph_head->slh_first, i = 0;
3056                      (nperiph != NULL) && (i <= cgdl->index);
3057                      nperiph = nperiph->periph_links.sle_next, i++) {
3058                         if (i == cgdl->index) {
3059                                 strncpy(cgdl->periph_name,
3060                                         nperiph->periph_name,
3061                                         DEV_IDLEN);
3062                                 cgdl->unit_number = nperiph->unit_number;
3063                                 found = 1;
3064                         }
3065                 }
3066                 if (found == 0) {
3067                         cgdl->status = CAM_GDEVLIST_ERROR;
3068                         splx(s);
3069                         break;
3070                 }
3071
3072                 if (nperiph == NULL)
3073                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3074                 else
3075                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3076
3077                 cgdl->index++;
3078                 cgdl->generation = device->generation;
3079
3080                 splx(s);
3081                 cgdl->ccb_h.status = CAM_REQ_CMP;
3082                 break;
3083         }
3084         case XPT_DEV_MATCH:
3085         {
3086                 int s;
3087                 dev_pos_type position_type;
3088                 struct ccb_dev_match *cdm;
3089                 int ret;
3090
3091                 cdm = &start_ccb->cdm;
3092
3093                 /*
3094                  * Prevent EDT changes while we traverse it.
3095                  */
3096                 s = splcam();
3097                 /*
3098                  * There are two ways of getting at information in the EDT.
3099                  * The first way is via the primary EDT tree.  It starts
3100                  * with a list of busses, then a list of targets on a bus,
3101                  * then devices/luns on a target, and then peripherals on a
3102                  * device/lun.  The "other" way is by the peripheral driver
3103                  * lists.  The peripheral driver lists are organized by
3104                  * peripheral driver.  (obviously)  So it makes sense to
3105                  * use the peripheral driver list if the user is looking
3106                  * for something like "da1", or all "da" devices.  If the
3107                  * user is looking for something on a particular bus/target
3108                  * or lun, it's generally better to go through the EDT tree.
3109                  */
3110
3111                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3112                         position_type = cdm->pos.position_type;
3113                 else {
3114                         int i;
3115
3116                         position_type = CAM_DEV_POS_NONE;
3117
3118                         for (i = 0; i < cdm->num_patterns; i++) {
3119                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3120                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3121                                         position_type = CAM_DEV_POS_EDT;
3122                                         break;
3123                                 }
3124                         }
3125
3126                         if (cdm->num_patterns == 0)
3127                                 position_type = CAM_DEV_POS_EDT;
3128                         else if (position_type == CAM_DEV_POS_NONE)
3129                                 position_type = CAM_DEV_POS_PDRV;
3130                 }
3131
3132                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3133                 case CAM_DEV_POS_EDT:
3134                         ret = xptedtmatch(cdm);
3135                         break;
3136                 case CAM_DEV_POS_PDRV:
3137                         ret = xptperiphlistmatch(cdm);
3138                         break;
3139                 default:
3140                         cdm->status = CAM_DEV_MATCH_ERROR;
3141                         break;
3142                 }
3143
3144                 splx(s);
3145
3146                 if (cdm->status == CAM_DEV_MATCH_ERROR)
3147                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3148                 else
3149                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3150
3151                 break;
3152         }
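        /*
         * A request sketch (editorial illustration, assuming a
         * caller-held "path"): with no patterns supplied, position_type
         * defaults to CAM_DEV_POS_EDT above and every entry is reported
         * into the caller-supplied match buffer.
         */
#if 0
        struct ccb_dev_match cdm;
        struct dev_match_result matches[16];

        xpt_setup_ccb(&cdm.ccb_h, path, /*priority*/1);
        cdm.ccb_h.func_code = XPT_DEV_MATCH;
        cdm.num_patterns = 0;           /* match everything */
        cdm.pattern_buf_len = 0;
        cdm.patterns = NULL;
        cdm.num_matches = 0;
        cdm.match_buf_len = sizeof(matches);
        cdm.matches = matches;
        cdm.pos.position_type = CAM_DEV_POS_NONE;
        xpt_action((union ccb *)&cdm);
#endif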
3153         case XPT_SASYNC_CB:
3154         {
3155                 struct ccb_setasync *csa;
3156                 struct async_node *cur_entry;
3157                 struct async_list *async_head;
3158                 u_int32_t added;
3159                 int s;
3160
3161                 csa = &start_ccb->csa;
3162                 added = csa->event_enable;
3163                 async_head = &csa->ccb_h.path->device->asyncs;
3164
3165                 /*
3166                  * If there is already an entry for us, simply
3167                  * update it.
3168                  */
3169                 s = splcam();
3170                 cur_entry = SLIST_FIRST(async_head);
3171                 while (cur_entry != NULL) {
3172                         if ((cur_entry->callback_arg == csa->callback_arg)
3173                          && (cur_entry->callback == csa->callback))
3174                                 break;
3175                         cur_entry = SLIST_NEXT(cur_entry, links);
3176                 }
3177
3178                 if (cur_entry != NULL) {
3179                         /*
3180                          * If the request has no flags set,
3181                          * remove the entry.
3182                          */
3183                         added &= ~cur_entry->event_enable;
3184                         if (csa->event_enable == 0) {
3185                                 SLIST_REMOVE(async_head, cur_entry,
3186                                              async_node, links);
3187                                 csa->ccb_h.path->device->refcount--;
3188                                 free(cur_entry, M_DEVBUF);
3189                         } else {
3190                                 cur_entry->event_enable = csa->event_enable;
3191                         }
3192                 } else {
3193                         cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
3194                                            M_NOWAIT);
3195                         if (cur_entry == NULL) {
3196                                 splx(s);
3197                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3198                                 break;
3199                         }
3200                         cur_entry->event_enable = csa->event_enable;
3201                         cur_entry->callback_arg = csa->callback_arg;
3202                         cur_entry->callback = csa->callback;
3203                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3204                         csa->ccb_h.path->device->refcount++;
3205                 }
3206
3207                 if ((added & AC_FOUND_DEVICE) != 0) {
3208                         /*
3209                          * Get this peripheral up to date with all
3210                          * the currently existing devices.
3211                          */
3212                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3213                 }
3214                 if ((added & AC_PATH_REGISTERED) != 0) {
3215                         /*
3216                          * Get this peripheral up to date with all
3217                          * the currently existing busses.
3218                          */
3219                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3220                 }
3221                 splx(s);
3222                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3223                 break;
3224         }
3225         case XPT_REL_SIMQ:
3226         {
3227                 struct ccb_relsim *crs;
3228                 struct cam_ed *dev;
3229                 int s;
3230
3231                 crs = &start_ccb->crs;
3232                 dev = crs->ccb_h.path->device;
3233                 if (dev == NULL) {
3234
3235                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3236                         break;
3237                 }
3238
3239                 s = splcam();
3240
3241                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3242
3243                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3244
3245                                 /* Don't ever go below one opening */
3246                                 if (crs->openings > 0) {
3247                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
3248                                                             crs->openings);
3249
3250                                         if (bootverbose) {
3251                                                 xpt_print_path(crs->ccb_h.path);
3252                                                 printf("tagged openings "
3253                                                        "now %d\n",
3254                                                        crs->openings);
3255                                         }
3256                                 }
3257                         }
3258                 }
3259
3260                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3261
3262                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3263
3264                                 /*
3265                                  * Just extend the old timeout and decrement
3266                                  * the freeze count so that a single timeout
3267                                  * is sufficient for releasing the queue.
3268                                  */
3269                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3270                                 untimeout(xpt_release_devq_timeout,
3271                                           dev, dev->c_handle);
3272                         } else {
3273
3274                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3275                         }
3276
3277                         dev->c_handle =
3278                                 timeout(xpt_release_devq_timeout,
3279                                         dev,
3280                                         (crs->release_timeout * hz) / 1000);
3281
3282                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3283
3284                 }
3285
3286                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3287
3288                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3289                                 /*
3290                                  * Decrement the freeze count so that a single
3291                                  * completion is still sufficient to unfreeze
3292                                  * the queue.
3293                                  */
3294                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3295                         } else {
3296                                 
3297                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3298                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3299                         }
3300                 }
3301
3302                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3303
3304                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3305                          || (dev->ccbq.dev_active == 0)) {
3306
3307                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3308                         } else {
3309                                 
3310                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3311                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3312                         }
3313                 }
3314                 splx(s);
3315                 
3316                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3317
3318                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
3319                                          /*run_queue*/TRUE);
3320                 }
3321                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3322                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3323                 break;
3324         }
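        /*
         * A caller sketch (editorial illustration, assuming a
         * caller-held "path"): release a frozen device queue 500ms from
         * now, leaving the opening count alone.
         */
#if 0
        struct ccb_relsim crs;

        xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
        crs.ccb_h.func_code = XPT_REL_SIMQ;
        crs.release_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
        crs.release_timeout = 500;      /* ms; converted to ticks above */
        crs.openings = 0;
        xpt_action((union ccb *)&crs);
#endif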
3325         case XPT_SCAN_BUS:
3326                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3327                 break;
3328         case XPT_SCAN_LUN:
3329                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3330                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
3331                              start_ccb);
3332                 break;
3333         case XPT_DEBUG: {
3334 #ifdef CAMDEBUG
3335                 int s;
3336                 
3337                 s = splcam();
3338 #ifdef CAM_DEBUG_DELAY
3339                 cam_debug_delay = CAM_DEBUG_DELAY;
3340 #endif
3341                 cam_dflags = start_ccb->cdbg.flags;
3342                 if (cam_dpath != NULL) {
3343                         xpt_free_path(cam_dpath);
3344                         cam_dpath = NULL;
3345                 }
3346
3347                 if (cam_dflags != CAM_DEBUG_NONE) {
3348                         if (xpt_create_path(&cam_dpath, xpt_periph,
3349                                             start_ccb->ccb_h.path_id,
3350                                             start_ccb->ccb_h.target_id,
3351                                             start_ccb->ccb_h.target_lun) !=
3352                                             CAM_REQ_CMP) {
3353                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3354                                 cam_dflags = CAM_DEBUG_NONE;
3355                         } else {
3356                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3357                                 xpt_print_path(cam_dpath);
3358                                 printf("debugging flags now %x\n", cam_dflags);
3359                         }
3360                 } else {
3361                         cam_dpath = NULL;
3362                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3363                 }
3364                 splx(s);
3365 #else /* !CAMDEBUG */
3366                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3367 #endif /* CAMDEBUG */
3368                 break;
3369         }
3370         case XPT_NOOP:
3371                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3372                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3373                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3374                 break;
3375         default:
3376         case XPT_SDEV_TYPE:
3377         case XPT_TERM_IO:
3378         case XPT_ENG_INQ:
3379                 /* XXX Implement */
3380                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3381                 break;
3382         }
3383         splx(iopl);
3384 }
3385
3386 void
3387 xpt_polled_action(union ccb *start_ccb)
3388 {
3389         int       s;
3390         u_int32_t timeout;
3391         struct    cam_sim *sim; 
3392         struct    cam_devq *devq;
3393         struct    cam_ed *dev;
3394
3395         timeout = start_ccb->ccb_h.timeout;
3396         sim = start_ccb->ccb_h.path->bus->sim;
3397         devq = sim->devq;
3398         dev = start_ccb->ccb_h.path->device;
3399
3400         s = splcam();
3401
3402         /*
3403          * Steal an opening so that no other queued requests
3404          * can get it before us while we simulate interrupts.
3405          */
3406         dev->ccbq.devq_openings--;
3407         dev->ccbq.dev_openings--;       
3408         
3409         while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3410            && (--timeout > 0)) {
3411                 DELAY(1000);
3412                 (*(sim->sim_poll))(sim);
3413                 swi_camnet();
3414                 swi_cambio();           
3415         }
3416         
3417         dev->ccbq.devq_openings++;
3418         dev->ccbq.dev_openings++;
3419         
3420         if (timeout != 0) {
3421                 xpt_action(start_ccb);
3422                 while(--timeout > 0) {
3423                         (*(sim->sim_poll))(sim);
3424                         swi_camnet();
3425                         swi_cambio();
3426                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3427                             != CAM_REQ_INPROG)
3428                                 break;
3429                         DELAY(1000);
3430                 }
3431                 if (timeout == 0) {
3432                         /*
3433                          * XXX Is it worth adding a sim_timeout entry
3434                          * point so we can attempt recovery?  If
3435                          * this is only used for dumps, I don't think
3436                          * it is.
3437                          */
3438                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3439                 }
3440         } else {
3441                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3442         }
3443         splx(s);
3444 }
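/*
 * A polled-caller sketch (editorial illustration): the ccb_h.timeout
 * above is interpreted in milliseconds (one DELAY(1000) and poll pass
 * per loop iteration), so code running without interrupts, e.g. at
 * crash dump time, might issue a request like this:
 */
#if 0
ccb->ccb_h.timeout = 5000;      /* 5 seconds */
xpt_polled_action(ccb);
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
        printf("polled ccb failed\n");
#endif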
3445         
3446 /*
3447  * Schedule a peripheral driver to receive a ccb when its
3448  * target device has space for more transactions.
3449  */
3450 void
3451 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3452 {
3453         struct cam_ed *device;
3454         int s;
3455         int runq;
3456
3457         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3458         device = perph->path->device;
3459         s = splsoftcam();
3460         if (periph_is_queued(perph)) {
3461                 /* Simply reorder based on new priority */
3462                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3463                           ("   change priority to %d\n", new_priority));
3464                 if (new_priority < perph->pinfo.priority) {
3465                         camq_change_priority(&device->drvq,
3466                                              perph->pinfo.index,
3467                                              new_priority);
3468                 }
3469                 runq = 0;
3470         } else {
3471                 /* New entry on the queue */
3472                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3473                           ("   added periph to queue\n"));
3474                 perph->pinfo.priority = new_priority;
3475                 perph->pinfo.generation = ++device->drvq.generation;
3476                 camq_insert(&device->drvq, &perph->pinfo);
3477                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3478         }
3479         splx(s);
3480         if (runq != 0) {
3481                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3482                           ("   calling xpt_run_dev_allocq\n"));
3483                 xpt_run_dev_allocq(perph->path->bus);
3484         }
3485 }
3486
3487
3488 /*
3489  * Schedule a device to run on a given queue.
3490  * If the device was inserted as a new entry on the queue,
3491  * return 1 meaning the device queue should be run. If we
3492  * were already queued, implying someone else has already
3493  * started the queue, return 0 so the caller doesn't attempt
3494  * to run the queue.  Must be called at splsoftcam or higher
3495  * (splcam also suffices, since it encompasses splsoftcam).
3496  */
3497 static int
3498 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3499                  u_int32_t new_priority)
3500 {
3501         int retval;
3502         u_int32_t old_priority;
3503
3504         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3505
3506         old_priority = pinfo->priority;
3507
3508         /*
3509          * Are we already queued?
3510          */
3511         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3512                 /* Simply reorder based on new priority */
3513                 if (new_priority < old_priority) {
3514                         camq_change_priority(queue, pinfo->index,
3515                                              new_priority);
3516                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3517                                         ("changed priority to %d\n",
3518                                          new_priority));
3519                 }
3520                 retval = 0;
3521         } else {
3522                 /* New entry on the queue */
3523                 if (new_priority < old_priority)
3524                         pinfo->priority = new_priority;
3525
3526                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3527                                 ("Inserting onto queue\n"));
3528                 pinfo->generation = ++queue->generation;
3529                 camq_insert(queue, pinfo);
3530                 retval = 1;
3531         }
3532         return (retval);
3533 }
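/*
 * Note (editorial): numerically smaller priority values are the more
 * urgent ones, which is why the reorder above happens only when
 * new_priority < old_priority.
 */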
3534
3535 static void
3536 xpt_run_dev_allocq(struct cam_eb *bus)
3537 {
3538         struct  cam_devq *devq;
3539         int     s;
3540
3541         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3542         devq = bus->sim->devq;
3543
3544         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3545                         ("   qfrozen_cnt == 0x%x, entries == %d, "
3546                          "openings == %d, active == %d\n",
3547                          devq->alloc_queue.qfrozen_cnt,
3548                          devq->alloc_queue.entries,
3549                          devq->alloc_openings,
3550                          devq->alloc_active));
3551
3552         s = splsoftcam();
3553         devq->alloc_queue.qfrozen_cnt++;
3554         while ((devq->alloc_queue.entries > 0)
3555             && (devq->alloc_openings > 0)
3556             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3557                 struct  cam_ed_qinfo *qinfo;
3558                 struct  cam_ed *device;
3559                 union   ccb *work_ccb;
3560                 struct  cam_periph *drv;
3561                 struct  camq *drvq;
3562                 
3563                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3564                                                            CAMQ_HEAD);
3565                 device = qinfo->device;
3566
3567                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3568                                 ("running device %p\n", device));
3569
3570                 drvq = &device->drvq;
3571
3572 #ifdef CAMDEBUG
3573                 if (drvq->entries <= 0) {
3574                         panic("xpt_run_dev_allocq: "
3575                               "Device on queue without any work to do");
3576                 }
3577 #endif
3578                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3579                         devq->alloc_openings--;
3580                         devq->alloc_active++;
3581                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3582                         splx(s);
3583                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3584                                       drv->pinfo.priority);
3585                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3586                                         ("calling periph start\n"));
3587                         drv->periph_start(drv, work_ccb);
3588                 } else {
3589                         /*
3590                          * Malloc failure in alloc_ccb
3591                          */
3592                         /*
3593                          * XXX add us to a list to be run from free_ccb
3594                          * if we don't have any ccbs active on this
3595                          * device queue otherwise we may never get run
3596                          * again.
3597                          */
3598                         break;
3599                 }
3600         
3601                 /* Raise IPL for possible insertion and test at top of loop */
3602                 s = splsoftcam();
3603
3604                 if (drvq->entries > 0) {
3605                         /* We have more work.  Attempt to reschedule */
3606                         xpt_schedule_dev_allocq(bus, device);
3607                 }
3608         }
3609         devq->alloc_queue.qfrozen_cnt--;
3610         splx(s);
3611 }
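/*
 * Note (editorial): CAM runs two queues per SIM.  The alloc queue,
 * drained above, hands freshly allocated CCBs to peripheral drivers
 * through their periph_start() entry points; the send queue, drained
 * by xpt_run_dev_sendq() below, dispatches fully built CCBs to the
 * SIM's sim_action() routine.
 */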
3612
3613 static void
3614 xpt_run_dev_sendq(struct cam_eb *bus)
3615 {
3616         struct  cam_devq *devq;
3617         int     s;
3618
3619         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3620         
3621         devq = bus->sim->devq;
3622
3623         s = splcam();
3624         devq->send_queue.qfrozen_cnt++;
3625         splx(s);
3626         s = splsoftcam();
3627         while ((devq->send_queue.entries > 0)
3628             && (devq->send_openings > 0)) {
3629                 struct  cam_ed_qinfo *qinfo;
3630                 struct  cam_ed *device;
3631                 union ccb *work_ccb;
3632                 struct  cam_sim *sim;
3633                 int     ospl;
3634
3635                 ospl = splcam();
3636                 if (devq->send_queue.qfrozen_cnt > 1) {
3637                         splx(ospl);
3638                         break;
3639                 }
3640
3641                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3642                                                            CAMQ_HEAD);
3643                 device = qinfo->device;
3644
3645                 /*
3646                  * If the device has been "frozen", don't attempt
3647                  * to run it.
3648                  */
3649                 if (device->qfrozen_cnt > 0) {
3650                         splx(ospl);
3651                         continue;
3652                 }
3653
3654                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3655                                 ("running device %p\n", device));
3656
3657                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3658                 if (work_ccb == NULL) {
3659                         printf("device on run queue with no ccbs???\n");
3660                         splx(ospl);
3661                         continue;
3662                 }
3663
3664                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3665
3666                         if (num_highpower <= 0) {
3667                                 /*
3668                                  * We got a high power command, but we
3669                                  * don't have any available slots.  Freeze
3670                                  * the device queue until we have a slot
3671                                  * available.
3672                                  */
3673                                 device->qfrozen_cnt++;
3674                                 STAILQ_INSERT_TAIL(&highpowerq, 
3675                                                    &work_ccb->ccb_h, 
3676                                                    xpt_links.stqe);
3677
3678                                 splx(ospl);
3679                                 continue;
3680                         } else {
3681                                 /*
3682                                  * Consume a high power slot while
3683                                  * this ccb runs.
3684                                  */
3685                                 num_highpower--;
3686                         }
3687                 }
3688                 devq->active_dev = device;
3689                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3690
3691                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3692                 splx(ospl);
3693
3694                 devq->send_openings--;
3695                 devq->send_active++;            
3696                 
3697                 if (device->ccbq.queue.entries > 0)
3698                         xpt_schedule_dev_sendq(bus, device);
3699
3700                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3701                         /*
3702                          * The client wants to freeze the queue
3703                          * after this CCB is sent.
3704                          */
3705                         ospl = splcam();
3706                         device->qfrozen_cnt++;
3707                         splx(ospl);
3708                 }
3709                 
3710                 splx(s);
3711
3712                 /* In Target mode, the peripheral driver knows best... */
3713                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3714                         if ((device->inq_flags & SID_CmdQue) != 0
3715                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3716                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3717                         else
3718                                 /*
3719                                  * Clear this in case of a retried CCB that
3720                                  * failed due to a rejected tag.
3721                                  */
3722                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3723                 }
3724
3725                 /*
3726                  * Device queues can be shared among multiple sim instances
3727                  * that reside on different busses.  Use the SIM in the queue
3728                  * CCB's path, rather than the one in the bus that was passed
3729                  * into this function.
3730                  */
3731                 sim = work_ccb->ccb_h.path->bus->sim;
3732                 (*(sim->sim_action))(sim, work_ccb);
3733
3734                 ospl = splcam();
3735                 devq->active_dev = NULL;
3736                 splx(ospl);
3737                 /* Raise IPL for possible insertion and test at top of loop */
3738                 s = splsoftcam();
3739         }
3740         splx(s);
3741         s = splcam();
3742         devq->send_queue.qfrozen_cnt--;
3743         splx(s);
3744 }
3745
3746 /*
3747  * This function merges the request fields from the slave ccb into the
3748  * master ccb, while keeping important fields in the master ccb constant.
3749  */
3750 void
3751 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3752 {
3753         /*
3754          * Pull fields that are valid for peripheral drivers to set
3755          * into the master CCB along with the CCB "payload".
3756          */
3757         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3758         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3759         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3760         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3761         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3762               sizeof(union ccb) - sizeof(struct ccb_hdr));
3763 }
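/*
 * Note (editorial): &(&ccb->ccb_h)[1] points just past the ccb_hdr, so
 * the bcopy above copies only the function-specific payload, leaving
 * every master header field except the four merged explicitly (retry
 * count, function code, timeout, and flags) untouched.
 */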
3764
3765 void
3766 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3767 {
3768         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3769         ccb_h->pinfo.priority = priority;
3770         ccb_h->path = path;
3771         ccb_h->path_id = path->bus->path_id;
3772         if (path->target)
3773                 ccb_h->target_id = path->target->target_id;
3774         else
3775                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3776         if (path->device) {
3777                 ccb_h->target_lun = path->device->lun_id;
3778                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3779         } else {
3780                 ccb_h->target_lun = CAM_LUN_WILDCARD;
3781         }
3782         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3783         ccb_h->flags = 0;
3784 }
3785
3786 /* Path manipulation functions */
3787 cam_status
3788 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3789                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3790 {
3791         struct     cam_path *path;
3792         cam_status status;
3793
3794         path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3795
3796         if (path == NULL) {
3797                 status = CAM_RESRC_UNAVAIL;
3798                 return(status);
3799         }
3800         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3801         if (status != CAM_REQ_CMP) {
3802                 free(path, M_DEVBUF);
3803                 path = NULL;
3804         }
3805         *new_path_ptr = path;
3806         return (status);
3807 }
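/*
 * A caller sketch (editorial illustration): build and later release a
 * path to bus 0, target 1, lun 0.
 */
#if 0
struct cam_path *path;

if (xpt_create_path(&path, /*periph*/NULL, /*path_id*/0,
                    /*target_id*/1, /*lun_id*/0) == CAM_REQ_CMP) {
        xpt_print_path(path);
        printf("path created\n");
        xpt_free_path(path);
}
#endif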
3808
3809 static cam_status
3810 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3811                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3812 {
3813         struct       cam_eb *bus;
3814         struct       cam_et *target;
3815         struct       cam_ed *device;
3816         cam_status   status;
3817         int          s;
3818
3819         status = CAM_REQ_CMP;   /* Completed without error */
3820         target = NULL;          /* Wildcarded */
3821         device = NULL;          /* Wildcarded */
3822
3823         /*
3824          * We will potentially modify the EDT, so block interrupts
3825          * that may attempt to create cam paths.
3826          */
3827         s = splcam();
3828         bus = xpt_find_bus(path_id);
3829         if (bus == NULL) {
3830                 status = CAM_PATH_INVALID;
3831         } else {
3832                 target = xpt_find_target(bus, target_id);
3833                 if (target == NULL) {
3834                         /* Create one */
3835                         struct cam_et *new_target;
3836
3837                         new_target = xpt_alloc_target(bus, target_id);
3838                         if (new_target == NULL) {
3839                                 status = CAM_RESRC_UNAVAIL;
3840                         } else {
3841                                 target = new_target;
3842                         }
3843                 }
3844                 if (target != NULL) {
3845                         device = xpt_find_device(target, lun_id);
3846                         if (device == NULL) {
3847                                 /* Create one */
3848                                 struct cam_ed *new_device;
3849
3850                                 new_device = xpt_alloc_device(bus,
3851                                                               target,
3852                                                               lun_id);
3853                                 if (new_device == NULL) {
3854                                         status = CAM_RESRC_UNAVAIL;
3855                                 } else {
3856                                         device = new_device;
3857                                 }
3858                         }
3859                 }
3860         }
3861         splx(s);
3862
3863         /*
3864          * Only touch the user's data if we are successful.
3865          */
3866         if (status == CAM_REQ_CMP) {
3867                 new_path->periph = perph;
3868                 new_path->bus = bus;
3869                 new_path->target = target;
3870                 new_path->device = device;
3871                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3872         } else {
3873                 if (device != NULL)
3874                         xpt_release_device(bus, target, device);
3875                 if (target != NULL)
3876                         xpt_release_target(bus, target);
3877                 if (bus != NULL)
3878                         xpt_release_bus(bus);
3879         }
3880         return (status);
3881 }
3882
3883 static void
3884 xpt_release_path(struct cam_path *path)
3885 {
3886         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3887         if (path->device != NULL) {
3888                 xpt_release_device(path->bus, path->target, path->device);
3889                 path->device = NULL;
3890         }
3891         if (path->target != NULL) {
3892                 xpt_release_target(path->bus, path->target);
3893                 path->target = NULL;
3894         }
3895         if (path->bus != NULL) {
3896                 xpt_release_bus(path->bus);
3897                 path->bus = NULL;
3898         }
3899 }
3900
3901 void
3902 xpt_free_path(struct cam_path *path)
3903 {
3904         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3905         xpt_release_path(path);
3906         free(path, M_DEVBUF);
3907 }
3908
3909
3910 /*
3911  * Return -1 for no match, 0 for an exact match, 1 for a match with
3912  * wildcards in path1, 2 for a match with wildcards in path2.
3913  */
3914 int
3915 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3916 {
3917         int retval = 0;
3918
3919         if (path1->bus != path2->bus) {
3920                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3921                         retval = 1;
3922                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3923                         retval = 2;
3924                 else
3925                         return (-1);
3926         }
3927         if (path1->target != path2->target) {
3928                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3929                         if (retval == 0)
3930                                 retval = 1;
3931                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3932                         retval = 2;
3933                 else
3934                         return (-1);
3935         }
3936         if (path1->device != path2->device) {
3937                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3938                         if (retval == 0)
3939                                 retval = 1;
3940                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3941                         retval = 2;
3942                 else
3943                         return (-1);
3944         }
3945         return (retval);
3946 }
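/*
 * Worked example (editorial): comparing a path whose target and lun
 * are wildcards against a fully specified path on the same bus returns
 * 1 (wildcards in path1); with the arguments swapped it returns 2; two
 * fully specified paths to different devices return -1.
 */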
3947
3948 void
3949 xpt_print_path(struct cam_path *path)
3950 {
3951         if (path == NULL)
3952                 printf("(nopath): ");
3953         else {
3954                 if (path->periph != NULL)
3955                         printf("(%s%d:", path->periph->periph_name,
3956                                path->periph->unit_number);
3957                 else
3958                         printf("(noperiph:");
3959
3960                 if (path->bus != NULL)
3961                         printf("%s%d:%d:", path->bus->sim->sim_name,
3962                                path->bus->sim->unit_number,
3963                                path->bus->sim->bus_id);
3964                 else
3965                         printf("nobus:");
3966
3967                 if (path->target != NULL)
3968                         printf("%d:", path->target->target_id);
3969                 else
3970                         printf("X:");
3971
3972                 if (path->device != NULL)
3973                         printf("%d): ", path->device->lun_id);
3974                 else
3975                         printf("X): ");
3976         }
3977 }
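/*
 * Illustrative output (editorial): for periph "da0" on sim "ahc0",
 * bus id 0, target 1, lun 0, the prefix printed above is
 * "(da0:ahc0:0:1:0): ".
 */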
3978
3979 path_id_t
3980 xpt_path_path_id(struct cam_path *path)
3981 {
3982         return(path->bus->path_id);
3983 }
3984
3985 target_id_t
3986 xpt_path_target_id(struct cam_path *path)
3987 {
3988         if (path->target != NULL)
3989                 return (path->target->target_id);
3990         else
3991                 return (CAM_TARGET_WILDCARD);
3992 }
3993
3994 lun_id_t
3995 xpt_path_lun_id(struct cam_path *path)
3996 {
3997         if (path->device != NULL)
3998                 return (path->device->lun_id);
3999         else
4000                 return (CAM_LUN_WILDCARD);
4001 }
4002
4003 struct cam_sim *
4004 xpt_path_sim(struct cam_path *path)
4005 {
4006         return (path->bus->sim);
4007 }
4008
4009 struct cam_periph*
4010 xpt_path_periph(struct cam_path *path)
4011 {
4012         return (path->periph);
4013 }
4014
4015 /*
4016  * Release a CAM control block for the caller.  Remit the cost of the structure
4017  * to the device referenced by the path.  If this device had no 'credits'
4018  * and peripheral drivers have registered async callbacks for this
4019  * notification, call them now.
4020  */
4021 void
4022 xpt_release_ccb(union ccb *free_ccb)
4023 {
4024         int      s;
4025         struct   cam_path *path;
4026         struct   cam_ed *device;
4027         struct   cam_eb *bus;
4028
4029         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4030         path = free_ccb->ccb_h.path;
4031         device = path->device;
4032         bus = path->bus;
4033         s = splsoftcam();
4034         cam_ccbq_release_opening(&device->ccbq);
4035         if (xpt_ccb_count > xpt_max_ccbs) {
4036                 xpt_free_ccb(free_ccb);
4037                 xpt_ccb_count--;
4038         } else {
4039                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4040         }
4041         bus->sim->devq->alloc_openings++;
4042         bus->sim->devq->alloc_active--;
4043         /* XXX Turn this into an inline function - xpt_run_device?? */
4044         if ((device_is_alloc_queued(device) == 0)
4045          && (device->drvq.entries > 0)) {
4046                 xpt_schedule_dev_allocq(bus, device);
4047         }
4048         splx(s);
4049         if (dev_allocq_is_runnable(bus->sim->devq))
4050                 xpt_run_dev_allocq(bus);
4051 }
4052
4053 /* Functions accessed by SIM drivers */
4054
4055 /*
4056  * A sim structure, listing the SIM entry points and instance
4057  * identification info, is passed to xpt_bus_register to hook the SIM
4058  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4059  * for this new bus and places it in the list of busses and assigns
4060  * it a path_id.  The path_id may be influenced by "hard wiring"
4061  * information specified by the user.  Once interrupt services are
4062  * available, the bus will be probed.
4063  */
4064 int32_t
4065 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4066 {
4067         struct cam_eb *new_bus;
4068         struct cam_eb *old_bus;
4069         struct ccb_pathinq cpi;
4070         int s;
4071
4072         sim->bus_id = bus;
4073         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4074                                           M_DEVBUF, M_NOWAIT);
4075         if (new_bus == NULL) {
4076                 /* Couldn't satisfy request */
4077                 return (CAM_RESRC_UNAVAIL);
4078         }
4079
4080         if (strcmp(sim->sim_name, "xpt") != 0) {
4081
4082                 sim->path_id =
4083                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4084         }
4085
4086         TAILQ_INIT(&new_bus->et_entries);
4087         new_bus->path_id = sim->path_id;
4088         new_bus->sim = sim;
4089         timevalclear(&new_bus->last_reset);
4090         new_bus->flags = 0;
4091         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4092         new_bus->generation = 0;
4093         s = splcam();
4094         old_bus = TAILQ_FIRST(&xpt_busses);
4095         while (old_bus != NULL
4096             && old_bus->path_id < new_bus->path_id)
4097                 old_bus = TAILQ_NEXT(old_bus, links);
4098         if (old_bus != NULL)
4099                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4100         else
4101                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4102         bus_generation++;
4103         splx(s);
4104
4105         /* Notify interested parties */
4106         if (sim->path_id != CAM_XPT_PATH_ID) {
4107                 struct cam_path path;
4108
4109                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4110                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4111                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4112                 cpi.ccb_h.func_code = XPT_PATH_INQ;
4113                 xpt_action((union ccb *)&cpi);
4114                 xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
4115                 xpt_release_path(&path);
4116         }
4117         return (CAM_SUCCESS);
4118 }
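
/*
 * A hedged sketch of how a SIM driver might hook itself in with
 * xpt_bus_register().  "foo_softc", "foo_action", and "foo_poll" are
 * hypothetical; the cam_simq_alloc()/cam_sim_alloc() calls follow the
 * standard CAM SIM API.
 */
#if 0
static int
foo_attach_cam(struct foo_softc *sc, int unit)
{
        struct cam_devq *devq;

        /* Allocate a device queue sized to the controller's openings. */
        devq = cam_simq_alloc(/*max_sim_transactions*/32);
        if (devq == NULL)
                return (ENOMEM);

        sc->sim = cam_sim_alloc(foo_action, foo_poll, "foo", sc, unit,
                                /*max_dev_transactions*/1,
                                /*max_tagged_dev_transactions*/32, devq);
        if (sc->sim == NULL) {
                cam_simq_free(devq);
                return (ENOMEM);
        }

        /* Register bus 0 of this adapter; the XPT assigns the path_id. */
        if (xpt_bus_register(sc->sim, /*bus*/0) != CAM_SUCCESS) {
                cam_sim_free(sc->sim, /*free_devq*/TRUE);
                return (ENXIO);
        }
        return (0);
}
#endif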
4119
4120 int32_t
4121 xpt_bus_deregister(path_id_t pathid)
4122 {
4123         struct cam_path bus_path;
4124         cam_status status;
4125
4126         status = xpt_compile_path(&bus_path, NULL, pathid,
4127                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4128         if (status != CAM_REQ_CMP)
4129                 return (status);
4130
4131         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4132         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4133         
4134         /* Release the reference count held while registered. */
4135         xpt_release_bus(bus_path.bus);
4136         xpt_release_path(&bus_path);
4137
4138         return (CAM_REQ_CMP);
4139 }
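
/*
 * The matching detach step, sketched under the same hypothetical "sc"
 * as the registration example above: deregister the path, then free
 * the SIM along with its devq.
 */
#if 0
        xpt_bus_deregister(cam_sim_path(sc->sim));
        cam_sim_free(sc->sim, /*free_devq*/TRUE);
#endif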
4140
4141 static path_id_t
4142 xptnextfreepathid(void)
4143 {
4144         struct cam_eb *bus;
4145         path_id_t pathid;
4146         char *strval;
4147
4148         pathid = 0;
4149         bus = TAILQ_FIRST(&xpt_busses);
4150 retry:
4151         /* Find an unoccupied pathid */
4152         while (bus != NULL
4153             && bus->path_id <= pathid) {
4154                 if (bus->path_id == pathid)
4155                         pathid++;
4156                 bus = TAILQ_NEXT(bus, links);
4157         }
4158
4159         /*
4160          * Ensure that this pathid is not reserved for
4161          * a bus that may be registered in the future.
4162          */
4163         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4164                 ++pathid;
4165                 /* Start the search over */
4166                 goto retry;
4167         }
4168         return (pathid);
4169 }
4170
4171 static path_id_t
4172 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4173 {
4174         path_id_t pathid;
4175         int i, dunit, val;
4176         char buf[32], *strval;
4177
4178         pathid = CAM_XPT_PATH_ID;
4179         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4180         i = -1;
4181         while ((i = resource_locate(i, "scbus")) != -1) {
4182                 dunit = resource_query_unit(i);
4183                 if (dunit < 0)          /* unwired?! */
4184                         continue;
4185                 if (resource_string_value("scbus", dunit, "at", &strval) != 0)
4186                         continue;
4187                 if (strcmp(buf, strval) != 0)
4188                         continue;
4189                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4190                         if (sim_bus == val) {
4191                                 pathid = dunit;
4192                                 break;
4193                         }
4194                 } else if (sim_bus == 0) {
4195                         /* Unspecified matches bus 0 */
4196                         pathid = dunit;
4197                         break;
4198                 } else {
4199                         printf("Ambiguous scbus configuration for %s%d "
4200                                "bus %d, cannot wire down.  The kernel "
4201                                "config entry for scbus%d should "
4202                                "specify a controller bus.\n"
4203                                "Scbus will be assigned dynamically.\n",
4204                                sim_name, sim_unit, sim_bus, dunit);
4205                         break;
4206                 }
4207         }
4208
4209         if (pathid == CAM_XPT_PATH_ID)
4210                 pathid = xptnextfreepathid();
4211         return (pathid);
4212 }
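
/*
 * For reference, the "hard wiring" consulted above comes from config(8)
 * entries compiled into the kernel.  A hedged example (controller names
 * hypothetical, syntax per the 4.x config grammar):
 *
 *      controller      scbus0  at ahc0 bus 0
 *      controller      scbus1  at ahc1
 *
 * The first entry pins path_id 0 to bus 0 of ahc0; an entry without a
 * "bus" clause matches bus 0 of its controller, and anything left
 * unwired is assigned dynamically by xptnextfreepathid().
 */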
4213
4214 void
4215 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4216 {
4217         struct cam_eb *bus;
4218         struct cam_et *target, *next_target;
4219         struct cam_ed *device, *next_device;
4220         int s;
4221
4222         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4223
4224         /*
4225          * Most async events come from a CAM interrupt context.  In
4226          * a few cases, the error recovery code at the peripheral layer,
4227          * which may run from our SWI or a process context, may signal
4228          * deferred events with a call to xpt_async. Ensure async
4229          * notifications are serialized by blocking cam interrupts.
4230          */
4231         s = splcam();
4232
4233         bus = path->bus;
4234
4235         if (async_code == AC_BUS_RESET) { 
4236                 int s;
4237
4238                 s = splclock();
4239                 /* Update our notion of when the last reset occurred */
4240                 microtime(&bus->last_reset);
4241                 splx(s);
4242         }
4243
4244         for (target = TAILQ_FIRST(&bus->et_entries);
4245              target != NULL;
4246              target = next_target) {
4247
4248                 next_target = TAILQ_NEXT(target, links);
4249
4250                 if (path->target != target
4251                  && path->target->target_id != CAM_TARGET_WILDCARD
4252                  && target->target_id != CAM_TARGET_WILDCARD)
4253                         continue;
4254
4255                 if (async_code == AC_SENT_BDR) {
4256                         int s;
4257
4258                         /* Update our notion of when the last reset occurred */
4259                         s = splclock();
4260                         microtime(&path->target->last_reset);
4261                         splx(s);
4262                 }
4263
4264                 for (device = TAILQ_FIRST(&target->ed_entries);
4265                      device != NULL;
4266                      device = next_device) {
4267
4268                         next_device = TAILQ_NEXT(device, links);
4269
4270                         if (path->device != device 
4271                          && path->device->lun_id != CAM_LUN_WILDCARD
4272                          && device->lun_id != CAM_LUN_WILDCARD)
4273                                 continue;
4274
4275                         xpt_dev_async(async_code, bus, target,
4276                                       device, async_arg);
4277
4278                         xpt_async_bcast(&device->asyncs, async_code,
4279                                         path, async_arg);
4280                 }
4281         }
4282         
4283         /*
4284          * If this wasn't a fully wildcarded async, tell all
4285          * clients that want all async events.
4286          */
4287         if (bus != xpt_periph->path->bus)
4288                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4289                                 path, async_arg);
4290         splx(s);
4291 }
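
/*
 * A hedged usage sketch: a SIM that has just observed a bus reset
 * posts the event against a wildcard path it created at attach time
 * ("sc->path" hypothetical), and the loops above fan the notification
 * out to every target and lun on that bus.
 */
#if 0
        xpt_async(AC_BUS_RESET, sc->path, NULL);
#endif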
4292
4293 static void
4294 xpt_async_bcast(struct async_list *async_head,
4295                 u_int32_t async_code,
4296                 struct cam_path *path, void *async_arg)
4297 {
4298         struct async_node *cur_entry;
4299
4300         cur_entry = SLIST_FIRST(async_head);
4301         while (cur_entry != NULL) {
4302                 struct async_node *next_entry;
4303                 /*
4304                  * Grab the next list entry before we call the current
4305                  * entry's callback.  This is because the callback function
4306                  * can delete its async callback entry.
4307                  */
4308                 next_entry = SLIST_NEXT(cur_entry, links);
4309                 if ((cur_entry->event_enable & async_code) != 0)
4310                         cur_entry->callback(cur_entry->callback_arg,
4311                                             async_code, path,
4312                                             async_arg);
4313                 cur_entry = next_entry;
4314         }
4315 }
4316
4317 /*
4318  * Handle any per-device event notifications that require action by the XPT.
4319  */
4320 static void
4321 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4322               struct cam_ed *device, void *async_arg)
4323 {
4324         cam_status status;
4325         struct cam_path newpath;
4326
4327         /*
4328          * We only need to handle events for real devices.
4329          */
4330         if (target->target_id == CAM_TARGET_WILDCARD
4331          || device->lun_id == CAM_LUN_WILDCARD)
4332                 return;
4333
4334         /*
4335          * We need our own path with wildcards expanded to
4336          * handle certain types of events.
4337          */
4338         if ((async_code == AC_SENT_BDR)
4339          || (async_code == AC_BUS_RESET)
4340          || (async_code == AC_INQ_CHANGED))
4341                 status = xpt_compile_path(&newpath, NULL,
4342                                           bus->path_id,
4343                                           target->target_id,
4344                                           device->lun_id);
4345         else
4346                 status = CAM_REQ_CMP_ERR;
4347
4348         if (status == CAM_REQ_CMP) {
4349
4350                 /*
4351                  * Allow transfer negotiation to occur in a
4352                  * tag free environment.
4353                  */
4354                 if (async_code == AC_SENT_BDR
4355                  || async_code == AC_BUS_RESET)
4356                         xpt_toggle_tags(&newpath);
4357
4358                 if (async_code == AC_INQ_CHANGED) {
4359                         /*
4360                          * We've sent a start unit command, or
4361                          * something similar to a device that
4362                          * may have caused its inquiry data to
4363                          * change. So we re-scan the device to
4364                          * refresh the inquiry data for it.
4365                          */
4366                         xpt_scan_lun(newpath.periph, &newpath,
4367                                      CAM_EXPECT_INQ_CHANGE, NULL);
4368                 }
4369                 xpt_release_path(&newpath);
4370         } else if (async_code == AC_LOST_DEVICE) {
4371                 device->flags |= CAM_DEV_UNCONFIGURED;
4372         } else if (async_code == AC_TRANSFER_NEG) {
4373                 struct ccb_trans_settings *settings;
4374
4375                 settings = (struct ccb_trans_settings *)async_arg;
4376                 xpt_set_transfer_settings(settings, device,
4377                                           /*async_update*/TRUE);
4378         }
4379 }
4380
4381 u_int32_t
4382 xpt_freeze_devq(struct cam_path *path, u_int count)
4383 {
4384         int s;
4385         struct ccb_hdr *ccbh;
4386
4387         s = splcam();
4388         path->device->qfrozen_cnt += count;
4389
4390         /*
4391          * Mark the last CCB in the queue as needing
4392          * to be requeued if the driver hasn't
4393          * changed its state yet.  This fixes a race
4394          * where a ccb is just about to be queued to
4395          * a controller driver when its interrupt routine
4396          * freezes the queue.  To completely close the
4397          * hole, controller drivers must check to see
4398          * if a ccb's status is still CAM_REQ_INPROG
4399          * under spl protection just before they queue
4400          * the CCB.  See ahc_action/ahc_freeze_devq for
4401          * an example.
4402          */
4403         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4404         if (ccbh && ccbh->status == CAM_REQ_INPROG)
4405                 ccbh->status = CAM_REQUEUE_REQ;
4406         splx(s);
4407         return (path->device->qfrozen_cnt);
4408 }
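
/*
 * A minimal sketch of the CAM_REQ_INPROG check described above,
 * loosely modeled on ahc_action()'s behavior; "foo_softc" and the
 * hardware commit step are hypothetical.
 */
#if 0
static void
foo_execute_ccb(struct foo_softc *sc, union ccb *ccb)
{
        int s;

        s = splcam();
        if (ccb->ccb_h.status != CAM_REQ_INPROG) {
                /* The XPT froze the devq and flagged this CCB already. */
                splx(s);
                xpt_done(ccb);
                return;
        }
        /* ...commit the CCB to the controller hardware here... */
        splx(s);
}
#endif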
4409
4410 u_int32_t
4411 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4412 {
4413         sim->devq->send_queue.qfrozen_cnt += count;
4414         if (sim->devq->active_dev != NULL) {
4415                 struct ccb_hdr *ccbh;
4416                 
4417                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4418                                   ccb_hdr_tailq);
4419                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4420                         ccbh->status = CAM_REQUEUE_REQ;
4421         }
4422         return (sim->devq->send_queue.qfrozen_cnt);
4423 }
4424
4425 static void
4426 xpt_release_devq_timeout(void *arg)
4427 {
4428         struct cam_ed *device;
4429
4430         device = (struct cam_ed *)arg;
4431
4432         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4433 }
4434
4435 void
4436 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4437 {
4438         xpt_release_devq_device(path->device, count, run_queue);
4439 }
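
/*
 * Peripheral drivers normally learn of a frozen device queue from the
 * CAM_DEV_QFRZN bit in a completed CCB and thaw it through the periph
 * layer once recovery is done; a hedged sketch of that idiom ("ccb"
 * hypothetical):
 */
#if 0
        if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
                cam_release_devq(ccb->ccb_h.path,
                                 /*relsim_flags*/0,
                                 /*reduction*/0,
                                 /*timeout*/0,
                                 /*getcount_only*/0);
#endif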
4440
4441 static void
4442 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4443 {
4444         int     rundevq;
4445         int     s0, s1;
4446
4447         rundevq = 0;
4448         s0 = splsoftcam();
4449         s1 = splcam();
4450         if (dev->qfrozen_cnt > 0) {
4451
4452                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4453                 dev->qfrozen_cnt -= count;
4454                 if (dev->qfrozen_cnt == 0) {
4455
4456                         /*
4457                          * No longer need to wait for a successful
4458                          * command completion.
4459                          */
4460                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4461
4462                         /*
4463                          * Remove any timeouts that might be scheduled
4464                          * to release this queue.
4465                          */
4466                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4467                                 untimeout(xpt_release_devq_timeout, dev,
4468                                           dev->c_handle);
4469                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4470                         }
4471
4472                         /*
4473                          * Now that we are unfrozen, schedule the
4474                          * device so any pending transactions are
4475                          * run.
4476                          */
4477                         if ((dev->ccbq.queue.entries > 0)
4478                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4479                          && (run_queue != 0)) {
4480                                 rundevq = 1;
4481                         }
4482                 }
4483         }
4484         splx(s1);
4485         if (rundevq != 0)
4486                 xpt_run_dev_sendq(dev->target->bus);
4487         splx(s0);
4488 }
4489
4490 void
4491 xpt_release_simq(struct cam_sim *sim, int run_queue)
4492 {
4493         int     s;
4494         struct  camq *sendq;
4495
4496         sendq = &(sim->devq->send_queue);
4497         s = splcam();
4498         if (sendq->qfrozen_cnt > 0) {
4499
4500                 sendq->qfrozen_cnt--;
4501                 if (sendq->qfrozen_cnt == 0) {
4502                         struct cam_eb *bus;
4503
4504                         /*
4505                          * If there is a timeout scheduled to release this
4506                          * sim queue, remove it.  The queue frozen count is
4507                          * already at 0.
4508                          */
4509                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4510                                 untimeout(xpt_release_simq_timeout, sim,
4511                                           sim->c_handle);
4512                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4513                         }
4514                         bus = xpt_find_bus(sim->path_id);
4515                         splx(s);
4516
4517                         if (run_queue) {
4518                                 /*
4519                                  * Now that we are unfrozen, run the send queue.
4520                                  */
4521                                 xpt_run_dev_sendq(bus);
4522                         }
4523                         xpt_release_bus(bus);
4524                 } else
4525                         splx(s);
4526         } else
4527                 splx(s);
4528 }
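
/*
 * A hedged sketch of the usual freeze/release pairing from a SIM that
 * runs out of controller resources; "sc", "foo_action", and "foo_intr"
 * are hypothetical, but the idiom follows drivers such as ahc.
 */
#if 0
        /* In foo_action(), when no command slots remain: */
        xpt_freeze_simq(sc->sim, /*count*/1);
        ccb->ccb_h.status = CAM_REQUEUE_REQ;
        xpt_done(ccb);

        /* Later, in foo_intr(), once a slot frees up: */
        xpt_release_simq(sc->sim, /*run_queue*/TRUE);
#endif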
4529
4530 static void
4531 xpt_release_simq_timeout(void *arg)
4532 {
4533         struct cam_sim *sim;
4534
4535         sim = (struct cam_sim *)arg;
4536         xpt_release_simq(sim, /* run_queue */ TRUE);
4537 }
4538
4539 void
4540 xpt_done(union ccb *done_ccb)
4541 {
4542         int s;
4543
4544         s = splcam();
4545
4546         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4547         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4548                 /*
4549                  * Queue up the request for handling by our SWI handler;
4550                  * only the "non-immediate" types of ccbs take this path.
4551                  */
4552                 switch (done_ccb->ccb_h.path->periph->type) {
4553                 case CAM_PERIPH_BIO:
4554                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4555                                           sim_links.tqe);
4556                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4557                         setsoftcambio();
4558                         break;
4559                 case CAM_PERIPH_NET:
4560                         TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4561                                           sim_links.tqe);
4562                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4563                         setsoftcamnet();
4564                         break;
4565                 }
4566         }
4567         splx(s);
4568 }
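
/*
 * An illustrative completion sketch ("ccb" hypothetical): a SIM fills
 * in the final status and hands the CCB back, and xpt_done() defers
 * the real completion work to the cambio/camnet SWI handlers queued
 * above.
 */
#if 0
        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
#endif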
4569
4570 union ccb *
4571 xpt_alloc_ccb(void)
4572 {
4573         union ccb *new_ccb;
4574
4575         new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4576         return (new_ccb);
4577 }
4578
4579 void
4580 xpt_free_ccb(union ccb *free_ccb)
4581 {
4582         free(free_ccb, M_DEVBUF);
4583 }
4584
4585
4586
4587 /* Private XPT functions */
4588
4589 /*
4590  * Get a CAM control block for the caller. Charge the structure to the device
4591  * referenced by the path.  If this device has no 'credits' then the
4592  * device already has the maximum number of outstanding operations under way
4593  * and we return NULL. If we don't have sufficient resources to allocate more
4594  * ccbs, we also return NULL.
4595  */
4596 static union ccb *
4597 xpt_get_ccb(struct cam_ed *device)
4598 {
4599         union ccb *new_ccb;
4600         int s;
4601
4602         s = splsoftcam();
4603         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4604                 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4605                 if (new_ccb == NULL) {
4606                         splx(s);
4607                         return (NULL);
4608                 }
4609                 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4610                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4611                                   xpt_links.sle);
4612                 xpt_ccb_count++;
4613         }
4614         cam_ccbq_take_opening(&device->ccbq);
4615         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4616         splx(s);
4617         return (new_ccb);
4618 }
4619
4620 static void
4621 xpt_release_bus(struct cam_eb *bus)
4622 {
4623         int s;
4624
4625         s = splcam();
4626         if ((--bus->refcount == 0)
4627          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4628                 TAILQ_REMOVE(&xpt_busses, bus, links);
4629                 bus_generation++;
4630                 splx(s);
4631                 free(bus, M_DEVBUF);
4632         } else
4633                 splx(s);
4634 }
4635
4636 static struct cam_et *
4637 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4638 {
4639         struct cam_et *target;
4640
4641         target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4642         if (target != NULL) {
4643                 struct cam_et *cur_target;
4644
4645                 TAILQ_INIT(&target->ed_entries);
4646                 target->bus = bus;
4647                 target->target_id = target_id;
4648                 target->refcount = 1;
4649                 target->generation = 0;
4650                 timevalclear(&target->last_reset);
4651                 /*
4652                  * Hold a reference to our parent bus so it
4653                  * will not go away before we do.
4654                  */
4655                 bus->refcount++;
4656
4657                 /* Insertion sort into our bus's target list */
4658                 cur_target = TAILQ_FIRST(&bus->et_entries);
4659                 while (cur_target != NULL && cur_target->target_id < target_id)
4660                         cur_target = TAILQ_NEXT(cur_target, links);
4661
4662                 if (cur_target != NULL) {
4663                         TAILQ_INSERT_BEFORE(cur_target, target, links);
4664                 } else {
4665                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4666                 }
4667                 bus->generation++;
4668         }
4669         return (target);
4670 }
4671
4672 static void
4673 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4674 {
4675         int s;
4676
4677         s = splcam();
4678         if ((--target->refcount == 0)
4679          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4680                 TAILQ_REMOVE(&bus->et_entries, target, links);
4681                 bus->generation++;
4682                 splx(s);
4683                 free(target, M_DEVBUF);
4684                 xpt_release_bus(bus);
4685         } else
4686                 splx(s);
4687 }
4688
4689 static struct cam_ed *
4690 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4691 {
4692         struct     cam_ed *device;
4693         struct     cam_devq *devq;
4694         cam_status status;
4695
4696         /* Make space for us in the device queue on our bus */
4697         devq = bus->sim->devq;
4698         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4699
4700         if (status != CAM_REQ_CMP) {
4701                 device = NULL;
4702         } else {
4703                 device = (struct cam_ed *)malloc(sizeof(*device),
4704                                                  M_DEVBUF, M_NOWAIT);
4705         }
4706
4707         if (device != NULL) {
4708                 struct cam_ed *cur_device;
4709
4710                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4711                 device->alloc_ccb_entry.device = device;
4712                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4713                 device->send_ccb_entry.device = device;
4714                 device->target = target;
4715                 device->lun_id = lun_id;
4716                 /* Initialize our queues */
4717                 if (camq_init(&device->drvq, 0) != 0) {
4718                         free(device, M_DEVBUF);
4719                         return (NULL);
4720                 }
4721                 if (cam_ccbq_init(&device->ccbq,
4722                                   bus->sim->max_dev_openings) != 0) {
4723                         camq_fini(&device->drvq);
4724                         free(device, M_DEVBUF);
4725                         return (NULL);
4726                 }
4727                 SLIST_INIT(&device->asyncs);
4728                 SLIST_INIT(&device->periphs);
4729                 device->generation = 0;
4730                 device->owner = NULL;
4731                 /*
4732                  * Take the default quirk entry until we have inquiry
4733                  * data and can determine a better quirk to use.
4734                  */
4735                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4736                 bzero(&device->inq_data, sizeof(device->inq_data));
4737                 device->inq_flags = 0;
4738                 device->queue_flags = 0;
4739                 device->serial_num = NULL;
4740                 device->serial_num_len = 0;
4741                 device->qfrozen_cnt = 0;
4742                 device->flags = CAM_DEV_UNCONFIGURED;
4743                 device->tag_delay_count = 0;
4744                 device->refcount = 1;
4745                 callout_handle_init(&device->c_handle);
4746
4747                 /*
4748                  * Hold a reference to our parent target so it
4749                  * will not go away before we do.
4750                  */
4751                 target->refcount++;
4752
4753                 /*
4754                  * XXX should be limited by number of CCBs this bus can
4755                  * do.
4756                  */
4757                 xpt_max_ccbs += device->ccbq.devq_openings;
4758                 /* Insertion sort into our target's device list */
4759                 cur_device = TAILQ_FIRST(&target->ed_entries);
4760                 while (cur_device != NULL && cur_device->lun_id < lun_id)
4761                         cur_device = TAILQ_NEXT(cur_device, links);
4762                 if (cur_device != NULL) {
4763                         TAILQ_INSERT_BEFORE(cur_device, device, links);
4764                 } else {
4765                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4766                 }
4767                 target->generation++;
4768         }
4769         return (device);
4770 }
4771
4772 static void
4773 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
4774                    struct cam_ed *device)
4775 {
4776         int s;
4777
4778         s = splcam();
4779         if ((--device->refcount == 0)
4780          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
4781                 struct cam_devq *devq;
4782
4783                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
4784                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4785                         panic("Removing device while still queued for ccbs");
4786
4787                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4788                         untimeout(xpt_release_devq_timeout, device,
4789                                   device->c_handle);
4790
4791                 TAILQ_REMOVE(&target->ed_entries, device, links);
4792                 target->generation++;
4793                 xpt_max_ccbs -= device->ccbq.devq_openings;
4794                 /* Release our slot in the devq */
4795                 devq = bus->sim->devq;
4796                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4797                 splx(s);
4798                 free(device, M_DEVBUF);
4799                 xpt_release_target(bus, target);
4800         } else
4801                 splx(s);
4802 }
4803
4804 static u_int32_t
4805 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4806 {
4807         int     s;
4808         int     diff;
4809         int     result;
4810         struct  cam_ed *dev;
4811
4812         dev = path->device;
4813         s = splsoftcam();
4814
4815         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4816         result = cam_ccbq_resize(&dev->ccbq, newopenings);
4817         if (result == CAM_REQ_CMP && (diff < 0)) {
4818                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4819         }
4820         /* Adjust the global limit */
4821         xpt_max_ccbs += diff;
4822         splx(s);
4823         return (result);
4824 }
4825
4826 static struct cam_eb *
4827 xpt_find_bus(path_id_t path_id)
4828 {
4829         struct cam_eb *bus;
4830
4831         for (bus = TAILQ_FIRST(&xpt_busses);
4832              bus != NULL;
4833              bus = TAILQ_NEXT(bus, links)) {
4834                 if (bus->path_id == path_id) {
4835                         bus->refcount++;
4836                         break;
4837                 }
4838         }
4839         return (bus);
4840 }
4841
4842 static struct cam_et *
4843 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
4844 {
4845         struct cam_et *target;
4846
4847         for (target = TAILQ_FIRST(&bus->et_entries);
4848              target != NULL;
4849              target = TAILQ_NEXT(target, links)) {
4850                 if (target->target_id == target_id) {
4851                         target->refcount++;
4852                         break;
4853                 }
4854         }
4855         return (target);
4856 }
4857
4858 static struct cam_ed *
4859 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4860 {
4861         struct cam_ed *device;
4862
4863         for (device = TAILQ_FIRST(&target->ed_entries);
4864              device != NULL;
4865              device = TAILQ_NEXT(device, links)) {
4866                 if (device->lun_id == lun_id) {
4867                         device->refcount++;
4868                         break;
4869                 }
4870         }
4871         return (device);
4872 }
4873
4874 typedef struct {
4875         union   ccb *request_ccb;
4876         struct  ccb_pathinq *cpi;
4877         int     pending_count;
4878 } xpt_scan_bus_info;
4879
4880 /*
4881  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
4882  * As the scan progresses, xpt_scan_bus is used as the
4883  * completion callback function for each lun probe.
4884  */
4885 static void
4886 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
4887 {
4888         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4889                   ("xpt_scan_bus\n"));
4890         switch (request_ccb->ccb_h.func_code) {
4891         case XPT_SCAN_BUS:
4892         {
4893                 xpt_scan_bus_info *scan_info;
4894                 union   ccb *work_ccb;
4895                 struct  cam_path *path;
4896                 u_int   i;
4897                 u_int   max_target;
4898                 u_int   initiator_id;
4899
4900                 /* Find out the characteristics of the bus */
4901                 work_ccb = xpt_alloc_ccb();
4902                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
4903                               request_ccb->ccb_h.pinfo.priority);
4904                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
4905                 xpt_action(work_ccb);
4906                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
4907                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
4908                         xpt_free_ccb(work_ccb);
4909                         xpt_done(request_ccb);
4910                         return;
4911                 }
4912
4913                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
4914                         /*
4915                          * Can't scan the bus on an adapter that
4916                          * cannot perform the initiator role.
4917                          */
4918                         request_ccb->ccb_h.status = CAM_REQ_CMP;
4919                         xpt_free_ccb(work_ccb);
4920                         xpt_done(request_ccb);
4921                         return;
4922                 }
4923
4924                 /* Save some state for use while we probe for devices */
4925                 scan_info = (xpt_scan_bus_info *)
4926                     malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
4927                 scan_info->request_ccb = request_ccb;
4928                 scan_info->cpi = &work_ccb->cpi;
4929
4930                 /* Cache on our stack so we can work asynchronously */
4931                 max_target = scan_info->cpi->max_target;
4932                 initiator_id = scan_info->cpi->initiator_id;
4933
4934                 /*
4935                  * Don't count the initiator's own ID if it
4936                  * falls within the addressable target range.
4937                  */
4938                 scan_info->pending_count = max_target + 1;
4939                 if (initiator_id <= max_target)
4940                         scan_info->pending_count--;
4941
4942                 for (i = 0; i <= max_target; i++) {
4943                         cam_status status;
4944                         if (i == initiator_id)
4945                                 continue;
4946
4947                         status = xpt_create_path(&path, xpt_periph,
4948                                                  request_ccb->ccb_h.path_id,
4949                                                  i, 0);
4950                         if (status != CAM_REQ_CMP) {
4951                                 printf("xpt_scan_bus: xpt_create_path failed"
4952                                        " with status %#x, bus scan halted\n",
4953                                        status);
4954                                 break;
4955                         }
4956                         work_ccb = xpt_alloc_ccb();
4957                         xpt_setup_ccb(&work_ccb->ccb_h, path,
4958                                       request_ccb->ccb_h.pinfo.priority);
4959                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4960                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4961                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
4962                         work_ccb->crcn.flags = request_ccb->crcn.flags;
4963 #if 0
4964                         printf("xpt_scan_bus: probing %d:%d:%d\n",
4965                                 request_ccb->ccb_h.path_id, i, 0);
4966 #endif
4967                         xpt_action(work_ccb);
4968                 }
4969                 break;
4970         }
4971         case XPT_SCAN_LUN:
4972         {
4973                 xpt_scan_bus_info *scan_info;
4974                 path_id_t path_id;
4975                 target_id_t target_id;
4976                 lun_id_t lun_id;
4977
4978                 /* Reuse the same CCB to query if a device was really found */
4979                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
4980                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
4981                               request_ccb->ccb_h.pinfo.priority);
4982                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
4983
4984                 path_id = request_ccb->ccb_h.path_id;
4985                 target_id = request_ccb->ccb_h.target_id;
4986                 lun_id = request_ccb->ccb_h.target_lun;
4987                 xpt_action(request_ccb);
4988
4989 #if 0
4990                 printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
4991                         path_id, target_id, lun_id);
4992 #endif
4993
4994                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
4995                         struct cam_ed *device;
4996                         struct cam_et *target;
4997                         int s, phl;
4998
4999                         /*
5000                          * If we already probed lun 0 successfully, or
5001                          * we have additional configured luns on this
5002                          * target that might have "gone away", go on to
5003                          * the next lun.
5004                          */
5005                         target = request_ccb->ccb_h.path->target;
5006                         /*
5007                          * We may touch devices that we don't
5008                          * hold references to, so ensure they
5009                          * don't disappear out from under us.
5010                          * The target above is referenced by the
5011                          * path in the request ccb.
5012                          */
5013                         phl = 0;
5014                         s = splcam();
5015                         device = TAILQ_FIRST(&target->ed_entries);
5016                         if (device != NULL) {
5017                                 phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
5018                                 if (device->lun_id == 0)
5019                                         device = TAILQ_NEXT(device, links);
5020                         }
5021                         splx(s);
5022                         if ((lun_id != 0) || (device != NULL)) {
5023                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5024                                         lun_id++;
5025                         }
5026                 } else {
5027                         struct cam_ed *device;
5028                         
5029                         device = request_ccb->ccb_h.path->device;
5030
5031                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5032                                 /* Try the next lun */
5033                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
5034                                     (device->quirk->quirks & CAM_QUIRK_HILUNS))
5035                                         lun_id++;
5036                         }
5037                 }
5038
5039                 xpt_free_path(request_ccb->ccb_h.path);
5040
5041                 /* Check Bounds */
5042                 if ((lun_id == request_ccb->ccb_h.target_lun)
5043                  || lun_id > scan_info->cpi->max_lun) {
5044                         /* We're done */
5045
5046                         xpt_free_ccb(request_ccb);
5047                         scan_info->pending_count--;
5048                         if (scan_info->pending_count == 0) {
5049                                 xpt_free_ccb((union ccb *)scan_info->cpi);
5050                                 request_ccb = scan_info->request_ccb;
5051                                 free(scan_info, M_TEMP);
5052                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
5053                                 xpt_done(request_ccb);
5054                         }
5055                 } else {
5056                         /* Try the next device */
5057                         struct cam_path *path;
5058                         cam_status status;
5059
5060                         path = request_ccb->ccb_h.path;
5061                         status = xpt_create_path(&path, xpt_periph,
5062                                                  path_id, target_id, lun_id);
5063                         if (status != CAM_REQ_CMP) {
5064                                 printf("xpt_scan_bus: xpt_create_path failed "
5065                                        "with status %#x, halting LUN scan\n",
5066                                        status);
5067                                 xpt_free_ccb(request_ccb);
5068                                 scan_info->pending_count--;
5069                                 if (scan_info->pending_count == 0) {
5070                                         xpt_free_ccb(
5071                                                 (union ccb *)scan_info->cpi);
5072                                         request_ccb = scan_info->request_ccb;
5073                                         free(scan_info, M_TEMP);
5074                                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5075                                         xpt_done(request_ccb);
5076                                 }
5077                                 break; /* don't reuse the freed CCB */
5078                         }
5079                         xpt_setup_ccb(&request_ccb->ccb_h, path,
5080                                       request_ccb->ccb_h.pinfo.priority);
5081                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5082                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5083                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5084                         request_ccb->crcn.flags =
5085                                 scan_info->request_ccb->crcn.flags;
5086 #if 0
5087                         xpt_print_path(path);
5088                         printf("xpt_scan bus probing\n");
5089 #endif
5090                         xpt_action(request_ccb);
5091                 }
5092                 break;
5093         }
5094         default:
5095                 break;
5096         }
5097 }
5098
5099 typedef enum {
5100         PROBE_TUR,
5101         PROBE_INQUIRY,
5102         PROBE_FULL_INQUIRY,
5103         PROBE_MODE_SENSE,
5104         PROBE_SERIAL_NUM,
5105         PROBE_TUR_FOR_NEGOTIATION
5106 } probe_action;
5107
5108 typedef enum {
5109         PROBE_INQUIRY_CKSUM     = 0x01,
5110         PROBE_SERIAL_CKSUM      = 0x02,
5111         PROBE_NO_ANNOUNCE       = 0x04
5112 } probe_flags;
5113
5114 typedef struct {
5115         TAILQ_HEAD(, ccb_hdr) request_ccbs;
5116         probe_action    action;
5117         union ccb       saved_ccb;
5118         probe_flags     flags;
5119         MD5_CTX         context;
5120         u_int8_t        digest[16];
5121 } probe_softc;
5122
5123 static void
5124 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5125              cam_flags flags, union ccb *request_ccb)
5126 {
5127         struct ccb_pathinq cpi;
5128         cam_status status;
5129         struct cam_path *new_path;
5130         struct cam_periph *old_periph;
5131         int s;
5132         
5133         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5134                   ("xpt_scan_lun\n"));
5135         
5136         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5137         cpi.ccb_h.func_code = XPT_PATH_INQ;
5138         xpt_action((union ccb *)&cpi);
5139
5140         if (cpi.ccb_h.status != CAM_REQ_CMP) {
5141                 if (request_ccb != NULL) {
5142                         request_ccb->ccb_h.status = cpi.ccb_h.status;
5143                         xpt_done(request_ccb);
5144                 }
5145                 return;
5146         }
5147
5148         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5149                 /*
5150                  * Can't scan the bus on an adapter that
5151                  * cannot perform the initiator role.
5152                  */
5153                 if (request_ccb != NULL) {
5154                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5155                         xpt_done(request_ccb);
5156                 }
5157                 return;
5158         }
5159
5160         if (request_ccb == NULL) {
5161                 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5162                 if (request_ccb == NULL) {
5163                         xpt_print_path(path);
5164                         printf("xpt_scan_lun: can't allocate CCB, can't "
5165                                "continue\n");
5166                         return;
5167                 }
5168                 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5169                 if (new_path == NULL) {
5170                         xpt_print_path(path);
5171                         printf("xpt_scan_lun: can't allocate path, can't "
5172                                "continue\n");
5173                         free(request_ccb, M_TEMP);
5174                         return;
5175                 }
5176                 status = xpt_compile_path(new_path, xpt_periph,
5177                                           path->bus->path_id,
5178                                           path->target->target_id,
5179                                           path->device->lun_id);
5180
5181                 if (status != CAM_REQ_CMP) {
5182                         xpt_print_path(path);
5183                         printf("xpt_scan_lun: can't compile path, can't "
5184                                "continue\n");
5185                         free(request_ccb, M_TEMP);
5186                         free(new_path, M_TEMP);
5187                         return;
5188                 }
5189                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5190                 request_ccb->ccb_h.cbfcnp = xptscandone;
5191                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5192                 request_ccb->crcn.flags = flags;
5193         }
5194
5195         s = splsoftcam();
5196         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5197                 probe_softc *softc;
5198
5199                 softc = (probe_softc *)old_periph->softc;
5200                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5201                                   periph_links.tqe);
5202         } else {
5203                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5204                                           probestart, "probe",
5205                                           CAM_PERIPH_BIO,
5206                                           request_ccb->ccb_h.path, NULL, 0,
5207                                           request_ccb);
5208
5209                 if (status != CAM_REQ_CMP) {
5210                         xpt_print_path(path);
5211                         printf("xpt_scan_lun: cam_periph_alloc returned an "
5212                                "error, can't continue probe\n");
5213                         request_ccb->ccb_h.status = status;
5214                         xpt_done(request_ccb);
5215                 }
5216         }
5217         splx(s);
5218 }
5219
5220 static void
5221 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5222 {
5223         xpt_release_path(done_ccb->ccb_h.path);
5224         free(done_ccb->ccb_h.path, M_TEMP);
5225         free(done_ccb, M_TEMP);
5226 }
5227
5228 static cam_status
5229 proberegister(struct cam_periph *periph, void *arg)
5230 {
5231         union ccb *request_ccb; /* CCB representing the probe request */
5232         probe_softc *softc;
5233
5234         request_ccb = (union ccb *)arg;
5235         if (periph == NULL) {
5236                 printf("proberegister: periph was NULL!!\n");
5237                 return(CAM_REQ_CMP_ERR);
5238         }
5239
5240         if (request_ccb == NULL) {
5241                 printf("proberegister: no probe CCB, "
5242                        "can't register device\n");
5243                 return(CAM_REQ_CMP_ERR);
5244         }
5245
5246         softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5247
5248         if (softc == NULL) {
5249                 printf("proberegister: Unable to probe new device. "
5250                        "Unable to allocate softc\n");                           
5251                 return(CAM_REQ_CMP_ERR);
5252         }
5253         TAILQ_INIT(&softc->request_ccbs);
5254         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5255                           periph_links.tqe);
5256         softc->flags = 0;
5257         periph->softc = softc;
5258         cam_periph_acquire(periph);
5259         /*
5260          * Ensure we've waited at least a bus settle
5261          * delay before attempting to probe the device.
5262          * For HBAs that don't do bus resets, this won't make a difference.
5263          */
5264         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5265                                       SCSI_DELAY);
5266         probeschedule(periph);
5267         return(CAM_REQ_CMP);
5268 }
5269
5270 static void
5271 probeschedule(struct cam_periph *periph)
5272 {
5273         struct ccb_pathinq cpi;
5274         union ccb *ccb;
5275         probe_softc *softc;
5276
5277         softc = (probe_softc *)periph->softc;
5278         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5279
5280         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5281         cpi.ccb_h.func_code = XPT_PATH_INQ;
5282         xpt_action((union ccb *)&cpi);
5283
5284         /*
5285          * If a device has gone away and another device, or the same one,
5286          * is back in the same place, it should have a unit attention
5287          * condition pending.  It will not report the unit attention in
5288          * response to an inquiry, which may leave invalid transfer
5289          * negotiations in effect.  The TUR will reveal the unit attention
5290          * condition.  Only send the TUR for lun 0, since some devices 
5291          * will get confused by commands other than inquiry to non-existent
5292          * luns.  If you think a device has gone away, start your scan from
5293          * lun 0.  This will ensure that any bogus transfer settings are
5294          * invalidated.
5295          *
5296          * If we haven't seen the device before and the controller supports
5297          * some kind of transfer negotiation, negotiate with the first
5298          * sent command if no bus reset was performed at startup.  This
5299          * ensures that the device is not confused by transfer negotiation
5300          * settings left over from loader or BIOS action.
5301          */
5302         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5303          && (ccb->ccb_h.target_lun == 0)) {
5304                 softc->action = PROBE_TUR;
5305         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5306               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5307                 proberequestdefaultnegotiation(periph);
5308                 softc->action = PROBE_INQUIRY;
5309         } else {
5310                 softc->action = PROBE_INQUIRY;
5311         }
5312
5313         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5314                 softc->flags |= PROBE_NO_ANNOUNCE;
5315         else
5316                 softc->flags &= ~PROBE_NO_ANNOUNCE;
5317
5318         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5319 }
5320
5321 static void
5322 probestart(struct cam_periph *periph, union ccb *start_ccb)
5323 {
5324         /* Probe the device that our peripheral driver points to */
5325         struct ccb_scsiio *csio;
5326         probe_softc *softc;
5327
5328         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5329
5330         softc = (probe_softc *)periph->softc;
5331         csio = &start_ccb->csio;
5332
5333         switch (softc->action) {
5334         case PROBE_TUR:
5335         case PROBE_TUR_FOR_NEGOTIATION:
5336         {
5337                 scsi_test_unit_ready(csio,
5338                                      /*retries*/4,
5339                                      probedone,
5340                                      MSG_SIMPLE_Q_TAG,
5341                                      SSD_FULL_SIZE,
5342                                      /*timeout*/60000);
5343                 break;
5344         }
5345         case PROBE_INQUIRY:
5346         case PROBE_FULL_INQUIRY:
5347         {
5348                 u_int inquiry_len;
5349                 struct scsi_inquiry_data *inq_buf;
5350
5351                 inq_buf = &periph->path->device->inq_data;
5352                 /*
5353                  * If the device is currently configured, we calculate an
5354                  * MD5 checksum of the inquiry data, and if the serial number
5355                  * length is greater than 0, add the serial number data
5356                  * into the checksum as well.  Once the inquiry and the
5357                  * serial number check finish, we attempt to figure out
5358                  * whether we still have the same device.
5359                  */
5360                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5361                         
5362                         MD5Init(&softc->context);
5363                         MD5Update(&softc->context, (unsigned char *)inq_buf,
5364                                   sizeof(struct scsi_inquiry_data));
5365                         softc->flags |= PROBE_INQUIRY_CKSUM;
5366                         if (periph->path->device->serial_num_len > 0) {
5367                                 MD5Update(&softc->context,
5368                                           periph->path->device->serial_num,
5369                                           periph->path->device->serial_num_len);
5370                                 softc->flags |= PROBE_SERIAL_CKSUM;
5371                         }
5372                         MD5Final(softc->digest, &softc->context);
5373                 } 
5374
5375                 if (softc->action == PROBE_INQUIRY)
5376                         inquiry_len = SHORT_INQUIRY_LENGTH;
5377                 else
5378                         inquiry_len = inq_buf->additional_length + 5;
5379         
5380                 scsi_inquiry(csio,
5381                              /*retries*/4,
5382                              probedone,
5383                              MSG_SIMPLE_Q_TAG,
5384                              (u_int8_t *)inq_buf,
5385                              inquiry_len,
5386                              /*evpd*/FALSE,
5387                              /*page_code*/0,
5388                              SSD_MIN_SIZE,
5389                              /*timeout*/60 * 1000);
5390                 break;
5391         }
5392         case PROBE_MODE_SENSE:
5393         {
5394                 void  *mode_buf;
5395                 int    mode_buf_len;
5396
5397                 mode_buf_len = sizeof(struct scsi_mode_header_6)
5398                              + sizeof(struct scsi_mode_blk_desc)
5399                              + sizeof(struct scsi_control_page);
5400                 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5401                 if (mode_buf != NULL) {
5402                         scsi_mode_sense(csio,
5403                                         /*retries*/4,
5404                                         probedone,
5405                                         MSG_SIMPLE_Q_TAG,
5406                                         /*dbd*/FALSE,
5407                                         SMS_PAGE_CTRL_CURRENT,
5408                                         SMS_CONTROL_MODE_PAGE,
5409                                         mode_buf,
5410                                         mode_buf_len,
5411                                         SSD_FULL_SIZE,
5412                                         /*timeout*/60000);
5413                         break;
5414                 }
5415                 xpt_print_path(periph->path);
5416                 printf("Unable to mode sense control page - malloc failure\n");
5417                 softc->action = PROBE_SERIAL_NUM;
5418                 /* FALLTHROUGH */
5419         }
5420         case PROBE_SERIAL_NUM:
5421         {
5422                 struct scsi_vpd_unit_serial_number *serial_buf;
5423                 struct cam_ed* device;
5424
5425                 serial_buf = NULL;
5426                 device = periph->path->device;
5427                 device->serial_num = NULL;
5428                 device->serial_num_len = 0;
5429
5430                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5431                         serial_buf = (struct scsi_vpd_unit_serial_number *)
5432                                 malloc(sizeof(*serial_buf), M_TEMP,
5433                                         M_NOWAIT | M_ZERO);
5434
5435                 if (serial_buf != NULL) {
5436                         scsi_inquiry(csio,
5437                                      /*retries*/4,
5438                                      probedone,
5439                                      MSG_SIMPLE_Q_TAG,
5440                                      (u_int8_t *)serial_buf,
5441                                      sizeof(*serial_buf),
5442                                      /*evpd*/TRUE,
5443                                      SVPD_UNIT_SERIAL_NUMBER,
5444                                      SSD_MIN_SIZE,
5445                                      /*timeout*/60 * 1000);
5446                         break;
5447                 }
5448                 /*
5449                  * We'll have to do without; let our probedone
5450                  * routine finish up for us.
5451                  */
5452                 start_ccb->csio.data_ptr = NULL;
5453                 probedone(periph, start_ccb);
5454                 return;
5455         }
5456         }
5457         xpt_action(start_ccb);
5458 }
5459
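     /*
      * Fetch the user (default) transfer settings for the probed
      * device and re-issue them as the current settings, priming
      * the controller to renegotiate with the device.
      */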
5460 static void
5461 proberequestdefaultnegotiation(struct cam_periph *periph)
5462 {
5463         struct ccb_trans_settings cts;
5464
5465         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5466         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5467         cts.flags = CCB_TRANS_USER_SETTINGS;
5468         xpt_action((union ccb *)&cts);
5469         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5470         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5471         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5472         xpt_action((union ccb *)&cts);
5473 }
5474
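     /*
      * Completion handler for probe CCBs.  Each case below examines
      * the result of the current probe action and either advances
      * softc->action to the next step and reschedules the probe, or
      * falls out of the switch to complete the waiting request CCBs,
      * retiring the probe periph once none remain.
      */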
5475 static void
5476 probedone(struct cam_periph *periph, union ccb *done_ccb)
5477 {
5478         probe_softc *softc;
5479         struct cam_path *path;
5480         u_int32_t  priority;
5481
5482         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5483
5484         softc = (probe_softc *)periph->softc;
5485         path = done_ccb->ccb_h.path;
5486         priority = done_ccb->ccb_h.pinfo.priority;
5487
5488         switch (softc->action) {
5489         case PROBE_TUR:
5490         {
5491                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5492
5493                         if (cam_periph_error(done_ccb, 0,
5494                                              SF_NO_PRINT, NULL) == ERESTART)
5495                                 return;
5496                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5497                                 /* Don't wedge the queue */
5498                                 xpt_release_devq(done_ccb->ccb_h.path,
5499                                                  /*count*/1,
5500                                                  /*run_queue*/TRUE);
5501                 }
5502                 softc->action = PROBE_INQUIRY;
5503                 xpt_release_ccb(done_ccb);
5504                 xpt_schedule(periph, priority);
5505                 return;
5506         }
5507         case PROBE_INQUIRY:
5508         case PROBE_FULL_INQUIRY:
5509         {
5510                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5511                         struct scsi_inquiry_data *inq_buf;
5512                         u_int8_t periph_qual;
5513
5514                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5515                         inq_buf = &path->device->inq_data;
5516
5517                         periph_qual = SID_QUAL(inq_buf);
5518                         
5519                         switch(periph_qual) {
5520                         case SID_QUAL_LU_CONNECTED:
5521                         {
5522                                 u_int8_t alen;
5523
5524                                 /*
5525                                  * We conservatively request only
5526                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
5527                                  * information during our first try
5528                                  * at sending an INQUIRY. If the device
5529                                  * has more information to give,
5530                                  * perform a second request specifying
5531                                  * the amount of information the device
5532                                  * is willing to give.
5533                                  */
5534                                 alen = inq_buf->additional_length;
5535                                 if (softc->action == PROBE_INQUIRY
5536                                  && alen > (SHORT_INQUIRY_LENGTH - 5)) {
5537                                         softc->action = PROBE_FULL_INQUIRY;
5538                                         xpt_release_ccb(done_ccb);
5539                                         xpt_schedule(periph, priority);
5540                                         return;
5541                                 }
5542
5543                                 xpt_find_quirk(path->device);
5544
5545                                 if ((inq_buf->flags & SID_CmdQue) != 0)
5546                                         softc->action = PROBE_MODE_SENSE;
5547                                 else
5548                                         softc->action = PROBE_SERIAL_NUM;
5549
5550                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5551
5552                                 xpt_release_ccb(done_ccb);
5553                                 xpt_schedule(periph, priority);
5554                                 return;
5555                         }
5556                         default:
5557                                 break;
5558                         }
5559                 } else if (cam_periph_error(done_ccb, 0,
5560                                             done_ccb->ccb_h.target_lun > 0
5561                                             ? SF_RETRY_UA|SF_QUIET_IR
5562                                             : SF_RETRY_UA,
5563                                             &softc->saved_ccb) == ERESTART) {
5564                         return;
5565                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5566                         /* Don't wedge the queue */
5567                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5568                                          /*run_queue*/TRUE);
5569                 }
5570                 /*
5571                  * If we get to this point, we got an error status back
5572                  * from the inquiry and the error status doesn't require
5573                  * automatically retrying the command.  Therefore, the
5574                  * inquiry failed.  If we had inquiry information before
5575                  * for this device, but this latest inquiry command failed,
5576                  * the device has probably gone away.  If this device isn't
5577                  * already marked unconfigured, notify the peripheral
5578                  * drivers that this device is no more.
5579                  */
5580                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5581                         /* Send the async notification. */
5582                         xpt_async(AC_LOST_DEVICE, path, NULL);
5583
5584                 xpt_release_ccb(done_ccb);
5585                 break;
5586         }
5587         case PROBE_MODE_SENSE:
5588         {
5589                 struct ccb_scsiio *csio;
5590                 struct scsi_mode_header_6 *mode_hdr;
5591
5592                 csio = &done_ccb->csio;
5593                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5594                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5595                         struct scsi_control_page *page;
5596                         u_int8_t *offset;
5597
5598                         offset = ((u_int8_t *)&mode_hdr[1])
5599                             + mode_hdr->blk_desc_len;
5600                         page = (struct scsi_control_page *)offset;
5601                         path->device->queue_flags = page->queue_flags;
5602                 } else if (cam_periph_error(done_ccb, 0,
5603                                             SF_RETRY_UA|SF_NO_PRINT,
5604                                             &softc->saved_ccb) == ERESTART) {
5605                         return;
5606                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5607                         /* Don't wedge the queue */
5608                         xpt_release_devq(done_ccb->ccb_h.path,
5609                                          /*count*/1, /*run_queue*/TRUE);
5610                 }
5611                 xpt_release_ccb(done_ccb);
5612                 free(mode_hdr, M_TEMP);
5613                 softc->action = PROBE_SERIAL_NUM;
5614                 xpt_schedule(periph, priority);
5615                 return;
5616         }
5617         case PROBE_SERIAL_NUM:
5618         {
5619                 struct ccb_scsiio *csio;
5620                 struct scsi_vpd_unit_serial_number *serial_buf;
5621                 u_int32_t  priority;
5622                 int changed;
5623                 int have_serialnum;
5624
5625                 changed = 1;
5626                 have_serialnum = 0;
5627                 csio = &done_ccb->csio;
5628                 priority = done_ccb->ccb_h.pinfo.priority;
5629                 serial_buf =
5630                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5631
5632                 /* Clean up from previous instance of this device */
5633                 if (path->device->serial_num != NULL) {
5634                         free(path->device->serial_num, M_DEVBUF);
5635                         path->device->serial_num = NULL;
5636                         path->device->serial_num_len = 0;
5637                 }
5638
5639                 if (serial_buf == NULL) {
5640                         /*
5641                          * Don't process the command as it was never sent
5642                          */
5643                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5644                         && (serial_buf->length > 0)) {
5645
5646                         have_serialnum = 1;
5647                         path->device->serial_num =
5648                                 (u_int8_t *)malloc((serial_buf->length + 1),
5649                                                    M_DEVBUF, M_NOWAIT);
5650                         if (path->device->serial_num != NULL) {
5651                                 bcopy(serial_buf->serial_num,
5652                                       path->device->serial_num,
5653                                       serial_buf->length);
5654                                 path->device->serial_num_len =
5655                                     serial_buf->length;
5656                                 path->device->serial_num[serial_buf->length]
5657                                     = '\0';
5658                         }
5659                 } else if (cam_periph_error(done_ccb, 0,
5660                                             SF_RETRY_UA|SF_NO_PRINT,
5661                                             &softc->saved_ccb) == ERESTART) {
5662                         return;
5663                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5664                         /* Don't wedge the queue */
5665                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5666                                          /*run_queue*/TRUE);
5667                 }
5668                 
5669                 /*
5670                  * Let's see if we have seen this device before.
5671                  */
5672                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5673                         MD5_CTX context;
5674                         u_int8_t digest[16];
5675
5676                         MD5Init(&context);
5677                         
5678                         MD5Update(&context,
5679                                   (unsigned char *)&path->device->inq_data,
5680                                   sizeof(struct scsi_inquiry_data));
5681
5682                         if (have_serialnum)
5683                                 MD5Update(&context, serial_buf->serial_num,
5684                                           serial_buf->length);
5685
5686                         MD5Final(digest, &context);
5687                         if (bcmp(softc->digest, digest, 16) == 0)
5688                                 changed = 0;
5689
5690                         /*
5691                          * XXX Do we need to do a TUR in order to ensure
5692                          *     that the device really hasn't changed???
5693                          */
5694                         if ((changed != 0)
5695                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5696                                 xpt_async(AC_LOST_DEVICE, path, NULL);
5697                 }
5698                 if (serial_buf != NULL)
5699                         free(serial_buf, M_TEMP);
5700
5701                 if (changed != 0) {
5702                         /*
5703                          * Now that we have all the necessary
5704                          * information to safely perform transfer
5705                          * negotiations, do so.  Controllers don't perform
5706                          * any negotiation or tagged queuing until
5707                          * after the first XPT_SET_TRAN_SETTINGS ccb is
5708                          * received.  So, on a new device, just retrieve
5709                          * the user settings, and set them as the current
5710                          * settings to set the device up.
5711                          */
5712                         proberequestdefaultnegotiation(periph);
5713                         xpt_release_ccb(done_ccb);
5714
5715                         /*
5716                          * Perform a TUR to allow the controller to
5717                          * perform any necessary transfer negotiation.
5718                          */
5719                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
5720                         xpt_schedule(periph, priority);
5721                         return;
5722                 }
5723                 xpt_release_ccb(done_ccb);
5724                 break;
5725         }
5726         case PROBE_TUR_FOR_NEGOTIATION:
5727                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5728                         /* Don't wedge the queue */
5729                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5730                                          /*run_queue*/TRUE);
5731                 }
5732
5733                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5734
5735                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5736                         /* Inform the XPT that a new device has been found */
5737                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5738                         xpt_action(done_ccb);
5739
5740                         xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5741                 }
5742                 xpt_release_ccb(done_ccb);
5743                 break;
5744         }
5745         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5746         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5747         done_ccb->ccb_h.status = CAM_REQ_CMP;
5748         xpt_done(done_ccb);
5749         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5750                 cam_periph_invalidate(periph);
5751                 cam_periph_release(periph);
5752         } else {
5753                 probeschedule(periph);
5754         }
5755 }
5756
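     /*
      * Release the probe periph's per-instance state.
      */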
5757 static void
5758 probecleanup(struct cam_periph *periph)
5759 {
5760         free(periph->softc, M_TEMP);
5761 }
5762
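     /*
      * Match the device's inquiry data against the static quirk
      * table.  The table is terminated by a wildcard entry, so a
      * failure to match anything at all indicates a malformed table.
      */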
5763 static void
5764 xpt_find_quirk(struct cam_ed *device)
5765 {
5766         caddr_t match;
5767
5768         match = cam_quirkmatch((caddr_t)&device->inq_data,
5769                                (caddr_t)xpt_quirk_table,
5770                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5771                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
5772
5773         if (match == NULL)
5774                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
5775
5776         device->quirk = (struct xpt_quirk_entry *)match;
5777 }
5778
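     /*
      * Validate and apply transfer negotiation settings.  For
      * non-async updates, the request is first sanity checked
      * against the capabilities reported by the controller and by
      * the device's inquiry data.  Transitions between tagged and
      * untagged operation freeze the device queue so the two styles
      * of command are never intermixed.
      */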
5779 static void
5780 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
5781                           int async_update)
5782 {
5783         struct  cam_sim *sim;
5784         int     qfrozen;
5785
5786         sim = cts->ccb_h.path->bus->sim;
5787         if (async_update == FALSE) {
5788                 struct  scsi_inquiry_data *inq_data;
5789                 struct  ccb_pathinq cpi;
5790                 struct  ccb_trans_settings cur_cts;
5791
5792                 if (device == NULL) {
5793                         cts->ccb_h.status = CAM_PATH_INVALID;
5794                         xpt_done((union ccb *)cts);
5795                         return;
5796                 }
5797
5798                 /*
5799                  * Perform sanity checking against what the
5800                  * controller and device can do.
5801                  */
5802                 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
5803                 cpi.ccb_h.func_code = XPT_PATH_INQ;
5804                 xpt_action((union ccb *)&cpi);
5805                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
5806                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5807                 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
5808                 xpt_action((union ccb *)&cur_cts);
5809                 inq_data = &device->inq_data;
5810
5811                 /* Fill in any gaps in what the user gave us */
5812                 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
5813                         cts->sync_period = cur_cts.sync_period;
5814                 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
5815                         cts->sync_offset = cur_cts.sync_offset;
5816                 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
5817                         cts->bus_width = cur_cts.bus_width;
5818                 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
5819                         cts->flags &= ~CCB_TRANS_DISC_ENB;
5820                         cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
5821                 }
5822                 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
5823                         cts->flags &= ~CCB_TRANS_TAG_ENB;
5824                         cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
5825                 }
5826
5827                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
5828                   && (inq_data->flags & SID_Sync) == 0)
5829                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
5830                  || (cts->sync_offset == 0)
5831                  || (cts->sync_period == 0)) {
5832                         /* Force async */
5833                         cts->sync_period = 0;
5834                         cts->sync_offset = 0;
5835                 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
5836
5837                         if ((inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
5838                          && cts->sync_period <= 0x9) {
5839                                 /*
5840                                  * Don't allow DT transmission rates if the
5841                                  * device does not support it.
5842                                  */
5843                                 cts->sync_period = 0xa;
5844                         }
5845                         if ((inq_data->spi3data & SID_SPI_IUS) == 0
5846                          && cts->sync_period <= 0x8) {
5847                                 /*
5848                                  * Don't allow PACE transmission rates
5849                                  * if the device does not support
5850                                  * packetized transfers.
5851                                  */
5852                                 cts->sync_period = 0x9;
5853                         }
5854                 }
5855
5856                 switch (cts->bus_width) {
5857                 case MSG_EXT_WDTR_BUS_32_BIT:
5858                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
5859                           || (inq_data->flags & SID_WBus32) != 0)
5860                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
5861                                 break;
5862                         /* Fall Through to 16-bit */
5863                 case MSG_EXT_WDTR_BUS_16_BIT:
5864                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
5865                           || (inq_data->flags & SID_WBus16) != 0)
5866                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
5867                                 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
5868                                 break;
5869                         }
5870                         /* Fall Through to 8-bit */
5871                 default: /* New bus width?? */
5872                 case MSG_EXT_WDTR_BUS_8_BIT:
5873                         /* All targets can do this */
5874                         cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
5875                         break;
5876                 }
5877
5878                 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
5879                         /*
5880                          * Can't tag queue without disconnection.
5881                          */
5882                         cts->flags &= ~CCB_TRANS_TAG_ENB;
5883                         cts->valid |= CCB_TRANS_TQ_VALID;
5884                 }
5885
5886                 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
5887                  || (inq_data->flags & SID_CmdQue) == 0
5888                  || (device->queue_flags & SCP_QUEUE_DQUE) != 0
5889                  || (device->quirk->mintags == 0)) {
5890                         /*
5891                          * Can't tag on hardware that doesn't support tags,
5892                          * doesn't have them enabled, or has broken tag support.
5893                          */
5894                         cts->flags &= ~CCB_TRANS_TAG_ENB;
5895                 }
5896         }
5897
5898         qfrozen = FALSE;
5899         if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
5900                 int device_tagenb;
5901
5902                 /*
5903                  * If we are transitioning from tags to no-tags or
5904                  * vice-versa, we need to carefully freeze and restart
5905                  * the queue so that we don't overlap tagged and non-tagged
5906                  * commands.  We also temporarily stop tags if there is
5907                  * a change in transfer negotiation settings to allow
5908                  * "tag-less" negotiation.
5909                  */
5910                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5911                  || (device->inq_flags & SID_CmdQue) != 0)
5912                         device_tagenb = TRUE;
5913                 else
5914                         device_tagenb = FALSE;
5915
5916                 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
5917                   && device_tagenb == FALSE)
5918                  || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
5919                   && device_tagenb == TRUE)) {
5920
5921                         if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
5922                                 /*
5923                                  * Delay change to use tags until after a
5924                                  * few commands have gone to this device so
5925                                  * the controller has time to perform transfer
5926                                  * negotiations without tagged messages getting
5927                                  * in the way.
5928                                  */
5929                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
5930                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
5931                         } else {
5932                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
5933                                 qfrozen = TRUE;
5934                                 device->inq_flags &= ~SID_CmdQue;
5935                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
5936                                                     sim->max_dev_openings);
5937                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5938                                 device->tag_delay_count = 0;
5939                         }
5940                 }
5941         }
5942
5943         if (async_update == FALSE) {
5944                 /*
5945                  * If we are currently performing tagged transactions to
5946                  * this device and want to change its negotiation parameters,
5947                  * go non-tagged for a bit to give the controller a chance to
5948                  * negotiate unhampered by tag messages.
5949                  */
5950                 if ((device->inq_flags & SID_CmdQue) != 0
5951                  && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
5952                                    CCB_TRANS_SYNC_OFFSET_VALID|
5953                                    CCB_TRANS_BUS_WIDTH_VALID)) != 0)
5954                         xpt_toggle_tags(cts->ccb_h.path);
5955
5956                 (*(sim->sim_action))(sim, (union ccb *)cts);
5957         }
5958
5959         if (qfrozen) {
5960                 struct ccb_relsim crs;
5961
5962                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
5963                               /*priority*/1);
5964                 crs.ccb_h.func_code = XPT_REL_SIMQ;
5965                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5966                 crs.openings
5967                     = crs.release_timeout 
5968                     = crs.qfrozen_cnt
5969                     = 0;
5970                 xpt_action((union ccb *)&crs);
5971         }
5972 }
5973
5974 static void
5975 xpt_toggle_tags(struct cam_path *path)
5976 {
5977         struct cam_ed *dev;
5978
5979         /*
5980          * Give controllers a chance to renegotiate
5981          * before starting tag operations.  We
5982          * "toggle" tagged queuing off then on,
5983          * which causes the tag enable command delay
5984          * counter to come into effect.
5985          */
5986         dev = path->device;
5987         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5988          || ((dev->inq_flags & SID_CmdQue) != 0
5989           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
5990                 struct ccb_trans_settings cts;
5991
5992         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
5993                 cts.flags = 0;
5994                 cts.valid = CCB_TRANS_TQ_VALID;
5995                 xpt_set_transfer_settings(&cts, path->device,
5996                                           /*async_update*/TRUE);
5997                 cts.flags = CCB_TRANS_TAG_ENB;
5998                 xpt_set_transfer_settings(&cts, path->device,
5999                                           /*async_update*/TRUE);
6000         }
6001 }
6002
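     /*
      * Enable tagged queuing for a device whose tag delay count has
      * expired, resizing its CCB queue to the lesser of the quirk
      * and SIM tag limits.  The queue is frozen for the transition
      * and released once outstanding commands drain.
      */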
6003 static void
6004 xpt_start_tags(struct cam_path *path)
6005 {
6006         struct ccb_relsim crs;
6007         struct cam_ed *device;
6008         struct cam_sim *sim;
6009         int    newopenings;
6010
6011         device = path->device;
6012         sim = path->bus->sim;
6013         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6014         xpt_freeze_devq(path, /*count*/1);
6015         device->inq_flags |= SID_CmdQue;
6016         newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
6017         xpt_dev_ccbq_resize(path, newopenings);
6018         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6019         crs.ccb_h.func_code = XPT_REL_SIMQ;
6020         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6021         crs.openings
6022             = crs.release_timeout 
6023             = crs.qfrozen_cnt
6024             = 0;
6025         xpt_action((union ccb *)&crs);
6026 }
6027
6028 static int busses_to_config;
6029 static int busses_to_reset;
6030
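     /*
      * Per-bus callback for the first configuration pass: count the
      * busses to be configured and, of those, the busses that will
      * be reset first (controllers that allow bus resets and can
      * negotiate transfer settings).
      */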
6031 static int
6032 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6033 {
6034         if (bus->path_id != CAM_XPT_PATH_ID) {
6035                 struct cam_path path;
6036                 struct ccb_pathinq cpi;
6037                 int can_negotiate;
6038
6039                 busses_to_config++;
6040                 xpt_compile_path(&path, NULL, bus->path_id,
6041                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6042                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6043                 cpi.ccb_h.func_code = XPT_PATH_INQ;
6044                 xpt_action((union ccb *)&cpi);
6045                 can_negotiate = cpi.hba_inquiry;
6046                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6047                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6048                  && can_negotiate)
6049                         busses_to_reset++;
6050                 xpt_release_path(&path);
6051         }
6052
6053         return(1);
6054 }
6055
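     /*
      * Per-bus callback for the second configuration pass: reset the
      * bus if the controller permits it and can negotiate, otherwise
      * proceed as though a reset had already completed.  Either way
      * the CCB is handed to xpt_finishconfig().
      */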
6056 static int
6057 xptconfigfunc(struct cam_eb *bus, void *arg)
6058 {
6059         struct  cam_path *path;
6060         union   ccb *work_ccb;
6061
6062         if (bus->path_id != CAM_XPT_PATH_ID) {
6063                 cam_status status;
6064                 int can_negotiate;
6065
6066                 work_ccb = xpt_alloc_ccb();
6067                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6068                                               CAM_TARGET_WILDCARD,
6069                                               CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6070                         printf("xptconfigfunc: xpt_create_path failed with "
6071                                "status %#x for bus %d\n", status, bus->path_id);
6072                         printf("xptconfigfunc: halting bus configuration\n");
6073                         xpt_free_ccb(work_ccb);
6074                         busses_to_config--;
6075                         xpt_finishconfig(xpt_periph, NULL);
6076                         return(0);
6077                 }
6078                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6079                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6080                 xpt_action(work_ccb);
6081                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6082                         printf("xptconfigfunc: CPI failed on bus %d "
6083                                "with status %d\n", bus->path_id,
6084                                work_ccb->ccb_h.status);
6085                         xpt_finishconfig(xpt_periph, work_ccb);
6086                         return(1);
6087                 }
6088
6089                 can_negotiate = work_ccb->cpi.hba_inquiry;
6090                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6091                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6092                  && (can_negotiate != 0)) {
6093                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6094                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6095                         work_ccb->ccb_h.cbfcnp = NULL;
6096                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6097                                   ("Resetting Bus\n"));
6098                         xpt_action(work_ccb);
6099                         xpt_finishconfig(xpt_periph, work_ccb);
6100                 } else {
6101                         /* Act as though we performed a successful BUS RESET */
6102                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6103                         xpt_finishconfig(xpt_periph, work_ccb);
6104                 }
6105         }
6106
6107         return(1);
6108 }
6109
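     /*
      * Boot-time configuration entry point, run from the
      * xpt_config_hook intrhook once interrupts are enabled.
      * Optionally arms debugging, then makes the two bus
      * passes above.
      */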
6110 static void
6111 xpt_config(void *arg)
6112 {
6113         /* Now that interrupts are enabled, go find our devices */
6114
6115 #ifdef CAMDEBUG
6116         /* Setup debugging flags and path */
6117 #ifdef CAM_DEBUG_FLAGS
6118         cam_dflags = CAM_DEBUG_FLAGS;
6119 #else /* !CAM_DEBUG_FLAGS */
6120         cam_dflags = CAM_DEBUG_NONE;
6121 #endif /* CAM_DEBUG_FLAGS */
6122 #ifdef CAM_DEBUG_BUS
6123         if (cam_dflags != CAM_DEBUG_NONE) {
6124                 if (xpt_create_path(&cam_dpath, xpt_periph,
6125                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6126                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6127                         printf("xpt_config: xpt_create_path() failed for debug"
6128                                " target %d:%d:%d, debugging disabled\n",
6129                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6130                         cam_dflags = CAM_DEBUG_NONE;
6131                 }
6132         } else
6133                 cam_dpath = NULL;
6134 #else /* !CAM_DEBUG_BUS */
6135         cam_dpath = NULL;
6136 #endif /* CAM_DEBUG_BUS */
6137 #endif /* CAMDEBUG */
6138
6139         /*
6140          * Scan all installed busses.
6141          */
6142         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6143
6144         if (busses_to_config == 0) {
6145                 /* Call manually because we don't have any busses */
6146                 xpt_finishconfig(xpt_periph, NULL);
6147         } else  {
6148                 if (busses_to_reset > 0 && SCSI_DELAY >= 2000) {
6149                         printf("Waiting %d seconds for SCSI "
6150                                "devices to settle\n", SCSI_DELAY/1000);
6151                 }
6152                 xpt_for_all_busses(xptconfigfunc, NULL);
6153         }
6154 }
6155
6156 /*
6157  * If the given device only has one peripheral attached to it, and if that
6158  * peripheral is the passthrough driver, announce it.  This ensures that the
6159  * user sees some sort of announcement for every peripheral in their system.
6160  */
6161 static int
6162 xptpassannouncefunc(struct cam_ed *device, void *arg)
6163 {
6164         struct cam_periph *periph;
6165         int i;
6166
6167         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6168              periph = SLIST_NEXT(periph, periph_links), i++);
6169
6170         periph = SLIST_FIRST(&device->periphs);
6171         if ((i == 1)
6172          && (strncmp(periph->periph_name, "pass", 4) == 0))
6173                 xpt_announce_periph(periph, NULL);
6174
6175         return(1);
6176 }
6177
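     /*
      * Completion handler for the configuration-time bus reset and
      * scan CCBs.  A successful reset is converted into a bus scan.
      * Once the last bus completes, the peripheral drivers are
      * registered, passthrough-only devices are announced, and the
      * boot-time config hook is released so the boot can continue.
      */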
6178 static void
6179 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6180 {
6181         struct  periph_driver **p_drv;
6182         int     i;
6183
6184         if (done_ccb != NULL) {
6185                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6186                           ("xpt_finishconfig\n"));
6187                 switch(done_ccb->ccb_h.func_code) {
6188                 case XPT_RESET_BUS:
6189                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6190                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6191                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6192                                 xpt_action(done_ccb);
6193                                 return;
6194                         }
6195                         /* FALLTHROUGH */
6196                 case XPT_SCAN_BUS:
6197                 default:
6198                         xpt_free_path(done_ccb->ccb_h.path);
6199                         busses_to_config--;
6200                         break;
6201                 }
6202         }
6203
6204         if (busses_to_config == 0) {
6205                 /* Register all the peripheral drivers */
6206                 /* XXX This will have to change when we have loadable modules */
6207                 p_drv = (struct periph_driver **)periphdriver_set.ls_items;
6208                 for (i = 0; p_drv[i] != NULL; i++) {
6209                         (*p_drv[i]->init)();
6210                 }
6211
6212                 /*
6213                  * Check for devices with no "standard" peripheral driver
6214                  * attached.  For any devices like that, announce the
6215                  * passthrough driver so the user will see something.
6216                  */
6217                 xpt_for_all_devices(xptpassannouncefunc, NULL);
6218
6219                 /* Release our hook so that the boot can continue. */
6220                 config_intrhook_disestablish(xpt_config_hook);
6221                 free(xpt_config_hook, M_TEMP);
6222                 xpt_config_hook = NULL;
6223         }
6224         if (done_ccb != NULL)
6225                 xpt_free_ccb(done_ccb);
6226 }
6227
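     /*
      * Action routine for the transport layer's own SIM.  Only path
      * inquiry CCBs are serviced; all other function codes are
      * rejected as invalid.
      */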
6228 static void
6229 xptaction(struct cam_sim *sim, union ccb *work_ccb)
6230 {
6231         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6232
6233         switch (work_ccb->ccb_h.func_code) {
6234         /* Common cases first */
6235         case XPT_PATH_INQ:              /* Path routing inquiry */
6236         {
6237                 struct ccb_pathinq *cpi;
6238
6239                 cpi = &work_ccb->cpi;
6240                 cpi->version_num = 1; /* XXX??? */
6241                 cpi->hba_inquiry = 0;
6242                 cpi->target_sprt = 0;
6243                 cpi->hba_misc = 0;
6244                 cpi->hba_eng_cnt = 0;
6245                 cpi->max_target = 0;
6246                 cpi->max_lun = 0;
6247                 cpi->initiator_id = 0;
6248                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6249                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
6250                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6251                 cpi->unit_number = sim->unit_number;
6252                 cpi->bus_id = sim->bus_id;
6253                 cpi->base_transfer_speed = 0;
6254                 cpi->ccb_h.status = CAM_REQ_CMP;
6255                 xpt_done(work_ccb);
6256                 break;
6257         }
6258         default:
6259                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
6260                 xpt_done(work_ccb);
6261                 break;
6262         }
6263 }
6264
6265 /*
6266  * The xpt as a "controller" has no interrupt sources, so polling
6267  * is a no-op.
6268  */
6269 static void
6270 xptpoll(struct cam_sim *sim)
6271 {
6272 }
6273
6274 /*
6275  * Should only be called by the machine interrupt dispatch routines,
6276  * so put these prototypes here instead of in the header.
6277  */
6278
6279 static void
6280 swi_camnet(void)
6281 {
6282         camisr(&cam_netq);
6283 }
6284
6285 static void
6286 swi_cambio(void)
6287 {
6288         camisr(&cam_bioq);
6289 }
6290
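     /*
      * Drain a completion queue, retiring each CCB in turn: return
      * high power slots, update device and SIM queue accounting,
      * release frozen queues and restart idle ones as required, and
      * finally invoke the CCB's completion callback.
      */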
6291 static void
6292 camisr(cam_isrq_t *queue)
6293 {
6294         int     s;
6295         struct  ccb_hdr *ccb_h;
6296
6297         s = splcam();
6298         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
6299                 int     runq;
6300
6301                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
6302                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
6303                 splx(s);
6304
6305                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
6306                           ("camisr\n"));
6307
6308                 runq = FALSE;
6309
6310                 if (ccb_h->flags & CAM_HIGH_POWER) {
6311                         struct highpowerlist    *hphead;
6312                         struct cam_ed           *device;
6313                         union ccb               *send_ccb;
6314
6315                         hphead = &highpowerq;
6316
6317                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
6318
6319                         /*
6320                          * This command is done, so free up its high power slot.
6321                          */
6322                         num_highpower++;
6323
6324                         /* 
6325                          * Any high powered commands queued up?
6326                          */
6327                         if (send_ccb != NULL) {
6328                                 device = send_ccb->ccb_h.path->device;
6329
6330                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
6331
6332                                 xpt_release_devq(send_ccb->ccb_h.path,
6333                                                  /*count*/1, /*runqueue*/TRUE);
6334                         }
6335                 }
6336                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
6337                         struct cam_ed *dev;
6338
6339                         dev = ccb_h->path->device;
6340
6341                         s = splcam();
6342                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
6343
6344                         ccb_h->path->bus->sim->devq->send_active--;
6345                         ccb_h->path->bus->sim->devq->send_openings++;
6346                         splx(s);
6347                         
6348                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
6349                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
6350                           && (dev->ccbq.dev_active == 0))) {
6351                                 
6352                                 xpt_release_devq(ccb_h->path, /*count*/1,
6353                                                  /*run_queue*/TRUE);
6354                         }
6355
6356                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6357                          && (--dev->tag_delay_count == 0))
6358                                 xpt_start_tags(ccb_h->path);
6359
6360                         if ((dev->ccbq.queue.entries > 0)
6361                          && (dev->qfrozen_cnt == 0)
6362                          && (device_is_send_queued(dev) == 0)) {
6363                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6364                                                               dev);
6365                         }
6366                 }
6367
6368                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
6369                         xpt_release_simq(ccb_h->path->bus->sim,
6370                                          /*run_queue*/TRUE);
6371                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
6372                         runq = FALSE;
6373                 } 
6374
6375                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6376                  && (ccb_h->status & CAM_DEV_QFRZN)) {
6377                         xpt_release_devq(ccb_h->path, /*count*/1,
6378                                          /*run_queue*/TRUE);
6379                         ccb_h->status &= ~CAM_DEV_QFRZN;
6380                 } else if (runq) {
6381                         xpt_run_dev_sendq(ccb_h->path->bus);
6382                 }
6383
6384                 /* Call the peripheral driver's callback */
6385                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
6386
6387                 /* Raise IPL for the while condition test */
6388                 s = splcam();
6389         }
6390         splx(s);
6391 }