/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.52 2007/11/29 03:40:09 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include "opt_cam.h"

/* Data structures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t       event_enable;   /* Async Event enables */
        void            (*callback)(void *arg, u_int32_t code,
                                    struct cam_path *path, void *args);
        void            *callback_arg;
};
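
/*
 * Illustrative sketch only (not part of this file): a handler matching
 * the callback signature above might look like the hypothetical example
 * below.  Such a handler is typically registered through an
 * XPT_SASYNC_CB CCB, with event_enable selecting the AC_* codes of
 * interest.
 *
 *      static void
 *      example_async_cb(void *callback_arg, u_int32_t code,
 *                       struct cam_path *path, void *arg)
 *      {
 *              switch (code) {
 *              case AC_FOUND_DEVICE:
 *                      // arg carries announce data for the new device
 *                      break;
 *              case AC_LOST_DEVICE:
 *                      // tear down any per-device state
 *                      break;
 *              }
 *      }
 */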

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct    cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct  cam_ed_qinfo alloc_ccb_entry;
        struct  cam_ed_qinfo send_ccb_entry;
        struct  cam_et   *target;
        lun_id_t         lun_id;
        struct  camq drvq;              /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
        struct  async_list asyncs;      /* Async callback info for this B/T/L */
        struct  periph_list periphs;    /* All attached devices */
        u_int   generation;             /* Generation number */
        struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
        struct  xpt_quirk_entry *quirk; /* Oddities about this device */
                                        /* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
        cam_proto        protocol;
        u_int            protocol_version;
        cam_xport        transport;
        u_int            transport_version;
#endif /* CAM_NEW_TRAN_CODE */
        struct           scsi_inquiry_data inq_data;
        u_int8_t         inq_flags;     /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t         queue_flags;   /* Queue flags from the control page */
        u_int8_t         serial_num_len;
        u_int8_t        *serial_num;
        u_int32_t        qfrozen_cnt;
        u_int32_t        flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
        u_int32_t        tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t        tag_saved_openings;
        u_int32_t        refcount;
        struct           callout c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct  cam_eb  *bus;
        target_id_t     target_id;
        u_int32_t       refcount;
        u_int           generation;
        struct          timeval last_reset;     /* uptime of last reset */
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb)  links;
        path_id_t            path_id;
        struct cam_sim       *sim;
        struct timeval       last_reset;        /* uptime of last reset */
        u_int32_t            flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t            refcount;
        u_int                generation;
};

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb     *bus;
        struct cam_et     *target;
        struct cam_ed     *device;
};
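
/*
 * A cam_path names a single {bus, target, lun} nexus.  As an
 * illustrative sketch (not part of this file), a consumer typically
 * materializes a path and releases it when finished:
 *
 *      struct cam_path *path;
 *
 *      if (xpt_create_path(&path, NULL, path_id, target_id, lun_id)
 *          == CAM_REQ_CMP) {
 *              ... issue CCBs against path ...
 *              xpt_free_path(path);
 *      }
 */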

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
#define CAM_QUIRK_NOHILUNS      0x08
        u_int mintags;
        u_int maxtags;
};
#define CAM_SCSI2_MAXLUN        8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2, we can look for luns above lun 8.
 */
#define CAN_SRCH_HI(dv)                                 \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2))

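/*
 * For example, a device reporting SCSI-3 with no quirk flags set
 * satisfies CAN_SRCH_HI(), while a SCSI-2 device qualifies only if its
 * quirk entry carries CAM_QUIRK_HILUNS; CAM_QUIRK_NOHILUNS vetoes the
 * high-lun search in either case.
 */
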
typedef enum {
        XPT_FLAG_OPEN           = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags       flags;
        u_int32_t       generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
                /*
                 * The Seagate Medalist Pro drives have very poor write
                 * performance with anything more than 2 tags.
                 *
                 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
                 * Drive:  <SEAGATE ST36530N 1444>
                 *
                 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
                 * Drive:  <SEAGATE ST34520W 1281>
                 *
                 * No one has actually reported that the 9G version
                 * (ST39140*) of the Medalist Pro has the same problem, but
                 * we're assuming that it does because the 4G and 6.5G
                 * versions of the drive are broken.
                 */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR:  kern/10398
                 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive:  DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by:  Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR:            misc/18793
                 * Submitted by:  Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                  /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};
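
/*
 * New quirks are added as entries above the catch-all default at the
 * end of the table; the first pattern that matches a device's inquiry
 * data wins.  A hypothetical entry (vendor/product shown for
 * illustration only) would look like:
 *
 *      {
 *              // Disable multi-LUN probing and tagged queuing
 *              { T_DIRECT, SIP_MEDIA_FIXED, "ACME", "WIDGET*", "*" },
 *              CAM_QUIRK_NOLUNS, 0, 0
 *      },
 */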

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
        { "xpt", XPT_CDEV_MAJOR, 0 },
        .d_open = xptopen,
        .d_close = xptclose,
        .d_ioctl = xptioctl
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging data structures */
#ifdef  CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
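
/*
 * For example, a hypothetical kernel config fragment enabling CDB
 * tracing on a single device would need all of the related options:
 *
 *      options CAMDEBUG
 *      options CAM_DEBUG_BUS=0
 *      options CAM_DEBUG_TARGET=1
 *      options CAM_DEBUG_LUN=0
 *      options CAM_DEBUG_FLAGS=CAM_DEBUG_CDB
 */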

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static void     xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status       xpt_compile_path(struct cam_path *new_path,
                                         struct cam_periph *perph,
                                         path_id_t path_id,
                                         target_id_t target_id,
                                         lun_id_t lun_id);

static void             xpt_release_path(struct cam_path *path);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static void             xpt_dev_async(u_int32_t async_code,
                                      struct cam_eb *bus,
                                      struct cam_et *target,
                                      struct cam_ed *device,
                                      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                  u_int32_t new_priority);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                         int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
                 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                                  lun_id_t lun_id);
static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                                    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void      xpt_scan_lun(struct cam_periph *periph,
                              struct cam_path *path, cam_flags flags,
                              union ccb *ccb);
static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t    xptconfigbuscountfunc;
static xpt_busfunc_t    xptconfigfunc;
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void      camisr(cam_isrq_t *queue);
#if 0
static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void      xptasync(struct cam_periph *periph,
                          u_int32_t code, cam_path *path);
#endif
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                                            void *arg);
#endif
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
#ifdef notusedyet
static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                                            void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static cam_status       proberegister(struct cam_periph *periph,
                                      void *arg);
static void      probeschedule(struct cam_periph *probe_periph);
static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
static void      proberequestdefaultnegotiation(struct cam_periph *periph);
static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
static void      probecleanup(struct cam_periph *periph);
static void      xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void      xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                           struct cam_ed *device,
                                           int async_update);
static void      xpt_toggle_tags(struct cam_path *path);
static void      xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int     retval;

        if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}

static void
xpt_periph_init(void)
{
        dev_ops_add(&xpt_ops, 0, 0);
        make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct dev_open_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((ap->a_oflags & O_NONBLOCK) != 0) {
                kprintf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}

static int
xptclose(struct dev_close_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(struct dev_ioctl_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(ap->a_cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)ap->a_data;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;

                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as input,
         * with the peripheral driver name and unit name filled in.  The other
         * fields don't really matter as input.  The passthrough driver name
         * ("pass") and unit number are passed back in the ccb.  The current
         * device generation number, the index into the device peripheral
         * driver list, and the status are also passed back.  Note that
         * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
         * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it within a critical section.
         */
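        /*
         * From userland this might be exercised roughly as in the
         * hypothetical sketch below (error handling omitted); this is
         * essentially what libcam does when looking up the pass device
         * for a peripheral:
         *
         *      union ccb ccb;
         *      int fd = open("/dev/xpt0", O_RDWR);
         *
         *      bzero(&ccb, sizeof(ccb));
         *      ccb.ccb_h.func_code = XPT_GDEVLIST;
         *      strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
         *      ccb.cgdl.unit_number = 0;
         *      ioctl(fd, CAMGETPASSTHRU, &ccb);
         *      // on success, ccb.cgdl names the matching "pass" unit
         */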
1154         case CAMGETPASSTHRU: {
1155                 union ccb *ccb;
1156                 struct cam_periph *periph;
1157                 struct periph_driver **p_drv;
1158                 char   *name;
1159                 u_int unit;
1160                 u_int cur_generation;
1161                 int base_periph_found;
1162                 int splbreaknum;
1163
1164                 ccb = (union ccb *)ap->a_data;
1165                 unit = ccb->cgdl.unit_number;
1166                 name = ccb->cgdl.periph_name;
1167                 /*
1168                  * Every 100 devices, we want to call splz() to check for
1169                  * and allow the software interrupt handler a chance to run.
1170                  *
1171                  * Most systems won't run into this check, but this should
1172                  * avoid starvation in the software interrupt handler in
1173                  * large systems.
1174                  */
1175                 splbreaknum = 100;
1176
1177                 ccb = (union ccb *)ap->a_data;
1178
1179                 base_periph_found = 0;
1180
1181                 /*
1182                  * Sanity check -- make sure we don't get a null peripheral
1183                  * driver name.
1184                  */
1185                 if (*ccb->cgdl.periph_name == '\0') {
1186                         error = EINVAL;
1187                         break;
1188                 }
1189
1190                 /* Keep the list from changing while we traverse it */
1191                 crit_enter();
1192 ptstartover:
1193                 cur_generation = xsoftc.generation;
1194
1195                 /* first find our driver in the list of drivers */
1196                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
1197                         if (strcmp((*p_drv)->driver_name, name) == 0)
1198                                 break;
1199                 }
1200
1201                 if (*p_drv == NULL) {
1202                         crit_exit();
1203                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1204                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1205                         *ccb->cgdl.periph_name = '\0';
1206                         ccb->cgdl.unit_number = 0;
1207                         error = ENOENT;
1208                         break;
1209                 }       
1210
1211                 /*
1212                  * Run through every peripheral instance of this driver
1213                  * and check to see whether it matches the unit passed
1214                  * in by the user.  If it does, get out of the loops and
1215                  * find the passthrough driver associated with that
1216                  * peripheral driver.
1217                  */
1218                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
1219
1220                         if (periph->unit_number == unit) {
1221                                 break;
1222                         } else if (--splbreaknum == 0) {
1223                                 splz();
1224                                 splbreaknum = 100;
1225                                 if (cur_generation != xsoftc.generation)
1226                                        goto ptstartover;
1227                         }
1228                 }
1229                 /*
1230                  * If we found the peripheral driver that the user passed
1231                  * in, go through all of the peripheral drivers for that
1232                  * particular device and look for a passthrough driver.
1233                  */
1234                 if (periph != NULL) {
1235                         struct cam_ed *device;
1236                         int i;
1237
1238                         base_periph_found = 1;
1239                         device = periph->path->device;
1240                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
1241                              periph != NULL;
1242                              periph = SLIST_NEXT(periph, periph_links), i++) {
1243                                 /*
1244                                  * Check to see whether we have a
1245                                  * passthrough device or not. 
1246                                  */
1247                                 if (strcmp(periph->periph_name, "pass") == 0) {
1248                                         /*
1249                                          * Fill in the getdevlist fields.
1250                                          */
1251                                         strcpy(ccb->cgdl.periph_name,
1252                                                periph->periph_name);
1253                                         ccb->cgdl.unit_number =
1254                                                 periph->unit_number;
1255                                         if (SLIST_NEXT(periph, periph_links))
1256                                                 ccb->cgdl.status =
1257                                                         CAM_GDEVLIST_MORE_DEVS;
1258                                         else
1259                                                 ccb->cgdl.status =
1260                                                        CAM_GDEVLIST_LAST_DEVICE;
1261                                         ccb->cgdl.generation =
1262                                                 device->generation;
1263                                         ccb->cgdl.index = i;
1264                                         /*
1265                                          * Fill in some CCB header fields
1266                                          * that the user may want.
1267                                          */
1268                                         ccb->ccb_h.path_id =
1269                                                 periph->path->bus->path_id;
1270                                         ccb->ccb_h.target_id =
1271                                                 periph->path->target->target_id;
1272                                         ccb->ccb_h.target_lun =
1273                                                 periph->path->device->lun_id;
1274                                         ccb->ccb_h.status = CAM_REQ_CMP;
1275                                         break;
1276                                 }
1277                         }
1278                 }
1279
1280                 /*
1281                  * If the periph is null here, one of two things has
1282                  * happened.  The first possibility is that we couldn't
1283                  * find the unit number of the particular peripheral driver
1284                  * that the user is asking about.  e.g. the user asks for
1285                  * the passthrough driver for "da11".  We find the list of
1286                  * "da" peripherals all right, but there is no unit 11.
1287                  * The other possibility is that we went through the list
1288                  * of peripheral drivers attached to the device structure,
1289                  * but didn't find one with the name "pass".  Either way,
1290                  * we return ENOENT, since we couldn't find something.
1291                  */
1292                 if (periph == NULL) {
1293                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1294                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1295                         *ccb->cgdl.periph_name = '\0';
1296                         ccb->cgdl.unit_number = 0;
1297                         error = ENOENT;
1298                         /*
1299                          * It is unfortunate that this is even necessary,
1300                          * but there are many, many clueless users out there.
1301                          * If we found the base peripheral but no "pass"
1302                          * periph on it, the user wants the passthrough
1303                          * driver but doesn't have one in his kernel.
1304                          */
1305                         if (base_periph_found == 1) {
1306                                 kprintf("xptioctl: pass driver is not in the "
1307                                        "kernel\n");
1308                                 kprintf("xptioctl: put \"device pass0\" in "
1309                                        "your kernel config file\n");
1310                         }
1311                 }
1312                 crit_exit();
1313                 break;
1314                 }
1315         default:
1316                 error = ENOTTY;
1317                 break;
1318         }
1319
1320         return(error);
1321 }
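
/*
 * Illustrative sketch (not part of this file) of the userland side of
 * the CAMGETPASSTHRU ioctl handled above: ask which pass(4) instance
 * sits on top of a given peripheral, here "da0".  Assumes fd is an
 * open xpt(4) device node; error handling is abbreviated.
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
 *	ccb.cgdl.unit_number = 0;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1 ||
 *	    ccb.ccb_h.status != CAM_REQ_CMP)
 *		errx(1, "CAMGETPASSTHRU failed");
 *	printf("da0 is serviced by %s%d\n",
 *	       ccb.cgdl.periph_name, ccb.cgdl.unit_number);
 */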
1322
1323 static int
1324 cam_module_event_handler(module_t mod, int what, void *arg)
1325 {
1326         if (what == MOD_LOAD) {
1327                 xpt_init(NULL);
1328         } else if (what == MOD_UNLOAD) {
1329                 return EBUSY;
1330         } else {
1331                 return EOPNOTSUPP;
1332         }
1333
1334         return 0;
1335 }
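
/*
 * For reference, a handler like the one above is hooked into the
 * module system through a moduledata_t.  Hedged sketch of the usual
 * declaration; the actual declaration for this subsystem lives
 * elsewhere, and the subsystem/order values here are illustrative:
 *
 *	static moduledata_t cam_moduledata = {
 *		"cam",
 *		cam_module_event_handler,
 *		NULL
 *	};
 *	DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
 */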
1336
1337 /* Functions accessed by the peripheral drivers */
1338 static void
1339 xpt_init(void *dummy)
1340 {
1341         struct cam_sim *xpt_sim;
1342         struct cam_path *path;
1343         struct cam_devq *devq;
1344         cam_status status;
1345
1346         TAILQ_INIT(&xpt_busses);
1347         TAILQ_INIT(&cam_bioq);
1348         SLIST_INIT(&ccb_freeq);
1349         STAILQ_INIT(&highpowerq);
1350
1351         /*
1352          * The xpt layer is, itself, the equivalent of a SIM.
1353          * Allow 16 ccbs in the ccb pool for it.  This should
1354          * give decent parallelism when we probe busses and
1355          * perform other XPT functions.
1356          */
1357         devq = cam_simq_alloc(16);
1358         xpt_sim = cam_sim_alloc(xptaction,
1359                                 xptpoll,
1360                                 "xpt",
1361                                 /*softc*/NULL,
1362                                 /*unit*/0,
1363                                 /*max_dev_transactions*/0,
1364                                 /*max_tagged_dev_transactions*/0,
1365                                 devq);
1366         cam_simq_release(devq);
1367         xpt_max_ccbs = 16;
1368                                 
1369         xpt_bus_register(xpt_sim, /*bus #*/0);
1370
1371         /*
1372          * Looking at the XPT from the SIM layer, the XPT is
1373          * the equivalent of a peripheral driver.  Allocate
1374          * a peripheral driver entry for us.
1375          */
1376         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1377                                       CAM_TARGET_WILDCARD,
1378                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1379                 kprintf("xpt_init: xpt_create_path failed with status %#x,"
1380                        " failing attach\n", status);
1381                 return;
1382         }
1383
1384         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1385                          path, NULL, 0, NULL);
1386         xpt_free_path(path);
1387
1388         xpt_sim->softc = xpt_periph;
1389
1390         /*
1391          * Register a callback for when interrupts are enabled.
1392          */
1393         xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
1394                                   M_TEMP, M_INTWAIT | M_ZERO);
1395         xpt_config_hook->ich_func = xpt_config;
1396         xpt_config_hook->ich_desc = "xpt";
1397         xpt_config_hook->ich_order = 1000;
1398         if (config_intrhook_establish(xpt_config_hook) != 0) {
1399                 kfree(xpt_config_hook, M_TEMP);
1400                 kprintf("xpt_init: config_intrhook_establish failed "
1401                        "- failing attach\n");
1402         }
1403
1404         /* Install our software interrupt handlers */
1405         register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
1406 }
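
/*
 * A hardware SIM goes through the same registration dance that
 * xpt_init() performs for the XPT's built-in SIM above.  Hedged
 * sketch of a typical HBA attach path ("foo" is a made-up driver and
 * the queue depths are illustrative):
 *
 *	devq = cam_simq_alloc(MAX_QUEUED);
 *	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc,
 *			    unit, 1, MAX_TAGGED, devq);
 *	cam_simq_release(devq);
 *	if (sim == NULL || xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		... fail the attach ...
 */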
1407
1408 static cam_status
1409 xptregister(struct cam_periph *periph, void *arg)
1410 {
1411         if (periph == NULL) {
1412                 kprintf("xptregister: periph was NULL!!\n");
1413                 return(CAM_REQ_CMP_ERR);
1414         }
1415
1416         periph->softc = NULL;
1417
1418         xpt_periph = periph;
1419
1420         return(CAM_REQ_CMP);
1421 }
1422
1423 int32_t
1424 xpt_add_periph(struct cam_periph *periph)
1425 {
1426         struct cam_ed *device;
1427         int32_t  status;
1428         struct periph_list *periph_head;
1429
1430         device = periph->path->device;
1431
1432         status = CAM_REQ_CMP;
1433
1434         if (device != NULL) {
1435                 periph_head = &device->periphs;
1436
1437                 /*
1438                  * Make room for this peripheral
1439                  * so it will fit in the queue
1440                  * when it's scheduled to run
1441                  */
1442                 crit_enter();
1443                 status = camq_resize(&device->drvq,
1444                                      device->drvq.array_size + 1);
1445
1446                 device->generation++;
1447
1448                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1449                 crit_exit();
1450         }
1451
1452         xsoftc.generation++;
1453
1454         return (status);
1455 }
1456
1457 void
1458 xpt_remove_periph(struct cam_periph *periph)
1459 {
1460         struct cam_ed *device;
1461
1462         device = periph->path->device;
1463
1464         if (device != NULL) {
1465                 struct periph_list *periph_head;
1466
1467                 periph_head = &device->periphs;
1468                 
1469                 /* Release the slot for this peripheral */
1470                 crit_enter();
1471                 camq_resize(&device->drvq, device->drvq.array_size - 1);
1472
1473                 device->generation++;
1474
1475                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1476                 crit_exit();
1477         }
1478
1479         xsoftc.generation++;
1480
1481 }
1482
1483 #ifdef CAM_NEW_TRAN_CODE
1484
1485 void
1486 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1487 {
1488         struct  ccb_pathinq cpi;
1489         struct  ccb_trans_settings cts;
1490         struct  cam_path *path;
1491         u_int   speed;
1492         u_int   freq;
1493         u_int   mb;
1494
1495         path = periph->path;
1496         /*
1497          * To ensure that this is printed in one piece,
1498          * mask out CAM interrupts.
1499          */
1500         crit_enter();
1501         kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1502                periph->periph_name, periph->unit_number,
1503                path->bus->sim->sim_name,
1504                path->bus->sim->unit_number,
1505                path->bus->sim->bus_id,
1506                path->target->target_id,
1507                path->device->lun_id);
1508         kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1509         scsi_print_inquiry(&path->device->inq_data);
1510         if (bootverbose && path->device->serial_num_len > 0) {
1511                 /* Don't wrap the screen  - print only the first 60 chars */
1512                 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1513                        periph->unit_number, path->device->serial_num);
1514         }
1515         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1516         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1517         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1518         xpt_action((union ccb*)&cts);
1519
1520         /* Ask the SIM for its base transfer speed */
1521         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1522         cpi.ccb_h.func_code = XPT_PATH_INQ;
1523         xpt_action((union ccb *)&cpi);
1524
1525         speed = cpi.base_transfer_speed;
1526         freq = 0;
1527         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1528                 struct  ccb_trans_settings_spi *spi;
1529
1530                 spi = &cts.xport_specific.spi;
1531                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1532                   && spi->sync_offset != 0) {
1533                         freq = scsi_calc_syncsrate(spi->sync_period);
1534                         speed = freq;
1535                 }
1536
1537                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1538                         speed *= (0x01 << spi->bus_width);
1539         }
1540         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1541                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1542                 if (fc->valid & CTS_FC_VALID_SPEED) {
1543                         speed = fc->bitrate;
1544                 }
1545         }
1546
1547         mb = speed / 1000;
1548         if (mb > 0)
1549                 kprintf("%s%d: %d.%03dMB/s transfers",
1550                        periph->periph_name, periph->unit_number,
1551                        mb, speed % 1000);
1552         else
1553                 kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1554                        periph->unit_number, speed);
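        /*
         * Worked example of the arithmetic above: a wide (16-bit) SPI
         * target that negotiated freq = 80000 (80.000MHz) yields
         * speed = 80000 * (0x01 << 1) = 160000 KB/s, so mb = 160 and
         * the line prints as "160.000MB/s transfers".
         */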
1555         /* Report additional information about SPI connections */
1556         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1557                 struct  ccb_trans_settings_spi *spi;
1558
1559                 spi = &cts.xport_specific.spi;
1560                 if (freq != 0) {
1561                         kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1562                                freq % 1000,
1563                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1564                              ? " DT" : "",
1565                                spi->sync_offset);
1566                 }
1567                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1568                  && spi->bus_width > 0) {
1569                         if (freq != 0) {
1570                                 kprintf(", ");
1571                         } else {
1572                                 kprintf(" (");
1573                         }
1574                         kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
1575                 } else if (freq != 0) {
1576                         kprintf(")");
1577                 }
1578         }
1579         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1580                 struct  ccb_trans_settings_fc *fc;
1581
1582                 fc = &cts.xport_specific.fc;
1583                 if (fc->valid & CTS_FC_VALID_WWNN)
1584                         kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
1585                 if (fc->valid & CTS_FC_VALID_WWPN)
1586                         kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
1587                 if (fc->valid & CTS_FC_VALID_PORT)
1588                         kprintf(" PortID 0x%x", fc->port);
1589         }
1590
1591         if (path->device->inq_flags & SID_CmdQue
1592          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1593                 kprintf("\n%s%d: Tagged Queueing Enabled",
1594                        periph->periph_name, periph->unit_number);
1595         }
1596         kprintf("\n");
1597
1598         /*
1599          * We only want to print the caller's announce string if they've
1600          * passed one in.
1601          */
1602         if (announce_string != NULL)
1603                 kprintf("%s%d: %s\n", periph->periph_name,
1604                        periph->unit_number, announce_string);
1605         crit_exit();
1606 }
1607 #else /* CAM_NEW_TRAN_CODE */
1608 void
1609 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1610 {
1611         u_int mb;
1612         struct cam_path *path;
1613         struct ccb_trans_settings cts;
1614
1615         path = periph->path;
1616         /*
1617          * To ensure that this is printed in one piece,
1618          * mask out CAM interrupts.
1619          */
1620         crit_enter();
1621         kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1622                periph->periph_name, periph->unit_number,
1623                path->bus->sim->sim_name,
1624                path->bus->sim->unit_number,
1625                path->bus->sim->bus_id,
1626                path->target->target_id,
1627                path->device->lun_id);
1628         kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1629         scsi_print_inquiry(&path->device->inq_data);
1630         if ((bootverbose)
1631          && (path->device->serial_num_len > 0)) {
1632                 /* Don't wrap the screen  - print only the first 60 chars */
1633                 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1634                        periph->unit_number, path->device->serial_num);
1635         }
1636         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1637         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1638         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1639         xpt_action((union ccb*)&cts);
1640         if (cts.ccb_h.status == CAM_REQ_CMP) {
1641                 u_int speed;
1642                 u_int freq;
1643
1644                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1645                   && cts.sync_offset != 0) {
1646                         freq = scsi_calc_syncsrate(cts.sync_period);
1647                         speed = freq;
1648                 } else {
1649                         struct ccb_pathinq cpi;
1650
1651                         /* Ask the SIM for its base transfer speed */
1652                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1653                         cpi.ccb_h.func_code = XPT_PATH_INQ;
1654                         xpt_action((union ccb *)&cpi);
1655
1656                         speed = cpi.base_transfer_speed;
1657                         freq = 0;
1658                 }
1659                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1660                         speed *= (0x01 << cts.bus_width);
1661                 mb = speed / 1000;
1662                 if (mb > 0)
1663                         kprintf("%s%d: %d.%03dMB/s transfers",
1664                                periph->periph_name, periph->unit_number,
1665                                mb, speed % 1000);
1666                 else
1667                         kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1668                                periph->unit_number, speed);
1669                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1670                  && cts.sync_offset != 0) {
1671                         kprintf(" (%d.%03dMHz, offset %d", freq / 1000,
1672                                freq % 1000, cts.sync_offset);
1673                 }
1674                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1675                  && cts.bus_width > 0) {
1676                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1677                          && cts.sync_offset != 0) {
1678                                 kprintf(", ");
1679                         } else {
1680                                 kprintf(" (");
1681                         }
1682                         kprintf("%dbit)", 8 * (0x01 << cts.bus_width));
1683                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1684                         && cts.sync_offset != 0) {
1685                         kprintf(")");
1686                 }
1687
1688                 if (path->device->inq_flags & SID_CmdQue
1689                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1690                         kprintf(", Tagged Queueing Enabled");
1691                 }
1692
1693                 kprintf("\n");
1694         } else if (path->device->inq_flags & SID_CmdQue
1695                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1696                 kprintf("%s%d: Tagged Queueing Enabled\n",
1697                        periph->periph_name, periph->unit_number);
1698         }
1699
1700         /*
1701          * We only want to print the caller's announce string if they've
1702          * passed one in.
1703          */
1704         if (announce_string != NULL)
1705                 kprintf("%s%d: %s\n", periph->periph_name,
1706                        periph->unit_number, announce_string);
1707         crit_exit();
1708 }
1709
1710 #endif /* CAM_NEW_TRAN_CODE */
1711
1712 static dev_match_ret
1713 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1714             struct cam_eb *bus)
1715 {
1716         dev_match_ret retval;
1717         int i;
1718
1719         retval = DM_RET_NONE;
1720
1721         /*
1722          * If we aren't given something to match against, that's an error.
1723          */
1724         if (bus == NULL)
1725                 return(DM_RET_ERROR);
1726
1727         /*
1728          * If there are no match entries, then this bus matches no
1729          * matter what.
1730          */
1731         if ((patterns == NULL) || (num_patterns == 0))
1732                 return(DM_RET_DESCEND | DM_RET_COPY);
1733
1734         for (i = 0; i < num_patterns; i++) {
1735                 struct bus_match_pattern *cur_pattern;
1736
1737                 /*
1738                  * If the pattern in question isn't for a bus node, we
1739                  * aren't interested.  However, we do indicate to the
1740                  * calling routine that we should continue descending the
1741                  * tree, since the user wants to match against lower-level
1742                  * EDT elements.
1743                  */
1744                 if (patterns[i].type != DEV_MATCH_BUS) {
1745                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1746                                 retval |= DM_RET_DESCEND;
1747                         continue;
1748                 }
1749
1750                 cur_pattern = &patterns[i].pattern.bus_pattern;
1751
1752                 /*
1753                  * If they want to match any bus node, we give them any
1754                  * bus node.
1755                  */
1756                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1757                         /* set the copy flag */
1758                         retval |= DM_RET_COPY;
1759
1760                         /*
1761                          * If we've already decided on an action, go ahead
1762                          * and return.
1763                          */
1764                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1765                                 return(retval);
1766                 }
1767
1768                 /*
1769                  * Not sure why someone would do this...
1770                  */
1771                 if (cur_pattern->flags == BUS_MATCH_NONE)
1772                         continue;
1773
1774                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1775                  && (cur_pattern->path_id != bus->path_id))
1776                         continue;
1777
1778                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1779                  && (cur_pattern->bus_id != bus->sim->bus_id))
1780                         continue;
1781
1782                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1783                  && (cur_pattern->unit_number != bus->sim->unit_number))
1784                         continue;
1785
1786                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1787                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1788                              DEV_IDLEN) != 0))
1789                         continue;
1790
1791                 /*
1792                  * If we get to this point, the user definitely wants 
1793                  * information on this bus.  So tell the caller to copy the
1794                  * data out.
1795                  */
1796                 retval |= DM_RET_COPY;
1797
1798                 /*
1799                  * If the return action has been set to descend, then we
1800                  * know that we've already seen a non-bus matching
1801                  * expression, therefore we need to further descend the tree.
1802                  * This won't change by continuing around the loop, so we
1803                  * go ahead and return.  If we haven't seen a non-bus
1804                  * matching expression, we keep going around the loop until
1805                  * we exhaust the matching expressions.  We'll set the stop
1806                  * flag once we fall out of the loop.
1807                  */
1808                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1809                         return(retval);
1810         }
1811
1812         /*
1813          * If the return action hasn't been set to descend yet, that means
1814          * we haven't seen anything other than bus matching patterns.  So
1815          * tell the caller to stop descending the tree -- the user doesn't
1816          * want to match against lower level tree elements.
1817          */
1818         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1819                 retval |= DM_RET_STOP;
1820
1821         return(retval);
1822 }
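
/*
 * Example pattern for the matcher above (illustrative): match every
 * bus driven by an "ahc" SIM, regardless of path id or unit number:
 *
 *	struct dev_match_pattern p;
 *
 *	p.type = DEV_MATCH_BUS;
 *	p.pattern.bus_pattern.flags = BUS_MATCH_NAME;
 *	strncpy(p.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
 */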
1823
1824 static dev_match_ret
1825 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1826                struct cam_ed *device)
1827 {
1828         dev_match_ret retval;
1829         int i;
1830
1831         retval = DM_RET_NONE;
1832
1833         /*
1834          * If we aren't given something to match against, that's an error.
1835          */
1836         if (device == NULL)
1837                 return(DM_RET_ERROR);
1838
1839         /*
1840          * If there are no match entries, then this device matches no
1841          * matter what.
1842          */
1843         if ((patterns == NULL) || (num_patterns == 0))
1844                 return(DM_RET_DESCEND | DM_RET_COPY);
1845
1846         for (i = 0; i < num_patterns; i++) {
1847                 struct device_match_pattern *cur_pattern;
1848
1849                 /*
1850                  * If the pattern in question isn't for a device node, we
1851                  * aren't interested.
1852                  */
1853                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1854                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1855                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1856                                 retval |= DM_RET_DESCEND;
1857                         continue;
1858                 }
1859
1860                 cur_pattern = &patterns[i].pattern.device_pattern;
1861
1862                 /*
1863                  * If they want to match any device node, we give them any
1864                  * device node.
1865                  */
1866                 if (cur_pattern->flags == DEV_MATCH_ANY) {
1867                         /* set the copy flag */
1868                         retval |= DM_RET_COPY;
1869
1871                         /*
1872                          * If we've already decided on an action, go ahead
1873                          * and return.
1874                          */
1875                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1876                                 return(retval);
1877                 }
1878
1879                 /*
1880                  * Not sure why someone would do this...
1881                  */
1882                 if (cur_pattern->flags == DEV_MATCH_NONE)
1883                         continue;
1884
1885                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1886                  && (cur_pattern->path_id != device->target->bus->path_id))
1887                         continue;
1888
1889                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1890                  && (cur_pattern->target_id != device->target->target_id))
1891                         continue;
1892
1893                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1894                  && (cur_pattern->target_lun != device->lun_id))
1895                         continue;
1896
1897                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1898                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1899                                     (caddr_t)&cur_pattern->inq_pat,
1900                                     1, sizeof(cur_pattern->inq_pat),
1901                                     scsi_static_inquiry_match) == NULL))
1902                         continue;
1903
1904                 /*
1905                  * If we get to this point, the user definitely wants 
1906                  * information on this device.  So tell the caller to copy
1907                  * the data out.
1908                  */
1909                 retval |= DM_RET_COPY;
1910
1911                 /*
1912                  * If the return action has been set to descend, then we
1913                  * know that we've already seen a peripheral matching
1914                  * expression, therefore we need to further descend the tree.
1915                  * This won't change by continuing around the loop, so we
1916                  * go ahead and return.  If we haven't seen a peripheral
1917                  * matching expression, we keep going around the loop until
1918                  * we exhaust the matching expressions.  We'll set the stop
1919                  * flag once we fall out of the loop.
1920                  */
1921                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1922                         return(retval);
1923         }
1924
1925         /*
1926          * If the return action hasn't been set to descend yet, that means
1927          * we haven't seen any peripheral matching patterns.  So tell the
1928          * caller to stop descending the tree -- the user doesn't want to
1929          * match against lower level tree elements.
1930          */
1931         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1932                 retval |= DM_RET_STOP;
1933
1934         return(retval);
1935 }
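
/*
 * Example pattern for the matcher above (illustrative): use the
 * inquiry data to match any fixed direct-access device from one
 * vendor.  The "*" wildcards behave as in the quirk tables; the
 * vendor string here is arbitrary:
 *
 *	static struct scsi_inquiry_pattern inq_pat = {
 *		T_DIRECT, SIP_MEDIA_FIXED, "SEAGATE", "*", "*"
 *	};
 *	struct dev_match_pattern p;
 *
 *	p.type = DEV_MATCH_DEVICE;
 *	p.pattern.device_pattern.flags = DEV_MATCH_INQUIRY;
 *	p.pattern.device_pattern.inq_pat = inq_pat;
 */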
1936
1937 /*
1938  * Match a single peripheral against any number of match patterns.
1939  */
1940 static dev_match_ret
1941 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1942                struct cam_periph *periph)
1943 {
1944         dev_match_ret retval;
1945         int i;
1946
1947         /*
1948          * If we aren't given something to match against, that's an error.
1949          */
1950         if (periph == NULL)
1951                 return(DM_RET_ERROR);
1952
1953         /*
1954          * If there are no match entries, then this peripheral matches no
1955          * matter what.
1956          */
1957         if ((patterns == NULL) || (num_patterns == 0))
1958                 return(DM_RET_STOP | DM_RET_COPY);
1959
1960         /*
1961          * There aren't any nodes below a peripheral node, so there's no
1962          * reason to descend the tree any further.
1963          */
1964         retval = DM_RET_STOP;
1965
1966         for (i = 0; i < num_patterns; i++) {
1967                 struct periph_match_pattern *cur_pattern;
1968
1969                 /*
1970                  * If the pattern in question isn't for a peripheral, we
1971                  * aren't interested.
1972                  */
1973                 if (patterns[i].type != DEV_MATCH_PERIPH)
1974                         continue;
1975
1976                 cur_pattern = &patterns[i].pattern.periph_pattern;
1977
1978                 /*
1979                  * If they want to match on anything, then we will do so.
1980                  */
1981                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1982                         /* set the copy flag */
1983                         retval |= DM_RET_COPY;
1984
1985                         /*
1986                          * We've already set the return action to stop,
1987                          * since there are no nodes below peripherals in
1988                          * the tree.
1989                          */
1990                         return(retval);
1991                 }
1992
1993                 /*
1994                  * Not sure why someone would do this...
1995                  */
1996                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1997                         continue;
1998
1999                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2000                  && (cur_pattern->path_id != periph->path->bus->path_id))
2001                         continue;
2002
2003                 /*
2004                  * For the target and lun IDs, we have to make sure the
2005                  * target and lun pointers aren't NULL.  The xpt peripheral
2006                  * has a wildcard target and device.
2007                  */
2008                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2009                  && ((periph->path->target == NULL)
2010                  ||(cur_pattern->target_id != periph->path->target->target_id)))
2011                         continue;
2012
2013                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2014                  && ((periph->path->device == NULL)
2015                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
2016                         continue;
2017
2018                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2019                  && (cur_pattern->unit_number != periph->unit_number))
2020                         continue;
2021
2022                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2023                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
2024                              DEV_IDLEN) != 0))
2025                         continue;
2026
2027                 /*
2028                  * If we get to this point, the user definitely wants 
2029                  * information on this peripheral.  So tell the caller to
2030                  * copy the data out.
2031                  */
2032                 retval |= DM_RET_COPY;
2033
2034                 /*
2035                  * The return action has already been set to stop, since
2036                  * peripherals don't have any nodes below them in the EDT.
2037                  */
2038                 return(retval);
2039         }
2040
2041         /*
2042          * If we get to this point, the peripheral that was passed in
2043          * doesn't match any of the patterns.
2044          */
2045         return(retval);
2046 }
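
/*
 * Example pattern for the matcher above (illustrative): match every
 * instance of the "da" peripheral driver:
 *
 *	struct dev_match_pattern p;
 *
 *	p.type = DEV_MATCH_PERIPH;
 *	p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strncpy(p.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
 */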
2047
2048 static int
2049 xptedtbusfunc(struct cam_eb *bus, void *arg)
2050 {
2051         struct ccb_dev_match *cdm;
2052         dev_match_ret retval;
2053
2054         cdm = (struct ccb_dev_match *)arg;
2055
2056         /*
2057          * If our position is for something deeper in the tree, that means
2058          * that we've already seen this node.  So, we keep going down.
2059          */
2060         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2061          && (cdm->pos.cookie.bus == bus)
2062          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2063          && (cdm->pos.cookie.target != NULL))
2064                 retval = DM_RET_DESCEND;
2065         else
2066                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2067
2068         /*
2069          * If we got an error, bail out of the search.
2070          */
2071         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2072                 cdm->status = CAM_DEV_MATCH_ERROR;
2073                 return(0);
2074         }
2075
2076         /*
2077          * If the copy flag is set, copy this bus out.
2078          */
2079         if (retval & DM_RET_COPY) {
2080                 int spaceleft, j;
2081
2082                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2083                         sizeof(struct dev_match_result));
2084
2085                 /*
2086                  * If we don't have enough space to put in another
2087                  * match result, save our position and tell the
2088                  * user there are more devices to check.
2089                  */
2090                 if (spaceleft < sizeof(struct dev_match_result)) {
2091                         bzero(&cdm->pos, sizeof(cdm->pos));
2092                         cdm->pos.position_type = 
2093                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2094
2095                         cdm->pos.cookie.bus = bus;
2096                         cdm->pos.generations[CAM_BUS_GENERATION] =
2097                                 bus_generation;
2098                         cdm->status = CAM_DEV_MATCH_MORE;
2099                         return(0);
2100                 }
2101                 j = cdm->num_matches;
2102                 cdm->num_matches++;
2103                 cdm->matches[j].type = DEV_MATCH_BUS;
2104                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2105                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2106                 cdm->matches[j].result.bus_result.unit_number =
2107                         bus->sim->unit_number;
2108                 strncpy(cdm->matches[j].result.bus_result.dev_name,
2109                         bus->sim->sim_name, DEV_IDLEN);
2110         }
2111
2112         /*
2113          * If the user is only interested in busses, there's no
2114          * reason to descend to the next level in the tree.
2115          */
2116         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2117                 return(1);
2118
2119         /*
2120          * If there is a target generation recorded, check it to
2121          * make sure the target list hasn't changed.
2122          */
2123         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2124          && (bus == cdm->pos.cookie.bus)
2125          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2126          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2127          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2128              bus->generation)) {
2129                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2130                 return(0);
2131         }
2132
2133         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2134          && (cdm->pos.cookie.bus == bus)
2135          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2136          && (cdm->pos.cookie.target != NULL))
2137                 return(xpttargettraverse(bus,
2138                                         (struct cam_et *)cdm->pos.cookie.target,
2139                                          xptedttargetfunc, arg));
2140         else
2141                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2142 }
2143
2144 static int
2145 xptedttargetfunc(struct cam_et *target, void *arg)
2146 {
2147         struct ccb_dev_match *cdm;
2148
2149         cdm = (struct ccb_dev_match *)arg;
2150
2151         /*
2152          * If there is a device list generation recorded, check it to
2153          * make sure the device list hasn't changed.
2154          */
2155         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2156          && (cdm->pos.cookie.bus == target->bus)
2157          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2158          && (cdm->pos.cookie.target == target)
2159          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2160          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2161          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2162              target->generation)) {
2163                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2164                 return(0);
2165         }
2166
2167         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2168          && (cdm->pos.cookie.bus == target->bus)
2169          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2170          && (cdm->pos.cookie.target == target)
2171          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2172          && (cdm->pos.cookie.device != NULL))
2173                 return(xptdevicetraverse(target,
2174                                         (struct cam_ed *)cdm->pos.cookie.device,
2175                                          xptedtdevicefunc, arg));
2176         else
2177                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2178 }
2179
2180 static int
2181 xptedtdevicefunc(struct cam_ed *device, void *arg)
2182 {
2183
2184         struct ccb_dev_match *cdm;
2185         dev_match_ret retval;
2186
2187         cdm = (struct ccb_dev_match *)arg;
2188
2189         /*
2190          * If our position is for something deeper in the tree, that means
2191          * that we've already seen this node.  So, we keep going down.
2192          */
2193         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2194          && (cdm->pos.cookie.device == device)
2195          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2196          && (cdm->pos.cookie.periph != NULL))
2197                 retval = DM_RET_DESCEND;
2198         else
2199                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2200                                         device);
2201
2202         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2203                 cdm->status = CAM_DEV_MATCH_ERROR;
2204                 return(0);
2205         }
2206
2207         /*
2208          * If the copy flag is set, copy this device out.
2209          */
2210         if (retval & DM_RET_COPY) {
2211                 int spaceleft, j;
2212
2213                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2214                         sizeof(struct dev_match_result));
2215
2216                 /*
2217                  * If we don't have enough space to put in another
2218                  * match result, save our position and tell the
2219                  * user there are more devices to check.
2220                  */
2221                 if (spaceleft < sizeof(struct dev_match_result)) {
2222                         bzero(&cdm->pos, sizeof(cdm->pos));
2223                         cdm->pos.position_type = 
2224                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2225                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2226
2227                         cdm->pos.cookie.bus = device->target->bus;
2228                         cdm->pos.generations[CAM_BUS_GENERATION] =
2229                                 bus_generation;
2230                         cdm->pos.cookie.target = device->target;
2231                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2232                                 device->target->bus->generation;
2233                         cdm->pos.cookie.device = device;
2234                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2235                                 device->target->generation;
2236                         cdm->status = CAM_DEV_MATCH_MORE;
2237                         return(0);
2238                 }
2239                 j = cdm->num_matches;
2240                 cdm->num_matches++;
2241                 cdm->matches[j].type = DEV_MATCH_DEVICE;
2242                 cdm->matches[j].result.device_result.path_id =
2243                         device->target->bus->path_id;
2244                 cdm->matches[j].result.device_result.target_id =
2245                         device->target->target_id;
2246                 cdm->matches[j].result.device_result.target_lun =
2247                         device->lun_id;
2248                 bcopy(&device->inq_data,
2249                       &cdm->matches[j].result.device_result.inq_data,
2250                       sizeof(struct scsi_inquiry_data));
2251
2252                 /* Let the user know whether this device is unconfigured */
2253                 if (device->flags & CAM_DEV_UNCONFIGURED)
2254                         cdm->matches[j].result.device_result.flags =
2255                                 DEV_RESULT_UNCONFIGURED;
2256                 else
2257                         cdm->matches[j].result.device_result.flags =
2258                                 DEV_RESULT_NOFLAG;
2259         }
2260
2261         /*
2262          * If the user isn't interested in peripherals, don't descend
2263          * the tree any further.
2264          */
2265         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2266                 return(1);
2267
2268         /*
2269          * If there is a peripheral list generation recorded, make sure
2270          * it hasn't changed.
2271          */
2272         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2273          && (device->target->bus == cdm->pos.cookie.bus)
2274          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2275          && (device->target == cdm->pos.cookie.target)
2276          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2277          && (device == cdm->pos.cookie.device)
2278          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2279          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2280          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2281              device->generation)){
2282                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2283                 return(0);
2284         }
2285
2286         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2287          && (cdm->pos.cookie.bus == device->target->bus)
2288          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2289          && (cdm->pos.cookie.target == device->target)
2290          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2291          && (cdm->pos.cookie.device == device)
2292          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2293          && (cdm->pos.cookie.periph != NULL))
2294                 return(xptperiphtraverse(device,
2295                                 (struct cam_periph *)cdm->pos.cookie.periph,
2296                                 xptedtperiphfunc, arg));
2297         else
2298                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2299 }
2300
2301 static int
2302 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2303 {
2304         struct ccb_dev_match *cdm;
2305         dev_match_ret retval;
2306
2307         cdm = (struct ccb_dev_match *)arg;
2308
2309         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2310
2311         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2312                 cdm->status = CAM_DEV_MATCH_ERROR;
2313                 return(0);
2314         }
2315
2316         /*
2317          * If the copy flag is set, copy this peripheral out.
2318          */
2319         if (retval & DM_RET_COPY) {
2320                 int spaceleft, j;
2321
2322                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2323                         sizeof(struct dev_match_result));
2324
2325                 /*
2326                  * If we don't have enough space to put in another
2327                  * match result, save our position and tell the
2328                  * user there are more devices to check.
2329                  */
2330                 if (spaceleft < sizeof(struct dev_match_result)) {
2331                         bzero(&cdm->pos, sizeof(cdm->pos));
2332                         cdm->pos.position_type = 
2333                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2334                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2335                                 CAM_DEV_POS_PERIPH;
2336
2337                         cdm->pos.cookie.bus = periph->path->bus;
2338                         cdm->pos.generations[CAM_BUS_GENERATION] =
2339                                 bus_generation;
2340                         cdm->pos.cookie.target = periph->path->target;
2341                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2342                                 periph->path->bus->generation;
2343                         cdm->pos.cookie.device = periph->path->device;
2344                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2345                                 periph->path->target->generation;
2346                         cdm->pos.cookie.periph = periph;
2347                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2348                                 periph->path->device->generation;
2349                         cdm->status = CAM_DEV_MATCH_MORE;
2350                         return(0);
2351                 }
2352
2353                 j = cdm->num_matches;
2354                 cdm->num_matches++;
2355                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2356                 cdm->matches[j].result.periph_result.path_id =
2357                         periph->path->bus->path_id;
2358                 cdm->matches[j].result.periph_result.target_id =
2359                         periph->path->target->target_id;
2360                 cdm->matches[j].result.periph_result.target_lun =
2361                         periph->path->device->lun_id;
2362                 cdm->matches[j].result.periph_result.unit_number =
2363                         periph->unit_number;
2364                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2365                         periph->periph_name, DEV_IDLEN);
2366         }
2367
2368         return(1);
2369 }
2370
2371 static int
2372 xptedtmatch(struct ccb_dev_match *cdm)
2373 {
2374         int ret;
2375
2376         cdm->num_matches = 0;
2377
2378         /*
2379          * Check the bus list generation.  If it has changed, the user
2380          * needs to reset everything and start over.
2381          */
2382         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2383          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2384          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2385                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2386                 return(0);
2387         }
2388
2389         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2390          && (cdm->pos.cookie.bus != NULL))
2391                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2392                                      xptedtbusfunc, cdm);
2393         else
2394                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2395
2396         /*
2397          * If we get back 0, that means that we had to stop before fully
2398          * traversing the EDT.  It also means that one of the subroutines
2399          * has set the status field to the proper value.  If we get back 1,
2400          * we've fully traversed the EDT and copied out any matching entries.
2401          */
2402         if (ret == 1)
2403                 cdm->status = CAM_DEV_MATCH_LAST;
2404
2405         return(ret);
2406 }
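
/*
 * Illustrative sketch of the userland loop that drives the matcher
 * above (roughly what camcontrol(8) does): resubmit the same ccb,
 * carrying the position the kernel saved, for as long as
 * CAM_DEV_MATCH_MORE comes back.  With num_patterns == 0 every node
 * matches and the whole EDT is copied out.  Assumes fd is an open
 * xpt(4) descriptor; error handling is abbreviated.
 *
 *	union ccb ccb;
 *	struct dev_match_result matches[64];
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = sizeof(matches);
 *	ccb.cdm.matches = matches;
 *	ccb.cdm.num_patterns = 0;
 *	ccb.cdm.pattern_buf_len = 0;
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			break;
 *		... consume ccb.cdm.num_matches entries in matches[] ...
 *	} while (ccb.ccb_h.status == CAM_REQ_CMP &&
 *		 ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */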
2407
2408 static int
2409 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2410 {
2411         struct ccb_dev_match *cdm;
2412
2413         cdm = (struct ccb_dev_match *)arg;
2414
2415         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2416          && (cdm->pos.cookie.pdrv == pdrv)
2417          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2418          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2419          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2420              (*pdrv)->generation)) {
2421                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2422                 return(0);
2423         }
2424
2425         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2426          && (cdm->pos.cookie.pdrv == pdrv)
2427          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2428          && (cdm->pos.cookie.periph != NULL))
2429                 return(xptpdperiphtraverse(pdrv,
2430                                 (struct cam_periph *)cdm->pos.cookie.periph,
2431                                 xptplistperiphfunc, arg));
2432         else
2433                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2434 }
2435
2436 static int
2437 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2438 {
2439         struct ccb_dev_match *cdm;
2440         dev_match_ret retval;
2441
2442         cdm = (struct ccb_dev_match *)arg;
2443
2444         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2445
2446         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2447                 cdm->status = CAM_DEV_MATCH_ERROR;
2448                 return(0);
2449         }
2450
2451         /*
2452          * If the copy flag is set, copy this peripheral out.
2453          */
2454         if (retval & DM_RET_COPY) {
2455                 int spaceleft, j;
2456
2457                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2458                         sizeof(struct dev_match_result));
2459
2460                 /*
2461                  * If we don't have enough space to put in another
2462                  * match result, save our position and tell the
2463                  * user there are more devices to check.
2464                  */
2465                 if (spaceleft < sizeof(struct dev_match_result)) {
2466                         struct periph_driver **pdrv;
2467
2468                         pdrv = NULL;
2469                         bzero(&cdm->pos, sizeof(cdm->pos));
2470                         cdm->pos.position_type = 
2471                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2472                                 CAM_DEV_POS_PERIPH;
2473
2474                         /*
2475                          * This may look a bit nonsensical, but it is
2476                          * actually quite logical.  There are very few
2477                          * peripheral drivers, and bloating every peripheral
2478                          * structure with a pointer back to its parent
2479                          * peripheral driver linker set entry would cost
2480                          * more in the long run than doing this quick lookup.
2481                          */
2482                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2483                                 if (strcmp((*pdrv)->driver_name,
2484                                     periph->periph_name) == 0)
2485                                         break;
2486                         }
2487
2488                         if (*pdrv == NULL) {
2489                                 cdm->status = CAM_DEV_MATCH_ERROR;
2490                                 return(0);
2491                         }
2492
2493                         cdm->pos.cookie.pdrv = pdrv;
2494                         /*
2495                          * The periph generation slot does double duty, as
2496                          * does the periph pointer slot.  They are used for
2497                          * both edt and pdrv lookups and positioning.
2498                          */
2499                         cdm->pos.cookie.periph = periph;
2500                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2501                                 (*pdrv)->generation;
2502                         cdm->status = CAM_DEV_MATCH_MORE;
2503                         return(0);
2504                 }
2505
2506                 j = cdm->num_matches;
2507                 cdm->num_matches++;
2508                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2509                 cdm->matches[j].result.periph_result.path_id =
2510                         periph->path->bus->path_id;
2511
2512                 /*
2513                  * The transport layer peripheral doesn't have a target or
2514                  * lun.
2515                  */
2516                 if (periph->path->target)
2517                         cdm->matches[j].result.periph_result.target_id =
2518                                 periph->path->target->target_id;
2519                 else
2520                         cdm->matches[j].result.periph_result.target_id = -1;
2521
2522                 if (periph->path->device)
2523                         cdm->matches[j].result.periph_result.target_lun =
2524                                 periph->path->device->lun_id;
2525                 else
2526                         cdm->matches[j].result.periph_result.target_lun = -1;
2527
2528                 cdm->matches[j].result.periph_result.unit_number =
2529                         periph->unit_number;
2530                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2531                         periph->periph_name, DEV_IDLEN);
2532         }
2533
2534         return(1);
2535 }
2536
2537 static int
2538 xptperiphlistmatch(struct ccb_dev_match *cdm)
2539 {
2540         int ret;
2541
2542         cdm->num_matches = 0;
2543
2544         /*
2545          * At this point in the edt traversal function, we check the bus
2546          * list generation to make sure that no busses have been added or
2547          * removed since the user last sent an XPT_DEV_MATCH ccb through.
2548          * For the peripheral driver list traversal function, however, we
2549          * don't have to worry about new peripheral driver types coming or
2550          * going; they're in a linker set, and therefore can't change
2551          * without a recompile.
2552          */
2553
2554         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2555          && (cdm->pos.cookie.pdrv != NULL))
2556                 ret = xptpdrvtraverse(
2557                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2558                                 xptplistpdrvfunc, cdm);
2559         else
2560                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2561
2562         /*
2563          * If we get back 0, that means that we had to stop before fully
2564          * traversing the peripheral driver list.  It also means that one of
2565          * the subroutines has set the status field to the proper value.  If
2566          * we get back 1, we've fully traversed the list and copied out any
2567          * matching entries.
2568          */
2569         if (ret == 1)
2570                 cdm->status = CAM_DEV_MATCH_LAST;
2571
2572         return(ret);
2573 }
2574
2575 static int
2576 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2577 {
2578         struct cam_eb *bus, *next_bus;
2579         int retval;
2580
2581         retval = 1;
2582
2583         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2584              bus != NULL;
2585              bus = next_bus) {
2586                 next_bus = TAILQ_NEXT(bus, links);
2587
2588                 retval = tr_func(bus, arg);
2589                 if (retval == 0)
2590                         return(retval);
2591         }
2592
2593         return(retval);
2594 }
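
/*
 * Example traversal callback (illustrative): count the registered
 * busses.  Returning 1 keeps the walk above going; returning 0 would
 * abort it early, exactly as the loop above expects.
 *
 *	static int
 *	xptcountbusfunc(struct cam_eb *bus, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);
 *	}
 *
 * and then, from the caller:
 *
 *	int count = 0;
 *
 *	xptbustraverse(NULL, xptcountbusfunc, &count);
 */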
2595
2596 static int
2597 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2598                   xpt_targetfunc_t *tr_func, void *arg)
2599 {
2600         struct cam_et *target, *next_target;
2601         int retval;
2602
2603         retval = 1;
2604         for (target = (start_target ? start_target :
2605                        TAILQ_FIRST(&bus->et_entries));
2606              target != NULL; target = next_target) {
2607
2608                 next_target = TAILQ_NEXT(target, links);
2609
2610                 retval = tr_func(target, arg);
2611
2612                 if (retval == 0)
2613                         return(retval);
2614         }
2615
2616         return(retval);
2617 }
2618
2619 static int
2620 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2621                   xpt_devicefunc_t *tr_func, void *arg)
2622 {
2623         struct cam_ed *device, *next_device;
2624         int retval;
2625
2626         retval = 1;
2627         for (device = (start_device ? start_device :
2628                        TAILQ_FIRST(&target->ed_entries));
2629              device != NULL;
2630              device = next_device) {
2631
2632                 next_device = TAILQ_NEXT(device, links);
2633
2634                 retval = tr_func(device, arg);
2635
2636                 if (retval == 0)
2637                         return(retval);
2638         }
2639
2640         return(retval);
2641 }
2642
2643 static int
2644 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2645                   xpt_periphfunc_t *tr_func, void *arg)
2646 {
2647         struct cam_periph *periph, *next_periph;
2648         int retval;
2649
2650         retval = 1;
2651
2652         for (periph = (start_periph ? start_periph :
2653                        SLIST_FIRST(&device->periphs));
2654              periph != NULL;
2655              periph = next_periph) {
2656
2657                 next_periph = SLIST_NEXT(periph, periph_links);
2658
2659                 retval = tr_func(periph, arg);
2660                 if (retval == 0)
2661                         return(retval);
2662         }
2663
2664         return(retval);
2665 }
2666
2667 static int
2668 xptpdrvtraverse(struct periph_driver **start_pdrv,
2669                 xpt_pdrvfunc_t *tr_func, void *arg)
2670 {
2671         struct periph_driver **pdrv;
2672         int retval;
2673
2674         retval = 1;
2675
2676         /*
2677          * We don't traverse the peripheral driver list like we do the
2678          * other lists, because it is a linker set, and therefore cannot be
2679          * changed during runtime.  If the peripheral driver list is ever
2680          * re-done to be something other than a linker set (i.e. it can
2681          * change while the system is running), the list traversal should
2682          * be modified to work like the other traversal functions.
2683          */
2684         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2685              *pdrv != NULL; pdrv++) {
2686                 retval = tr_func(pdrv, arg);
2687
2688                 if (retval == 0)
2689                         return(retval);
2690         }
2691
2692         return(retval);
2693 }
2694
2695 static int
2696 xptpdperiphtraverse(struct periph_driver **pdrv,
2697                     struct cam_periph *start_periph,
2698                     xpt_periphfunc_t *tr_func, void *arg)
2699 {
2700         struct cam_periph *periph, *next_periph;
2701         int retval;
2702
2703         retval = 1;
2704
2705         for (periph = (start_periph ? start_periph :
2706              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2707              periph = next_periph) {
2708
2709                 next_periph = TAILQ_NEXT(periph, unit_links);
2710
2711                 retval = tr_func(periph, arg);
2712                 if (retval == 0)
2713                         return(retval);
2714         }
2715         return(retval);
2716 }
2717
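/*
 * The xptdef*func() callbacks below implement the depth-limited
 * traversals used by the xpt_for_all_*() helpers: each level compares
 * tr_config->depth against its own level and either runs the user's
 * function here or recurses one level deeper into the EDT.
 */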
2718 static int
2719 xptdefbusfunc(struct cam_eb *bus, void *arg)
2720 {
2721         struct xpt_traverse_config *tr_config;
2722
2723         tr_config = (struct xpt_traverse_config *)arg;
2724
2725         if (tr_config->depth == XPT_DEPTH_BUS) {
2726                 xpt_busfunc_t *tr_func;
2727
2728                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2729
2730                 return(tr_func(bus, tr_config->tr_arg));
2731         } else
2732                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2733 }
2734
2735 static int
2736 xptdeftargetfunc(struct cam_et *target, void *arg)
2737 {
2738         struct xpt_traverse_config *tr_config;
2739
2740         tr_config = (struct xpt_traverse_config *)arg;
2741
2742         if (tr_config->depth == XPT_DEPTH_TARGET) {
2743                 xpt_targetfunc_t *tr_func;
2744
2745                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2746
2747                 return(tr_func(target, tr_config->tr_arg));
2748         } else
2749                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2750 }
2751
2752 static int
2753 xptdefdevicefunc(struct cam_ed *device, void *arg)
2754 {
2755         struct xpt_traverse_config *tr_config;
2756
2757         tr_config = (struct xpt_traverse_config *)arg;
2758
2759         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2760                 xpt_devicefunc_t *tr_func;
2761
2762                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2763
2764                 return(tr_func(device, tr_config->tr_arg));
2765         } else
2766                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2767 }
2768
2769 static int
2770 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2771 {
2772         struct xpt_traverse_config *tr_config;
2773         xpt_periphfunc_t *tr_func;
2774
2775         tr_config = (struct xpt_traverse_config *)arg;
2776
2777         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2778
2779         /*
2780          * Unlike the other default functions, we don't check for depth
2781          * here.  The peripheral driver level is the last level in the EDT,
2782          * so if we're here, we should execute the function in question.
2783          */
2784         return(tr_func(periph, tr_config->tr_arg));
2785 }
2786
2787 /*
2788  * Execute the given function for every bus in the EDT.
2789  */
2790 static int
2791 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2792 {
2793         struct xpt_traverse_config tr_config;
2794
2795         tr_config.depth = XPT_DEPTH_BUS;
2796         tr_config.tr_func = tr_func;
2797         tr_config.tr_arg = arg;
2798
2799         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2800 }
2801
2802 #ifdef notusedyet
2803 /*
2804  * Execute the given function for every target in the EDT.
2805  */
2806 static int
2807 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2808 {
2809         struct xpt_traverse_config tr_config;
2810
2811         tr_config.depth = XPT_DEPTH_TARGET;
2812         tr_config.tr_func = tr_func;
2813         tr_config.tr_arg = arg;
2814
2815         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2816 }
2817 #endif /* notusedyet */
2818
2819 /*
2820  * Execute the given function for every device in the EDT.
2821  */
2822 static int
2823 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2824 {
2825         struct xpt_traverse_config tr_config;
2826
2827         tr_config.depth = XPT_DEPTH_DEVICE;
2828         tr_config.tr_func = tr_func;
2829         tr_config.tr_arg = arg;
2830
2831         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2832 }
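/*
 * xpt_for_all_devices() is used by the XPT_SASYNC_CB handler in
 * xpt_action() below to replay AC_FOUND_DEVICE events for devices
 * that already existed when a callback was registered.
 */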
2833
2834 #ifdef notusedyet
2835 /*
2836  * Execute the given function for every peripheral in the EDT.
2837  */
2838 static int
2839 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2840 {
2841         struct xpt_traverse_config tr_config;
2842
2843         tr_config.depth = XPT_DEPTH_PERIPH;
2844         tr_config.tr_func = tr_func;
2845         tr_config.tr_arg = arg;
2846
2847         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2848 }
2849 #endif /* notusedyet */
2850
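/*
 * Per-device callout for new AC_FOUND_DEVICE subscribers: compile a
 * temporary path to the device, fetch its ccb_getdev data with an
 * XPT_GDEV_TYPE action, and synthesize the async callback so the
 * subscriber hears about devices discovered before it registered.
 */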
2851 static int
2852 xptsetasyncfunc(struct cam_ed *device, void *arg)
2853 {
2854         struct cam_path path;
2855         struct ccb_getdev cgd;
2856         struct async_node *cur_entry;
2857
2858         cur_entry = (struct async_node *)arg;
2859
2860         /*
2861          * Don't report unconfigured devices (Wildcard devs,
2862          * devices only for target mode, device instances
2863          * that have been invalidated but are waiting for
2864          * their last reference count to be released).
2865          */
2866         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2867                 return (1);
2868
2869         xpt_compile_path(&path,
2870                          NULL,
2871                          device->target->bus->path_id,
2872                          device->target->target_id,
2873                          device->lun_id);
2874         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2875         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2876         xpt_action((union ccb *)&cgd);
2877         cur_entry->callback(cur_entry->callback_arg,
2878                             AC_FOUND_DEVICE,
2879                             &path, &cgd);
2880         xpt_release_path(&path);
2881
2882         return(1);
2883 }
2884
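/*
 * Bus analog of xptsetasyncfunc(): replay an AC_PATH_REGISTERED event,
 * carrying the bus's XPT_PATH_INQ data, for each bus that existed
 * before the subscriber registered.
 */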
2885 static int
2886 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2887 {
2888         struct cam_path path;
2889         struct ccb_pathinq cpi;
2890         struct async_node *cur_entry;
2891
2892         cur_entry = (struct async_node *)arg;
2893
2894         xpt_compile_path(&path, /*periph*/NULL,
2895                          bus->sim->path_id,
2896                          CAM_TARGET_WILDCARD,
2897                          CAM_LUN_WILDCARD);
2898         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2899         cpi.ccb_h.func_code = XPT_PATH_INQ;
2900         xpt_action((union ccb *)&cpi);
2901         cur_entry->callback(cur_entry->callback_arg,
2902                             AC_PATH_REGISTERED,
2903                             &path, &cpi);
2904         xpt_release_path(&path);
2905
2906         return(1);
2907 }
2908
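/*
 * xpt_action() is the single entry point through which clients hand a
 * CCB to the transport layer.  It marks the CCB as in progress and
 * dispatches on ccb_h.func_code, either servicing the request in the
 * XPT itself or passing it down to the SIM; the entire switch runs
 * inside a critical section.
 */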
2909 void
2910 xpt_action(union ccb *start_ccb)
2911 {
2912         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2913
2914         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2915
2916         crit_enter();
2917
2918         switch (start_ccb->ccb_h.func_code) {
2919         case XPT_SCSI_IO:
2920         {
2921 #ifdef CAM_NEW_TRAN_CODE
2922                 struct cam_ed *device;
2923 #endif /* CAM_NEW_TRAN_CODE */
2924 #ifdef CAMDEBUG
2925                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2926                 struct cam_path *path;
2927
2928                 path = start_ccb->ccb_h.path;
2929 #endif
2930
2931                 /*
2932                  * For the sake of compatibility with SCSI-1
2933                  * devices that may not understand the identify
2934                  * message, we include lun information in the
2935                  * second byte of all commands.  SCSI-1 specifies
2936                  * that luns are a 3 bit value and reserves only 3
2937                  * bits for lun information in the CDB.  Later
2938                  * revisions of the SCSI spec allow for more than 8
2939                  * luns, but have deprecated lun information in the
2940                  * CDB.  So, if the lun won't fit, we must omit.
2941                  *
2942                  * Also be aware that during initial probing for devices,
2943                  * the inquiry information is unknown but initialized to 0.
2944                  * This means that this code will be exercised while probing
2945                  * devices with an ANSI revision greater than 2.
2946                  */
2947 #ifdef CAM_NEW_TRAN_CODE
2948                 device = start_ccb->ccb_h.path->device;
2949                 if (device->protocol_version <= SCSI_REV_2
2950 #else /* CAM_NEW_TRAN_CODE */
2951                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2952 #endif /* CAM_NEW_TRAN_CODE */
2953                  && start_ccb->ccb_h.target_lun < 8
2954                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2955
2956                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2957                             start_ccb->ccb_h.target_lun << 5;
2958                 }
2959                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2960                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2961                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2962                                        &path->device->inq_data),
2963                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2964                                           cdb_str, sizeof(cdb_str))));
2965                 /* FALLTHROUGH */
2966         }
2967         case XPT_TARGET_IO:
2968         case XPT_CONT_TARGET_IO:
2969                 start_ccb->csio.sense_resid = 0;
2970                 start_ccb->csio.resid = 0;
2971                 /* FALLTHROUGH */
2972         case XPT_RESET_DEV:
2973         case XPT_ENG_EXEC:
2974         {
2975                 struct cam_path *path;
2976                 int runq;
2977
2978                 path = start_ccb->ccb_h.path;
2979
2980                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2981                 if (path->device->qfrozen_cnt == 0)
2982                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
2983                 else
2984                         runq = 0;
2985                 if (runq != 0)
2986                         xpt_run_dev_sendq(path->bus);
2987                 break;
2988         }
2989         case XPT_SET_TRAN_SETTINGS:
2990         {
2991                 xpt_set_transfer_settings(&start_ccb->cts,
2992                                           start_ccb->ccb_h.path->device,
2993                                           /*async_update*/FALSE);
2994                 break;
2995         }
2996         case XPT_CALC_GEOMETRY:
2997         {
2998                 struct cam_sim *sim;
2999
3000                 /* Filter out garbage */
3001                 if (start_ccb->ccg.block_size == 0
3002                  || start_ccb->ccg.volume_size == 0) {
3003                         start_ccb->ccg.cylinders = 0;
3004                         start_ccb->ccg.heads = 0;
3005                         start_ccb->ccg.secs_per_track = 0;
3006                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3007                         break;
3008                 }
3009                 sim = start_ccb->ccb_h.path->bus->sim;
3010                 (*(sim->sim_action))(sim, start_ccb);
3011                 break;
3012         }
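        /*
         * An abort request can be satisfied in one of three places: the
         * CCB may still be sitting in the device queue, it may be en
         * route to the SIM, or it may already be on the done queue
         * waiting for our SWI.  Anything else is handed to the SIM.
         */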
3013         case XPT_ABORT:
3014         {
3015                 union ccb* abort_ccb;
3016
3017                 abort_ccb = start_ccb->cab.abort_ccb;
3018                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3019
3020                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
3021                                 struct cam_ccbq *ccbq;
3022
3023                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3024                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3025                                 abort_ccb->ccb_h.status =
3026                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3027                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3028                                 xpt_done(abort_ccb);
3029                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3030                                 break;
3031                         }
3032                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3033                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3034                                 /*
3035                                  * We've caught this ccb en route to
3036                                  * the SIM.  Flag it for abort and the
3037                                  * SIM will do so just before starting
3038                                  * real work on the CCB.
3039                                  */
3040                                 abort_ccb->ccb_h.status =
3041                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3042                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3043                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3044                                 break;
3045                         }
3046                 } 
3047                 if (XPT_FC_IS_QUEUED(abort_ccb)
3048                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3049                         /*
3050                          * It's already completed but waiting
3051                          * for our SWI to get to it.
3052                          */
3053                         start_ccb->ccb_h.status = CAM_UA_ABORT;
3054                         break;
3055                 }
3056                 /*
3057                  * If we weren't able to take care of the abort request
3058                  * in the XPT, pass the request down to the SIM for processing.
3059                  */
3060                 /* FALLTHROUGH */
3061         }
3062         case XPT_ACCEPT_TARGET_IO:
3063         case XPT_EN_LUN:
3064         case XPT_IMMED_NOTIFY:
3065         case XPT_NOTIFY_ACK:
3066         case XPT_GET_TRAN_SETTINGS:
3067         case XPT_RESET_BUS:
3068         {
3069                 struct cam_sim *sim;
3070
3071                 sim = start_ccb->ccb_h.path->bus->sim;
3072                 (*(sim->sim_action))(sim, start_ccb);
3073                 break;
3074         }
3075         case XPT_PATH_INQ:
3076         {
3077                 struct cam_sim *sim;
3078
3079                 sim = start_ccb->ccb_h.path->bus->sim;
3080                 (*(sim->sim_action))(sim, start_ccb);
3081                 break;
3082         }
3083         case XPT_PATH_STATS:
3084                 start_ccb->cpis.last_reset =
3085                         start_ccb->ccb_h.path->bus->last_reset;
3086                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3087                 break;
3088         case XPT_GDEV_TYPE:
3089         {
3090                 struct cam_ed *dev;
3091
3092                 dev = start_ccb->ccb_h.path->device;
3093                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3094                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3095                 } else {
3096                         struct ccb_getdev *cgd;
3099
3100                         cgd = &start_ccb->cgd;
3103                         cgd->inq_data = dev->inq_data;
3104                         cgd->ccb_h.status = CAM_REQ_CMP;
3105                         cgd->serial_num_len = dev->serial_num_len;
3106                         if ((dev->serial_num_len > 0)
3107                          && (dev->serial_num != NULL))
3108                                 bcopy(dev->serial_num, cgd->serial_num,
3109                                       dev->serial_num_len);
3110                 }
3111                 break; 
3112         }
3113         case XPT_GDEV_STATS:
3114         {
3115                 struct cam_ed *dev;
3116
3117                 dev = start_ccb->ccb_h.path->device;
3118                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3119                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3120                 } else {
3121                         struct ccb_getdevstats *cgds;
3122                         struct cam_eb *bus;
3123                         struct cam_et *tar;
3124
3125                         cgds = &start_ccb->cgds;
3126                         bus = cgds->ccb_h.path->bus;
3127                         tar = cgds->ccb_h.path->target;
3128                         cgds->dev_openings = dev->ccbq.dev_openings;
3129                         cgds->dev_active = dev->ccbq.dev_active;
3130                         cgds->devq_openings = dev->ccbq.devq_openings;
3131                         cgds->devq_queued = dev->ccbq.queue.entries;
3132                         cgds->held = dev->ccbq.held;
3133                         cgds->last_reset = tar->last_reset;
3134                         cgds->maxtags = dev->quirk->maxtags;
3135                         cgds->mintags = dev->quirk->mintags;
3136                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3137                                 cgds->last_reset = bus->last_reset;
3138                         cgds->ccb_h.status = CAM_REQ_CMP;
3139                 }
3140                 break;
3141         }
3142         case XPT_GDEVLIST:
3143         {
3144                 struct cam_periph       *nperiph;
3145                 struct periph_list      *periph_head;
3146                 struct ccb_getdevlist   *cgdl;
3147                 u_int                   i;
3148                 struct cam_ed           *device;
3149                 int                     found;
3150
3151
3152                 found = 0;
3153
3154                 /*
3155                  * Don't want anyone mucking with our data.
3156                  */
3157                 device = start_ccb->ccb_h.path->device;
3158                 periph_head = &device->periphs;
3159                 cgdl = &start_ccb->cgdl;
3160
3161                 /*
3162                  * Check and see if the list has changed since the user
3163                  * last requested a list member.  If so, tell them that the
3164                  * list has changed, and therefore they need to start over 
3165                  * from the beginning.
3166                  */
3167                 if ((cgdl->index != 0) && 
3168                     (cgdl->generation != device->generation)) {
3169                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3170                         break;
3171                 }
3172
3173                 /*
3174                  * Traverse the list of peripherals and attempt to find 
3175                  * the requested peripheral.
3176                  */
3177                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3178                      (nperiph != NULL) && (i <= cgdl->index);
3179                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3180                         if (i == cgdl->index) {
3181                                 strncpy(cgdl->periph_name,
3182                                         nperiph->periph_name,
3183                                         DEV_IDLEN);
3184                                 cgdl->unit_number = nperiph->unit_number;
3185                                 found = 1;
3186                         }
3187                 }
3188                 if (found == 0) {
3189                         cgdl->status = CAM_GDEVLIST_ERROR;
3190                         break;
3191                 }
3192
3193                 if (nperiph == NULL)
3194                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3195                 else
3196                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3197
3198                 cgdl->index++;
3199                 cgdl->generation = device->generation;
3200
3201                 cgdl->ccb_h.status = CAM_REQ_CMP;
3202                 break;
3203         }
3204         case XPT_DEV_MATCH:
3205         {
3206                 dev_pos_type position_type;
3207                 struct ccb_dev_match *cdm;
3208                 int ret;
3209
3210                 cdm = &start_ccb->cdm;
3211
3212                 /*
3213                  * Prevent EDT changes while we traverse it.
3214                  */
3215                 /*
3216                  * There are two ways of getting at information in the EDT.
3217                  * The first way is via the primary EDT tree.  It starts
3218                  * with a list of busses, then a list of targets on a bus,
3219                  * then devices/luns on a target, and then peripherals on a
3220          * device/lun.  The other way is via the peripheral driver
3221          * lists, which are organized, naturally enough, by
3222          * peripheral driver.  So it makes sense to
3223                  * use the peripheral driver list if the user is looking
3224                  * for something like "da1", or all "da" devices.  If the
3225                  * user is looking for something on a particular bus/target
3226                  * or lun, it's generally better to go through the EDT tree.
3227                  */
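                /*
                 * A sketch of the caller-side setup, assuming the
                 * usual struct ccb_dev_match fields (buffer sizing,
                 * pattern construction, and the priority value are
                 * illustrative):
                 *
                 *	struct ccb_dev_match cdm;
                 *
                 *	xpt_setup_ccb(&cdm.ccb_h, path, 5);
                 *	cdm.ccb_h.func_code = XPT_DEV_MATCH;
                 *	cdm.num_patterns = 0;		(match everything)
                 *	cdm.match_buf_len = bufsize;
                 *	cdm.matches = match_buffer;
                 *	cdm.pos.position_type = CAM_DEV_POS_NONE;
                 *	xpt_action((union ccb *)&cdm);
                 */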
3228
3229                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3230                         position_type = cdm->pos.position_type;
3231                 else {
3232                         u_int i;
3233
3234                         position_type = CAM_DEV_POS_NONE;
3235
3236                         for (i = 0; i < cdm->num_patterns; i++) {
3237                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3238                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3239                                         position_type = CAM_DEV_POS_EDT;
3240                                         break;
3241                                 }
3242                         }
3243
3244                         if (cdm->num_patterns == 0)
3245                                 position_type = CAM_DEV_POS_EDT;
3246                         else if (position_type == CAM_DEV_POS_NONE)
3247                                 position_type = CAM_DEV_POS_PDRV;
3248                 }
3249
3250                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3251                 case CAM_DEV_POS_EDT:
3252                         ret = xptedtmatch(cdm);
3253                         break;
3254                 case CAM_DEV_POS_PDRV:
3255                         ret = xptperiphlistmatch(cdm);
3256                         break;
3257                 default:
3258                         cdm->status = CAM_DEV_MATCH_ERROR;
3259                         break;
3260                 }
3261
3262                 if (cdm->status == CAM_DEV_MATCH_ERROR)
3263                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3264                 else
3265                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3266
3267                 break;
3268         }
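        /*
         * XPT_SASYNC_CB registers (or updates, or removes) an async
         * event callback on a path.  From a peripheral driver the
         * registration looks roughly like this (callback name and
         * priority are illustrative):
         *
         *	struct ccb_setasync csa;
         *
         *	xpt_setup_ccb(&csa.ccb_h, path, 5);
         *	csa.ccb_h.func_code = XPT_SASYNC_CB;
         *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
         *	csa.callback = xxasync;
         *	csa.callback_arg = periph;
         *	xpt_action((union ccb *)&csa);
         */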
3269         case XPT_SASYNC_CB:
3270         {
3271                 struct ccb_setasync *csa;
3272                 struct async_node *cur_entry;
3273                 struct async_list *async_head;
3274                 u_int32_t added;
3275
3276                 csa = &start_ccb->csa;
3277                 added = csa->event_enable;
3278                 async_head = &csa->ccb_h.path->device->asyncs;
3279
3280                 /*
3281                  * If there is already an entry for us, simply
3282                  * update it.
3283                  */
3284                 cur_entry = SLIST_FIRST(async_head);
3285                 while (cur_entry != NULL) {
3286                         if ((cur_entry->callback_arg == csa->callback_arg)
3287                          && (cur_entry->callback == csa->callback))
3288                                 break;
3289                         cur_entry = SLIST_NEXT(cur_entry, links);
3290                 }
3291
3292                 if (cur_entry != NULL) {
3293                         /*
3294                          * If the request has no flags set,
3295                          * remove the entry.
3296                          */
3297                         added &= ~cur_entry->event_enable;
3298                         if (csa->event_enable == 0) {
3299                                 SLIST_REMOVE(async_head, cur_entry,
3300                                              async_node, links);
3301                                 csa->ccb_h.path->device->refcount--;
3302                                 kfree(cur_entry, M_DEVBUF);
3303                         } else {
3304                                 cur_entry->event_enable = csa->event_enable;
3305                         }
3306                 } else {
3307                         cur_entry = kmalloc(sizeof(*cur_entry), 
3308                                             M_DEVBUF, M_INTWAIT);
3309                         cur_entry->event_enable = csa->event_enable;
3310                         cur_entry->callback_arg = csa->callback_arg;
3311                         cur_entry->callback = csa->callback;
3312                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3313                         csa->ccb_h.path->device->refcount++;
3314                 }
3315
3316                 if ((added & AC_FOUND_DEVICE) != 0) {
3317                         /*
3318                          * Get this peripheral up to date with all
3319                          * the currently existing devices.
3320                          */
3321                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3322                 }
3323                 if ((added & AC_PATH_REGISTERED) != 0) {
3324                         /*
3325                          * Get this peripheral up to date with all
3326                          * the currently existing busses.
3327                          */
3328                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3329                 }
3330                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3331                 break;
3332         }
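        /*
         * XPT_REL_SIMQ releases a device queue freeze, optionally
         * resizing the tagged opening count first or deferring the
         * release until a timeout fires, a command completes, or the
         * queue empties, as selected by crs->release_flags.
         */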
3333         case XPT_REL_SIMQ:
3334         {
3335                 struct ccb_relsim *crs;
3336                 struct cam_ed *dev;
3337
3338                 crs = &start_ccb->crs;
3339                 dev = crs->ccb_h.path->device;
3340                 if (dev == NULL) {
3341
3342                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3343                         break;
3344                 }
3345
3346                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3347
3348                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3349
3350                                 /* Don't ever go below one opening */
3351                                 if (crs->openings > 0) {
3352                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
3353                                                             crs->openings);
3354
3355                                         if (bootverbose) {
3356                                                 xpt_print_path(crs->ccb_h.path);
3357                                                 kprintf("tagged openings "
3358                                                        "now %d\n",
3359                                                        crs->openings);
3360                                         }
3361                                 }
3362                         }
3363                 }
3364
3365                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3366
3367                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3368
3369                                 /*
3370                                  * Just extend the old timeout and decrement
3371                                  * the freeze count so that a single timeout
3372                                  * is sufficient for releasing the queue.
3373                                  */
3374                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3375                                 callout_stop(&dev->c_handle);
3376                         } else {
3377
3378                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3379                         }
3380
3381                         callout_reset(&dev->c_handle,
3382                                       (crs->release_timeout * hz) / 1000, 
3383                                       xpt_release_devq_timeout, dev);
3384
3385                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3386
3387                 }
3388
3389                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3390
3391                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3392                                 /*
3393                                  * Decrement the freeze count so that a single
3394                                  * completion is still sufficient to unfreeze
3395                                  * the queue.
3396                                  */
3397                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3398                         } else {
3399                                 
3400                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3401                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3402                         }
3403                 }
3404
3405                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3406
3407                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3408                          || (dev->ccbq.dev_active == 0)) {
3409
3410                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3411                         } else {
3412                                 
3413                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3414                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3415                         }
3416                 }
3417                 
3418                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3419
3420                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
3421                                          /*run_queue*/TRUE);
3422                 }
3423                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3424                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3425                 break;
3426         }
3427         case XPT_SCAN_BUS:
3428                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3429                 break;
3430         case XPT_SCAN_LUN:
3431                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3432                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
3433                              start_ccb);
3434                 break;
3435         case XPT_DEBUG: {
3436 #ifdef CAMDEBUG
3437 #ifdef CAM_DEBUG_DELAY
3438                 cam_debug_delay = CAM_DEBUG_DELAY;
3439 #endif
3440                 cam_dflags = start_ccb->cdbg.flags;
3441                 if (cam_dpath != NULL) {
3442                         xpt_free_path(cam_dpath);
3443                         cam_dpath = NULL;
3444                 }
3445
3446                 if (cam_dflags != CAM_DEBUG_NONE) {
3447                         if (xpt_create_path(&cam_dpath, xpt_periph,
3448                                             start_ccb->ccb_h.path_id,
3449                                             start_ccb->ccb_h.target_id,
3450                                             start_ccb->ccb_h.target_lun) !=
3451                                             CAM_REQ_CMP) {
3452                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3453                                 cam_dflags = CAM_DEBUG_NONE;
3454                         } else {
3455                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3456                                 xpt_print_path(cam_dpath);
3457                                 kprintf("debugging flags now %x\n", cam_dflags);
3458                         }
3459                 } else {
3461                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3462                 }
3463 #else /* !CAMDEBUG */
3464                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3465 #endif /* CAMDEBUG */
3466                 break;
3467         }
3468         case XPT_NOOP:
3469                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3470                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3471                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3472                 break;
3473         default:
3474         case XPT_SDEV_TYPE:
3475         case XPT_TERM_IO:
3476         case XPT_ENG_INQ:
3477                 /* XXX Implement */
3478                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3479                 break;
3480         }
3481         crit_exit();
3482 }
3483
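/*
 * Execute a CCB by polling the SIM instead of waiting for an
 * interrupt: reserve an opening, start the request with xpt_action(),
 * and run sim_poll/swi_cambio at millisecond intervals until the CCB
 * leaves CAM_REQ_INPROG or its timeout expires.  This lets I/O
 * complete with interrupts disabled, e.g. while taking a crash dump.
 */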
3484 void
3485 xpt_polled_action(union ccb *start_ccb)
3486 {
3487         u_int32_t timeout;
3488         struct    cam_sim *sim; 
3489         struct    cam_devq *devq;
3490         struct    cam_ed *dev;
3491
3492         timeout = start_ccb->ccb_h.timeout;
3493         sim = start_ccb->ccb_h.path->bus->sim;
3494         devq = sim->devq;
3495         dev = start_ccb->ccb_h.path->device;
3496
3497         crit_enter();
3498
3499         /*
3500          * Steal an opening so that no other queued requests
3501          * can get it before us while we simulate interrupts.
3502          */
3503         dev->ccbq.devq_openings--;
3504         dev->ccbq.dev_openings--;       
3505         
3506         while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3507            && (--timeout > 0)) {
3508                 DELAY(1000);
3509                 (*(sim->sim_poll))(sim);
3510                 swi_cambio(NULL, NULL);         
3511         }
3512         
3513         dev->ccbq.devq_openings++;
3514         dev->ccbq.dev_openings++;
3515         
3516         if (timeout != 0) {
3517                 xpt_action(start_ccb);
3518                 while(--timeout > 0) {
3519                         (*(sim->sim_poll))(sim);
3520                         swi_cambio(NULL, NULL);
3521                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3522                             != CAM_REQ_INPROG)
3523                                 break;
3524                         DELAY(1000);
3525                 }
3526                 if (timeout == 0) {
3527                         /*
3528                          * XXX Is it worth adding a sim_timeout entry
3529                          * point so we can attempt recovery?  If
3530                          * this is only used for dumps, I don't think
3531                          * it is.
3532                          */
3533                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3534                 }
3535         } else {
3536                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3537         }
3538         crit_exit();
3539 }
3540         
3541 /*
3542  * Schedule a peripheral driver to receive a ccb when its
3543  * target device has space for more transactions.
3544  */
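/*
 * Typical flow: a peripheral driver calls xpt_schedule() when it has
 * work to do; once the device can allocate a CCB,
 * xpt_run_dev_allocq() below pops the highest priority peripheral off
 * the device's drvq and hands it the CCB via periph_start().
 */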
3545 void
3546 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3547 {
3548         struct cam_ed *device;
3549         int runq;
3550
3551         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3552         device = perph->path->device;
3553         crit_enter();
3554         if (periph_is_queued(perph)) {
3555                 /* Simply reorder based on new priority */
3556                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3557                           ("   change priority to %d\n", new_priority));
3558                 if (new_priority < perph->pinfo.priority) {
3559                         camq_change_priority(&device->drvq,
3560                                              perph->pinfo.index,
3561                                              new_priority);
3562                 }
3563                 runq = 0;
3564         } else {
3565                 /* New entry on the queue */
3566                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3567                           ("   added periph to queue\n"));
3568                 perph->pinfo.priority = new_priority;
3569                 perph->pinfo.generation = ++device->drvq.generation;
3570                 camq_insert(&device->drvq, &perph->pinfo);
3571                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3572         }
3573         crit_exit();
3574         if (runq != 0) {
3575                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3576                           ("   calling xpt_run_dev_allocq\n"));
3577                 xpt_run_dev_allocq(perph->path->bus);
3578         }
3579 }
3580
3581
3582 /*
3583  * Schedule a device to run on a given queue.
3584  * If the device was inserted as a new entry on the queue,
3585  * return 1 meaning the device queue should be run. If we
3586  * were already queued, implying someone else has already
3587  * started the queue, return 0 so the caller doesn't attempt
3588  * to run the queue.  Must be run in a critical section.
3589  */
3590 static int
3591 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3592                  u_int32_t new_priority)
3593 {
3594         int retval;
3595         u_int32_t old_priority;
3596
3597         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3598
3599         old_priority = pinfo->priority;
3600
3601         /*
3602          * Are we already queued?
3603          */
3604         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3605                 /* Simply reorder based on new priority */
3606                 if (new_priority < old_priority) {
3607                         camq_change_priority(queue, pinfo->index,
3608                                              new_priority);
3609                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3610                                         ("changed priority to %d\n",
3611                                          new_priority));
3612                 }
3613                 retval = 0;
3614         } else {
3615                 /* New entry on the queue */
3616                 if (new_priority < old_priority)
3617                         pinfo->priority = new_priority;
3618
3619                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3620                                 ("Inserting onto queue\n"));
3621                 pinfo->generation = ++queue->generation;
3622                 camq_insert(queue, pinfo);
3623                 retval = 1;
3624         }
3625         return (retval);
3626 }
3627
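/*
 * Drain the bus's CCB allocation queue: while openings are available
 * and the queue isn't frozen, pop the highest priority device, get it
 * a CCB with xpt_get_ccb(), and pass that CCB to the device's highest
 * priority peripheral through its periph_start() routine.
 */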
3628 static void
3629 xpt_run_dev_allocq(struct cam_eb *bus)
3630 {
3631         struct  cam_devq *devq;
3632
3633         if ((devq = bus->sim->devq) == NULL) {
3634                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
3635                 return;
3636         }
3637         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3638
3639         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3640                         ("   qfrozen_cnt == 0x%x, entries == %d, "
3641                          "openings == %d, active == %d\n",
3642                          devq->alloc_queue.qfrozen_cnt,
3643                          devq->alloc_queue.entries,
3644                          devq->alloc_openings,
3645                          devq->alloc_active));
3646
3647         crit_enter();
3648         devq->alloc_queue.qfrozen_cnt++;
3649         while ((devq->alloc_queue.entries > 0)
3650             && (devq->alloc_openings > 0)
3651             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3652                 struct  cam_ed_qinfo *qinfo;
3653                 struct  cam_ed *device;
3654                 union   ccb *work_ccb;
3655                 struct  cam_periph *drv;
3656                 struct  camq *drvq;
3657                 
3658                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3659                                                            CAMQ_HEAD);
3660                 device = qinfo->device;
3661
3662                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3663                                 ("running device %p\n", device));
3664
3665                 drvq = &device->drvq;
3666
3667 #ifdef CAMDEBUG
3668                 if (drvq->entries <= 0) {
3669                         panic("xpt_run_dev_allocq: "
3670                               "Device on queue without any work to do");
3671                 }
3672 #endif
3673                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3674                         devq->alloc_openings--;
3675                         devq->alloc_active++;
3676                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3677                         crit_exit();
3678                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3679                                       drv->pinfo.priority);
3680                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3681                                         ("calling periph start\n"));
3682                         drv->periph_start(drv, work_ccb);
3683                 } else {
3684                         /*
3685                          * Malloc failure in alloc_ccb
3686                          */
3687                         /*
3688                          * XXX add us to a list to be run from free_ccb
3689                          * if we don't have any ccbs active on this
3690                          * device queue otherwise we may never get run
3691                          * again.
3692                          */
3693                         break;
3694                 }
3695         
3696                 /* Re-enter critical section for possible insertion and test at top of loop */
3697                 crit_enter();
3698
3699                 if (drvq->entries > 0) {
3700                         /* We have more work.  Attempt to reschedule */
3701                         xpt_schedule_dev_allocq(bus, device);
3702                 }
3703         }
3704         devq->alloc_queue.qfrozen_cnt--;
3705         crit_exit();
3706 }
3707
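/*
 * Drain the bus's send queue: while the SIM has openings and the
 * queue isn't frozen, dequeue the next CCB of the highest priority
 * unfrozen device and dispatch it to the SIM, honoring the global
 * high-power command limit along the way.
 */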
3708 static void
3709 xpt_run_dev_sendq(struct cam_eb *bus)
3710 {
3711         struct  cam_devq *devq;
3712
3713         if ((devq = bus->sim->devq) == NULL) {
3714                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3715                 return;
3716         }
3717         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3718
3719         crit_enter();
3720         devq->send_queue.qfrozen_cnt++;
3721         while ((devq->send_queue.entries > 0)
3722             && (devq->send_openings > 0)) {
3723                 struct  cam_ed_qinfo *qinfo;
3724                 struct  cam_ed *device;
3725                 union ccb *work_ccb;
3726                 struct  cam_sim *sim;
3727
3728                 if (devq->send_queue.qfrozen_cnt > 1) {
3729                         break;
3730                 }
3731
3732                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3733                                                            CAMQ_HEAD);
3734                 device = qinfo->device;
3735
3736                 /*
3737                  * If the device has been "frozen", don't attempt
3738                  * to run it.
3739                  */
3740                 if (device->qfrozen_cnt > 0) {
3741                         continue;
3742                 }
3743
3744                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3745                                 ("running device %p\n", device));
3746
3747                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3748                 if (work_ccb == NULL) {
3749                         kprintf("device on run queue with no ccbs???\n");
3750                         continue;
3751                 }
3752
3753                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3754
3755                         if (num_highpower <= 0) {
3756                                 /*
3757                                  * We got a high power command, but we
3758                                  * don't have any available slots.  Freeze
3759                                  * the device queue until we have a slot
3760                                  * available.
3761                                  */
3762                                 device->qfrozen_cnt++;
3763                                 STAILQ_INSERT_TAIL(&highpowerq, 
3764                                                    &work_ccb->ccb_h, 
3765                                                    xpt_links.stqe);
3766
3767                                 continue;
3768                         } else {
3769                                 /*
3770                                  * Consume a high power slot while
3771                                  * this ccb runs.
3772                                  */
3773                                 num_highpower--;
3774                         }
3775                 }
3776                 devq->active_dev = device;
3777                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3778
3779                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3780
3781                 devq->send_openings--;
3782                 devq->send_active++;            
3783                 
3784                 if (device->ccbq.queue.entries > 0)
3785                         xpt_schedule_dev_sendq(bus, device);
3786
3787                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3788                         /*
3789                          * The client wants to freeze the queue
3790                          * after this CCB is sent.
3791                          */
3792                         device->qfrozen_cnt++;
3793                 }
3794
3795                 /* In Target mode, the peripheral driver knows best... */
3796                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3797                         if ((device->inq_flags & SID_CmdQue) != 0
3798                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3799                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;