Try to automatically scan and attach LUNs for modern storage
[dragonfly.git] / sys / bus / cam / cam_xpt.c
1 /*
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
30  * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.49 2007/11/28 22:37:05 pavalos Exp $
31  */
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/time.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/md5.h>
42 #include <sys/devicestat.h>
43 #include <sys/interrupt.h>
44 #include <sys/sbuf.h>
45 #include <sys/bus.h>
46 #include <sys/thread.h>
47 #include <sys/thread2.h>
48
49 #include <machine/clock.h>
50
51 #include "cam.h"
52 #include "cam_ccb.h"
53 #include "cam_periph.h"
54 #include "cam_sim.h"
55 #include "cam_xpt.h"
56 #include "cam_xpt_sim.h"
57 #include "cam_xpt_periph.h"
58 #include "cam_debug.h"
59
60 #include "scsi/scsi_all.h"
61 #include "scsi/scsi_message.h"
62 #include "scsi/scsi_pass.h"
63 #include "opt_cam.h"
64
65 /* Datastructures internal to the xpt layer */
66
67 /*
68  * Definition of an async handler callback block.  These are used to add
69  * SIMs and peripherals to the async callback lists.
70  */
71 struct async_node {
72         SLIST_ENTRY(async_node) links;  /* Linkage on a per-object async_list */
73         u_int32_t       event_enable;   /* Async Event enables */
74         void            (*callback)(void *arg, u_int32_t code,
75                                     struct cam_path *path, void *args);
76                                         /* Handler run when an enabled event fires */
77         void            *callback_arg;  /* Opaque argument handed back to callback */
78 };
78
79 SLIST_HEAD(async_list, async_node);     /* List of async callback registrations */
80 SLIST_HEAD(periph_list, cam_periph);    /* List of peripherals attached to a device */
81 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq; /* CCBs waiting for a high-power slot */
82
83 /*
84  * This is the maximum number of high powered commands (e.g. start unit)
85  * that can be outstanding at a particular time.
86  */
87 #ifndef CAM_MAX_HIGHPOWER
88 #define CAM_MAX_HIGHPOWER  4
89 #endif
90
91 /* number of high powered commands that can go through right now */
92 static int num_highpower = CAM_MAX_HIGHPOWER;
93
94 /*
95  * Structure for queueing a device in a run queue.
96  * There is one run queue for allocating new ccbs,
97  * and another for sending ccbs to the controller.
98  */
99 struct cam_ed_qinfo {
100         cam_pinfo pinfo;                /* Priority/index info for the camq scheduler */
101         struct    cam_ed *device;       /* Back-pointer to the queued device */
102 };
103
104 /*
105  * The CAM EDT (Existing Device Table) contains the device information for
106  * all devices for all busses in the system.  The table contains a
107  * cam_ed structure for each device on the bus.
108  */
109 struct cam_ed {
110         TAILQ_ENTRY(cam_ed) links;      /* Linkage on the owning target's ed_entries */
111         struct  cam_ed_qinfo alloc_ccb_entry;   /* Entry in the ccb-allocation run queue */
112         struct  cam_ed_qinfo send_ccb_entry;    /* Entry in the ccb-send run queue */
113         struct  cam_et   *target;       /* Parent target (B/T of this B/T/L) */
114         lun_id_t         lun_id;        /* Logical unit number of this device */
115         struct  camq drvq;              /*
116                                          * Queue of type drivers wanting to do
117                                          * work on this device.
118                                          */
119         struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
120         struct  async_list asyncs;      /* Async callback info for this B/T/L */
121         struct  periph_list periphs;    /* All attached devices */
122         u_int   generation;             /* Generation number */
123         struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
124         struct  xpt_quirk_entry *quirk; /* Oddities about this device */
125                                         /* Storage for the inquiry data */
126 #ifdef CAM_NEW_TRAN_CODE
127         cam_proto        protocol;              /* Protocol spoken (new transport code) */
128         u_int            protocol_version;
129         cam_xport        transport;             /* Transport used (new transport code) */
130         u_int            transport_version;
131 #endif /* CAM_NEW_TRAN_CODE */
132         struct           scsi_inquiry_data inq_data;
133         u_int8_t         inq_flags;     /*
134                                          * Current settings for inquiry flags.
135                                          * This allows us to override settings
136                                          * like disconnection and tagged
137                                          * queuing for a device.
138                                          */
139         u_int8_t         queue_flags;   /* Queue flags from the control page */
140         u_int8_t         serial_num_len; /* Length of serial_num in bytes */
141         u_int8_t        *serial_num;    /* Device serial number, if retrieved */
142         u_int32_t        qfrozen_cnt;   /* Device queue freeze nesting count */
143         u_int32_t        flags;         /* CAM_DEV_* state flags below */
144 #define CAM_DEV_UNCONFIGURED            0x01
145 #define CAM_DEV_REL_TIMEOUT_PENDING     0x02
146 #define CAM_DEV_REL_ON_COMPLETE         0x04
147 #define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
148 #define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
149 #define CAM_DEV_TAG_AFTER_COUNT         0x20
150 #define CAM_DEV_INQUIRY_DATA_VALID      0x40
151         u_int32_t        tag_delay_count;
152 #define CAM_TAG_DELAY_COUNT             5
153         u_int32_t        refcount;      /* References held on this device */
154         struct           callout c_handle; /* NOTE(review): presumably drives
155                                             * xpt_release_devq_timeout — confirm */
156 };
156
157 /*
158  * Each target is represented by an ET (Existing Target).  These
159  * entries are created when a target is successfully probed with an
160  * identify, and removed when a device fails to respond after a number
161  * of retries, or a bus rescan finds the device missing.
162  */
163 struct cam_et { 
164         TAILQ_HEAD(, cam_ed) ed_entries;        /* Devices (LUNs) on this target */
165         TAILQ_ENTRY(cam_et) links;              /* Linkage on the bus's et_entries */
166         struct  cam_eb  *bus;                   /* Parent bus */
167         target_id_t     target_id;              /* Target ID on the parent bus */
168         u_int32_t       refcount;               /* References held on this target */
169         u_int           generation;             /* Bumped when ed_entries changes */
170         struct          timeval last_reset;     /* uptime of last reset */
171 };
172
173 /*
174  * Each bus is represented by an EB (Existing Bus).  These entries
175  * are created by calls to xpt_bus_register and deleted by calls to
176  * xpt_bus_deregister.
177  */
178 struct cam_eb { 
179         TAILQ_HEAD(, cam_et) et_entries;        /* Targets on this bus */
180         TAILQ_ENTRY(cam_eb)  links;             /* Linkage on the global xpt_busses list */
181         path_id_t            path_id;           /* Unique id assigned at registration */
182         struct cam_sim       *sim;              /* SIM (HBA driver) backing this bus */
183         struct timeval       last_reset;        /* uptime of last reset */
184         u_int32_t            flags;             /* CAM_EB_* flags below */
185 #define CAM_EB_RUNQ_SCHEDULED   0x01
186         u_int32_t            refcount;          /* References held on this bus */
187         u_int                generation;        /* Bumped when et_entries changes */
188 };
189
190 struct cam_path {
191         struct cam_periph *periph;      /* Owning peripheral, may be NULL */
192         struct cam_eb     *bus;         /* Bus component of the B/T/L path */
193         struct cam_et     *target;      /* Target component */
194         struct cam_ed     *device;      /* Device (LUN) component */
195 };
196
197 struct xpt_quirk_entry {
198         struct scsi_inquiry_pattern inq_pat;    /* Inquiry pattern this entry matches */
199         u_int8_t quirks;                        /* CAM_QUIRK_* flags below */
200 #define CAM_QUIRK_NOLUNS        0x01
201 #define CAM_QUIRK_NOSERIAL      0x02
202 #define CAM_QUIRK_HILUNS        0x04
203 #define CAM_QUIRK_NOHILUNS      0x08
204         u_int mintags;                          /* Tagged-queuing openings lower bound */
205         u_int maxtags;                          /* Tagged-queuing openings upper bound */
206 };
207 #define CAM_SCSI2_MAXLUN        8
208 /*
209  * If we're not quirked to search <= the first 8 luns
210  * and we are either quirked to search above lun 8,
211  * or we're > SCSI-2, we can look for luns above lun 8.
212  *
213  * The macro argument is parenthesized at every use so that any
214  * pointer-valued expression may be passed safely (CERT PRE01-C).
215  */
216 #define CAN_SRCH_HI(dv)                                 \
217   ((((dv)->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)    \
218   && (((dv)->quirk->quirks & CAM_QUIRK_HILUNS)          \
219   || SID_ANSI_REV(&(dv)->inq_data) > SCSI_REV_2))
217
218 typedef enum {
219         XPT_FLAG_OPEN           = 0x01  /* The xpt control device is open */
220 } xpt_flags;
221
222 /* Per-instance state of the transport layer itself. */
223 struct xpt_softc {
224         xpt_flags       flags;
225         u_int32_t       generation;
226 };
226
227 /* Vendor ID strings shared by the quirk table entries below. */
228 static const char quantum[] = "QUANTUM";
229 static const char sony[] = "SONY";
230 static const char west_digital[] = "WDIGTL";
231 static const char samsung[] = "SAMSUNG";
232 static const char seagate[] = "SEAGATE";
233 static const char microp[] = "MICROP";
233
234 static struct xpt_quirk_entry xpt_quirk_table[] = 
235 {
236         {
237                 /* Reports QUEUE FULL for temporary resource shortages */
238                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
239                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
240         },
241         {
242                 /* Reports QUEUE FULL for temporary resource shortages */
243                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
244                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
245         },
246         {
247                 /* Reports QUEUE FULL for temporary resource shortages */
248                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
249                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
250         },
251         {
252                 /* Broken tagged queuing drive */
253                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
254                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
255         },
256         {
257                 /* Broken tagged queuing drive */
258                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
259                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
260         },
261         {
262                 /* Broken tagged queuing drive */
263                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
264                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
265         },
266         {
267                 /*
268                  * Unfortunately, the Quantum Atlas III has the same
269                  * problem as the Atlas II drives above.
270                  * Reported by: "Johan Granlund" <johan@granlund.nu>
271                  *
272                  * For future reference, the drive with the problem was:
273                  * QUANTUM QM39100TD-SW N1B0
274                  * 
275                  * It's possible that Quantum will fix the problem in later
276                  * firmware revisions.  If that happens, the quirk entry
277                  * will need to be made specific to the firmware revisions
278                  * with the problem.
279                  * 
280                  */
281                 /* Reports QUEUE FULL for temporary resource shortages */
282                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
283                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
284         },
285         {
286                 /*
287                  * 18 Gig Atlas III, same problem as the 9G version.
288                  * Reported by: Andre Albsmeier
289                  *              <andre.albsmeier@mchp.siemens.de>
290                  *
291                  * For future reference, the drive with the problem was:
292                  * QUANTUM QM318000TD-S N491
293                  */
294                 /* Reports QUEUE FULL for temporary resource shortages */
295                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
296                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
297         },
298         {
299                 /*
300                  * Broken tagged queuing drive
301                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
302                  *         and: Martin Renters <martin@tdc.on.ca>
303                  */
304                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
305                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
306         },
307                 /*
308                  * The Seagate Medalist Pro drives have very poor write
309                  * performance with anything more than 2 tags.
310                  * 
311                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
312                  * Drive:  <SEAGATE ST36530N 1444>
313                  *
314                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
315                  * Drive:  <SEAGATE ST34520W 1281>
316                  *
317                  * No one has actually reported that the 9G version
318                  * (ST39140*) of the Medalist Pro has the same problem, but
319                  * we're assuming that it does because the 4G and 6.5G
320                  * versions of the drive are broken.
321                  */
322         {
323                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
324                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
325         },
326         {
327                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
328                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
329         },
330         {
331                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
332                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
333         },
334         {
335                 /*
336                  * Slow when tagged queueing is enabled.  Write performance
337                  * steadily drops off with more and more concurrent
338                  * transactions.  Best sequential write performance with
339                  * tagged queueing turned off and write caching turned on.
340                  *
341                  * PR:  kern/10398
342                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
343                  * Drive:  DCAS-34330 w/ "S65A" firmware.
344                  *
345                  * The drive with the problem had the "S65A" firmware
346                  * revision, and has also been reported (by Stephen J.
347                  * Roznowski <sjr@home.net>) for a drive with the "S61A"
348                  * firmware revision.
349                  *
350                  * Although no one has reported problems with the 2 gig
351                  * version of the DCAS drive, the assumption is that it
352                  * has the same problems as the 4 gig version.  Therefore
353                  * this quirk entry disables tagged queueing for all
354                  * DCAS drives.
355                  */
356                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
357                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
358         },
359         {
360                 /* Broken tagged queuing drive */
361                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
362                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
363         },
364         {
365                 /* Broken tagged queuing drive */ 
366                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
367                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
368         },
369         {
370                 /*
371                  * Broken tagged queuing drive.
372                  * Submitted by:
373                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
374                  * in PR kern/9535
375                  */
376                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
377                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
378         },
379         {
380                 /*
381                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
382                  * 8MB/sec.)
383                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
384                  * Best performance with these drives is achieved with
385                  * tagged queueing turned off, and write caching turned on.
386                  */
387                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
388                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
389         },
390         {
391                 /*
392                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
393                  * 8MB/sec.)
394                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
395                  * Best performance with these drives is achieved with
396                  * tagged queueing turned off, and write caching turned on.
397                  */
398                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
399                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
400         },
401         {
402                 /*
403                  * Doesn't handle queue full condition correctly,
404                  * so we need to limit maxtags to what the device
405                  * can handle instead of determining this automatically.
406                  */
407                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
408                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
409         },
410         {
411                 /* Really only one LUN */
412                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
413                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
414         },
415         {
416                 /* I can't believe we need a quirk for DPT volumes. */
417                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
418                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
419                 /*mintags*/0, /*maxtags*/255
420         },
421         {
422                 /*
423                  * Many Sony CDROM drives don't like multi-LUN probing.
424                  */
425                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
426                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
427         },
428         {
429                 /*
430                  * This drive doesn't like multiple LUN probing.
431                  * Submitted by:  Parag Patel <parag@cgt.com>
432                  */
433                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
434                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
435         },
436         {
437                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
438                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
439         },
440         {
441                 /*
442                  * The 8200 doesn't like multi-lun probing, and probably
443                  * doesn't like serial number requests either.
444                  */
445                 {
446                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
447                         "EXB-8200*", "*"
448                 },
449                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
450         },
451         {
452                 /*
453                  * Let's try the same as above, but for a drive that says
454                  * it's an IPL-6860 but is actually an EXB 8200.
455                  */
456                 {
457                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
458                         "IPL-6860*", "*"
459                 },
460                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
461         },
462         {
463                 /*
464                  * These Hitachi drives don't like multi-lun probing.
465                  * The PR submitter has a DK319H, but says that the Linux
466                  * kernel has a similar work-around for the DK312 and DK314,
467                  * so all DK31* drives are quirked here.
468                  * PR:            misc/18793
469                  * Submitted by:  Paul Haddad <paul@pth.com>
470                  */
471                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
472                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
473         },
474         {
475                 /*
476                  * This old revision of the TDC3600 is also SCSI-1, and
477                  * hangs upon serial number probing.
478                  */
479                 {
480                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
481                         " TDC 3600", "U07:"
482                 },
483                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
484         },
485         {
486                 /*
487                  * Would respond to all LUNs if asked for.
488                  */
489                 {
490                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
491                         "CP150", "*"
492                 },
493                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
494         },
495         {
496                 /*
497                  * Would respond to all LUNs if asked for.
498                  */
499                 {
500                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
501                         "96X2*", "*"
502                 },
503                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
504         },
505         {
506                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
507                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
508                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
509         },
510         {
511                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
512                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
513                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
514         },
515         {
516                 /* TeraSolutions special settings for TRC-22 RAID */
517                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
518                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
519         },
520         {
521                 /* Veritas Storage Appliance */
522                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
523                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
524         },
525         {
526                 /*
527                  * Would respond to all LUNs.  Device type and removable
528                  * flag are jumper-selectable.
529                  */
530                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
531                   "Tahiti 1", "*"
532                 },
533                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
534         },
535         {
536                 /* Default tagged queuing parameters for all devices */
537                 {
538                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
539                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
540                 },
541                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
542         },
543 };
544
545 /* Number of entries in xpt_quirk_table. */
546 static const int xpt_quirk_table_size =
547         sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
547
548 /*
549  * Return values for the device-match functions.  The low nibble carries
550  * flags (copy the result), the high nibble an action code.
551  */
552 typedef enum {
553         DM_RET_COPY             = 0x01,
554         DM_RET_FLAG_MASK        = 0x0f,
555         DM_RET_NONE             = 0x00,
556         DM_RET_STOP             = 0x10,
557         DM_RET_DESCEND          = 0x20,
558         DM_RET_ERROR            = 0x30,
559         DM_RET_ACTION_MASK      = 0xf0
560 } dev_match_ret;
557
558 /* How deep an EDT traversal should descend. */
559 typedef enum {
560         XPT_DEPTH_BUS,
561         XPT_DEPTH_TARGET,
562         XPT_DEPTH_DEVICE,
563         XPT_DEPTH_PERIPH
564 } xpt_traverse_depth;
565
566 /* Traversal parameters: depth plus the callback and its argument. */
567 struct xpt_traverse_config {
568         xpt_traverse_depth      depth;
569         void                    *tr_func;       /* One of the xpt_*func_t types below */
570         void                    *tr_arg;        /* Opaque argument passed to tr_func */
571 };
570
571 /* Per-object callback types used by the traversal helpers below. */
572 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
573 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
574 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
575 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
576 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
576
577 /* Transport layer configuration information */
578 static struct xpt_softc xsoftc;
579
580 /* Queues for our software interrupt handler */
581 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
582 static cam_isrq_t cam_bioq;
583
584 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
585 static SLIST_HEAD(,ccb_hdr) ccb_freeq;
586 static u_int xpt_max_ccbs;      /*
587                                  * Maximum size of ccb pool.  Modified as
588                                  * devices are added/removed or have their
589                                  * opening counts changed.
590                                  */
591 static u_int xpt_ccb_count;     /* Current count of allocated ccbs */
592
593 /* Peripheral instance representing the transport layer itself. */
594 struct cam_periph *xpt_periph;
595
596 /* Initialization hooks for the xpt and probe peripheral drivers. */
597 static periph_init_t xpt_periph_init;
598
599 static periph_init_t probe_periph_init;
598
599 /* Peripheral driver registration for the xpt control device. */
600 static struct periph_driver xpt_driver =
601 {
602         xpt_periph_init, "xpt",
603         TAILQ_HEAD_INITIALIZER(xpt_driver.units)
604 };
605
606 /* Peripheral driver registration for the device probe state machine. */
607 static struct periph_driver probe_driver =
608 {
609         probe_periph_init, "probe",
610         TAILQ_HEAD_INITIALIZER(probe_driver.units)
611 };
612
613 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
614 PERIPHDRIVER_DECLARE(probe, probe_driver);
613
614 /* Character device major number (presumably for /dev/xpt — confirm). */
615 #define XPT_CDEV_MAJOR 104
616
617 static d_open_t xptopen;
618 static d_close_t xptclose;
619 static d_ioctl_t xptioctl;
620
621 /* Character device entry points for the xpt control device. */
622 static struct dev_ops xpt_ops = {
623         { "xpt", XPT_CDEV_MAJOR, 0 },
624         .d_open = xptopen,
625         .d_close = xptclose,
626         .d_ioctl = xptioctl
627 };
628
629 /* Boot-time hook; NOTE(review): presumably released by xpt_finishconfig — confirm. */
630 static struct intr_config_hook *xpt_config_hook;
631
632 /* Registered busses */
633 static TAILQ_HEAD(,cam_eb) xpt_busses;
634 static u_int bus_generation;    /* Bumped whenever xpt_busses changes */
632
633 /* Storage for debugging data structures */
634 #ifdef  CAMDEBUG
635 struct cam_path *cam_dpath;     /* Path currently selected for debug tracing */
636 u_int32_t cam_dflags;           /* Active CAM_DEBUG_* flag bits */
637 u_int32_t cam_debug_delay;      /* Optional delay inserted between debug output */
638 #endif
639
640 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
641 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
642 #endif
643
644 /*
645  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
646  * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
647  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
648  */
649 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
650     || defined(CAM_DEBUG_LUN)
651 #ifdef CAMDEBUG
652 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
653     || !defined(CAM_DEBUG_LUN)
654 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
655         and CAM_DEBUG_LUN"
656 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
657 #else /* !CAMDEBUG */
658 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
659 #endif /* CAMDEBUG */
660 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
661
662 /* Our boot-time initialization hook */
663 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
664
665 /* Kernel module glue: registers the "cam" module and its event handler. */
666 static moduledata_t cam_moduledata = {
667         "cam",
668         cam_module_event_handler,
669         NULL
670 };
671
672 static void     xpt_init(void *);
673
674 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
675 MODULE_VERSION(cam, 1);
675
676
677 /* Path construction and teardown. */
678 static cam_status       xpt_compile_path(struct cam_path *new_path,
679                                          struct cam_periph *perph,
680                                          path_id_t path_id,
681                                          target_id_t target_id,
682                                          lun_id_t lun_id);
683
684 static void             xpt_release_path(struct cam_path *path);
685
686 /* Async event distribution. */
687 static void             xpt_async_bcast(struct async_list *async_head,
688                                         u_int32_t async_code,
689                                         struct cam_path *path,
690                                         void *async_arg);
691 static void             xpt_dev_async(u_int32_t async_code,
692                                       struct cam_eb *bus,
693                                       struct cam_et *target,
694                                       struct cam_ed *device,
695                                       void *async_arg);
696 /* Path id allocation and ccb scheduling/run queues. */
697 static path_id_t xptnextfreepathid(void);
698 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
699 static union ccb *xpt_get_ccb(struct cam_ed *device);
700 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
701                                   u_int32_t new_priority);
702 static void      xpt_run_dev_allocq(struct cam_eb *bus);
703 static void      xpt_run_dev_sendq(struct cam_eb *bus);
704 static timeout_t xpt_release_devq_timeout;
705 /* EDT object allocation, release and lookup. */
706 static void      xpt_release_bus(struct cam_eb *bus);
707 static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
708                                          int run_queue);
709 static struct cam_et*
710                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
711 static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
712 static struct cam_ed*
713                  xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
714                                   lun_id_t lun_id);
715 static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
716                                     struct cam_ed *device);
717 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
718 static struct cam_eb*
719                  xpt_find_bus(path_id_t path_id);
720 static struct cam_et*
721                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
722 static struct cam_ed*
723                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
724 /* Bus/LUN scanning and boot-time configuration. */
725 static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
726 static void      xpt_scan_lun(struct cam_periph *periph,
727                               struct cam_path *path, cam_flags flags,
728                               union ccb *ccb);
729 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
730 static xpt_busfunc_t    xptconfigbuscountfunc;
731 static xpt_busfunc_t    xptconfigfunc;
732 static void      xpt_config(void *arg);
733 static xpt_devicefunc_t xptpassannouncefunc;
734 static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
735 /* SIM action/poll entry points and the software interrupt handler. */
736 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
737 static void      xptpoll(struct cam_sim *sim);
738 static inthand2_t swi_cambio;
739 static void      camisr(cam_isrq_t *queue);
740 #if 0
741 static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
742 static void      xptasync(struct cam_periph *periph,
743                           u_int32_t code, cam_path *path);
744 #endif
745 /* Device-match helpers and EDT traversal primitives. */
746 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
747                                     u_int num_patterns, struct cam_eb *bus);
748 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
749                                        u_int num_patterns,
750                                        struct cam_ed *device);
751 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
752                                        u_int num_patterns,
753                                        struct cam_periph *periph);
754 static xpt_busfunc_t    xptedtbusfunc;
755 static xpt_targetfunc_t xptedttargetfunc;
756 static xpt_devicefunc_t xptedtdevicefunc;
757 static xpt_periphfunc_t xptedtperiphfunc;
758 static xpt_pdrvfunc_t   xptplistpdrvfunc;
759 static xpt_periphfunc_t xptplistperiphfunc;
760 static int              xptedtmatch(struct ccb_dev_match *cdm);
761 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
762 static int              xptbustraverse(struct cam_eb *start_bus,
763                                        xpt_busfunc_t *tr_func, void *arg);
764 static int              xpttargettraverse(struct cam_eb *bus,
765                                           struct cam_et *start_target,
766                                           xpt_targetfunc_t *tr_func, void *arg);
767 static int              xptdevicetraverse(struct cam_et *target,
768                                           struct cam_ed *start_device,
769                                           xpt_devicefunc_t *tr_func, void *arg);
770 static int              xptperiphtraverse(struct cam_ed *device,
771                                           struct cam_periph *start_periph,
772                                           xpt_periphfunc_t *tr_func, void *arg);
773 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
774                                         xpt_pdrvfunc_t *tr_func, void *arg);
775 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
776                                             struct cam_periph *start_periph,
777                                             xpt_periphfunc_t *tr_func,
778                                             void *arg);
772 static xpt_busfunc_t    xptdefbusfunc;
773 static xpt_targetfunc_t xptdeftargetfunc;
774 static xpt_devicefunc_t xptdefdevicefunc;
775 static xpt_periphfunc_t xptdefperiphfunc;
776 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
777 #ifdef notusedyet
778 static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
779                                             void *arg);
780 #endif
781 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
782                                             void *arg);
783 #ifdef notusedyet
784 static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
785                                             void *arg);
786 #endif
787 static xpt_devicefunc_t xptsetasyncfunc;
788 static xpt_busfunc_t    xptsetasyncbusfunc;
789 static cam_status       xptregister(struct cam_periph *periph,
790                                     void *arg);
791 static cam_status       proberegister(struct cam_periph *periph,
792                                       void *arg);
793 static void      probeschedule(struct cam_periph *probe_periph);
794 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
795 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
796 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
797 static void      probecleanup(struct cam_periph *periph);
798 static void      xpt_find_quirk(struct cam_ed *device);
799 #ifdef CAM_NEW_TRAN_CODE
800 static void      xpt_devise_transport(struct cam_path *path);
801 #endif /* CAM_NEW_TRAN_CODE */
802 static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
803                                            struct cam_ed *device,
804                                            int async_update);
805 static void      xpt_toggle_tags(struct cam_path *path);
806 static void      xpt_start_tags(struct cam_path *path);
807 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
808                                             struct cam_ed *dev);
809 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
810                                            struct cam_ed *dev);
811 static __inline int periph_is_queued(struct cam_periph *periph);
812 static __inline int device_is_alloc_queued(struct cam_ed *device);
813 static __inline int device_is_send_queued(struct cam_ed *device);
814 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
815
/*
 * Queue a device on its SIM's CCB allocation queue when CCB resources
 * (devq_openings) are available, applying any pending queue resize
 * first.  Returns the value of xpt_schedule_dev(), or 0 if nothing
 * was scheduled.
 */
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        /* Grow/shrink the CCB queue to the current load */
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}
842
843 static __inline int
844 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
845 {
846         int     retval;
847
848         if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
849                 /*
850                  * The priority of a device waiting for controller
851                  * resources is that of the the highest priority CCB
852                  * enqueued.
853                  */
854                 retval =
855                     xpt_schedule_dev(&bus->sim->devq->send_queue,
856                                      &dev->send_ccb_entry.pinfo,
857                                      CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
858         } else {
859                 retval = 0;
860         }
861         return (retval);
862 }
863
864 static __inline int
865 periph_is_queued(struct cam_periph *periph)
866 {
867         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
868 }
869
870 static __inline int
871 device_is_alloc_queued(struct cam_ed *device)
872 {
873         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
874 }
875
876 static __inline int
877 device_is_send_queued(struct cam_ed *device)
878 {
879         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
880 }
881
882 static __inline int
883 dev_allocq_is_runnable(struct cam_devq *devq)
884 {
885         /*
886          * Have work to do.
887          * Have space to do more work.
888          * Allowed to do work.
889          */
890         return ((devq->alloc_queue.qfrozen_cnt == 0)
891              && (devq->alloc_queue.entries > 0)
892              && (devq->alloc_openings > 0));
893 }
894
/*
 * Register the xpt character device operations and create the single
 * control node, /dev/xpt0, owned root:operator with mode 0600.
 */
static void
xpt_periph_init(void)
{
        dev_ops_add(&xpt_ops, 0, 0);
        make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}
901
/*
 * Intentionally empty: the probe peripheral driver needs no global
 * initialization, but the init hook must exist.
 */
static void
probe_periph_init(void)
{
}
906
907
/*
 * Completion callback for CCBs the XPT issues on its own behalf
 * (see xptioctl).  The issuer frees the CCB; we only wake up
 * whoever is sleeping on its completion.
 */
static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}
914
/*
 * Open entry point for the xpt control device.  Requires blocking,
 * read-write access and supports only the single unit, xpt0.
 */
static int
xptopen(struct dev_open_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((ap->a_oflags & O_NONBLOCK) != 0) {
                kprintf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}
952
/*
 * Close entry point for the xpt control device; clears the open flag.
 * Only the single unit, xpt0, is supported.
 */
static int
xptclose(struct dev_close_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}
976
977 static int
978 xptioctl(struct dev_ioctl_args *ap)
979 {
980         cdev_t dev = ap->a_head.a_dev;
981         int unit, error;
982
983         error = 0;
984         unit = minor(dev) & 0xff;
985
986         /*
987          * We only have one transport layer right now.  If someone accesses
988          * us via something other than minor number 1, point out their
989          * mistake.
990          */
991         if (unit != 0) {
992                 kprintf("xptioctl: got invalid xpt unit %d\n", unit);
993                 return(ENXIO);
994         }
995
996         switch(ap->a_cmd) {
997         /*
998          * For the transport layer CAMIOCOMMAND ioctl, we really only want
999          * to accept CCB types that don't quite make sense to send through a
1000          * passthrough driver.
1001          */
1002         case CAMIOCOMMAND: {
1003                 union ccb *ccb;
1004                 union ccb *inccb;
1005
1006                 inccb = (union ccb *)ap->a_data;
1007
1008                 switch(inccb->ccb_h.func_code) {
1009                 case XPT_SCAN_BUS:
1010                 case XPT_RESET_BUS:
1011                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1012                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1013                                 error = EINVAL;
1014                                 break;
1015                         }
1016                         /* FALLTHROUGH */
1017                 case XPT_PATH_INQ:
1018                 case XPT_ENG_INQ:
1019                 case XPT_SCAN_LUN:
1020
1021                         ccb = xpt_alloc_ccb();
1022
1023                         /*
1024                          * Create a path using the bus, target, and lun the
1025                          * user passed in.
1026                          */
1027                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1028                                             inccb->ccb_h.path_id,
1029                                             inccb->ccb_h.target_id,
1030                                             inccb->ccb_h.target_lun) !=
1031                                             CAM_REQ_CMP){
1032                                 error = EINVAL;
1033                                 xpt_free_ccb(ccb);
1034                                 break;
1035                         }
1036                         /* Ensure all of our fields are correct */
1037                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1038                                       inccb->ccb_h.pinfo.priority);
1039                         xpt_merge_ccb(ccb, inccb);
1040                         ccb->ccb_h.cbfcnp = xptdone;
1041                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1042                         bcopy(ccb, inccb, sizeof(union ccb));
1043                         xpt_free_path(ccb->ccb_h.path);
1044                         xpt_free_ccb(ccb);
1045                         break;
1046
1047                 case XPT_DEBUG: {
1048                         union ccb ccb;
1049
1050                         /*
1051                          * This is an immediate CCB, so it's okay to
1052                          * allocate it on the stack.
1053                          */
1054
1055                         /*
1056                          * Create a path using the bus, target, and lun the
1057                          * user passed in.
1058                          */
1059                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1060                                             inccb->ccb_h.path_id,
1061                                             inccb->ccb_h.target_id,
1062                                             inccb->ccb_h.target_lun) !=
1063                                             CAM_REQ_CMP){
1064                                 error = EINVAL;
1065                                 break;
1066                         }
1067                         /* Ensure all of our fields are correct */
1068                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1069                                       inccb->ccb_h.pinfo.priority);
1070                         xpt_merge_ccb(&ccb, inccb);
1071                         ccb.ccb_h.cbfcnp = xptdone;
1072                         xpt_action(&ccb);
1073                         bcopy(&ccb, inccb, sizeof(union ccb));
1074                         xpt_free_path(ccb.ccb_h.path);
1075                         break;
1076
1077                 }
1078                 case XPT_DEV_MATCH: {
1079                         struct cam_periph_map_info mapinfo;
1080                         struct cam_path *old_path;
1081
1082                         /*
1083                          * We can't deal with physical addresses for this
1084                          * type of transaction.
1085                          */
1086                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1087                                 error = EINVAL;
1088                                 break;
1089                         }
1090
1091                         /*
1092                          * Save this in case the caller had it set to
1093                          * something in particular.
1094                          */
1095                         old_path = inccb->ccb_h.path;
1096
1097                         /*
1098                          * We really don't need a path for the matching
1099                          * code.  The path is needed because of the
1100                          * debugging statements in xpt_action().  They
1101                          * assume that the CCB has a valid path.
1102                          */
1103                         inccb->ccb_h.path = xpt_periph->path;
1104
1105                         bzero(&mapinfo, sizeof(mapinfo));
1106
1107                         /*
1108                          * Map the pattern and match buffers into kernel
1109                          * virtual address space.
1110                          */
1111                         error = cam_periph_mapmem(inccb, &mapinfo);
1112
1113                         if (error) {
1114                                 inccb->ccb_h.path = old_path;
1115                                 break;
1116                         }
1117
1118                         /*
1119                          * This is an immediate CCB, we can send it on directly.
1120                          */
1121                         xpt_action(inccb);
1122
1123                         /*
1124                          * Map the buffers back into user space.
1125                          */
1126                         cam_periph_unmapmem(inccb, &mapinfo);
1127
1128                         inccb->ccb_h.path = old_path;
1129
1130                         error = 0;
1131                         break;
1132                 }
1133                 default:
1134                         error = ENOTSUP;
1135                         break;
1136                 }
1137                 break;
1138         }
1139         /*
1140          * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
1141          * with the periphal driver name and unit name filled in.  The other
1142          * fields don't really matter as input.  The passthrough driver name
1143          * ("pass"), and unit number are passed back in the ccb.  The current
1144          * device generation number, and the index into the device peripheral
1145          * driver list, and the status are also passed back.  Note that
1146          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1147          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
1148          * (or rather should be) impossible for the device peripheral driver
1149          * list to change since we look at the whole thing in one pass, and
1150          * we do it within a critical section.
1151          * 
1152          */
1153         case CAMGETPASSTHRU: {
1154                 union ccb *ccb;
1155                 struct cam_periph *periph;
1156                 struct periph_driver **p_drv;
1157                 char   *name;
1158                 u_int unit;
1159                 u_int cur_generation;
1160                 int base_periph_found;
1161                 int splbreaknum;
1162
1163                 ccb = (union ccb *)ap->a_data;
1164                 unit = ccb->cgdl.unit_number;
1165                 name = ccb->cgdl.periph_name;
1166                 /*
1167                  * Every 100 devices, we want to call splz() to check for
1168                  * and allow the software interrupt handler a chance to run.
1169                  *
1170                  * Most systems won't run into this check, but this should
1171                  * avoid starvation in the software interrupt handler in
1172                  * large systems.
1173                  */
1174                 splbreaknum = 100;
1175
1176                 ccb = (union ccb *)ap->a_data;
1177
1178                 base_periph_found = 0;
1179
1180                 /*
1181                  * Sanity check -- make sure we don't get a null peripheral
1182                  * driver name.
1183                  */
1184                 if (*ccb->cgdl.periph_name == '\0') {
1185                         error = EINVAL;
1186                         break;
1187                 }
1188
1189                 /* Keep the list from changing while we traverse it */
1190                 crit_enter();
1191 ptstartover:
1192                 cur_generation = xsoftc.generation;
1193
1194                 /* first find our driver in the list of drivers */
1195                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
1196                         if (strcmp((*p_drv)->driver_name, name) == 0)
1197                                 break;
1198                 }
1199
1200                 if (*p_drv == NULL) {
1201                         crit_exit();
1202                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1203                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1204                         *ccb->cgdl.periph_name = '\0';
1205                         ccb->cgdl.unit_number = 0;
1206                         error = ENOENT;
1207                         break;
1208                 }       
1209
1210                 /*
1211                  * Run through every peripheral instance of this driver
1212                  * and check to see whether it matches the unit passed
1213                  * in by the user.  If it does, get out of the loops and
1214                  * find the passthrough driver associated with that
1215                  * peripheral driver.
1216                  */
1217                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
1218
1219                         if (periph->unit_number == unit) {
1220                                 break;
1221                         } else if (--splbreaknum == 0) {
1222                                 splz();
1223                                 splbreaknum = 100;
1224                                 if (cur_generation != xsoftc.generation)
1225                                        goto ptstartover;
1226                         }
1227                 }
1228                 /*
1229                  * If we found the peripheral driver that the user passed
1230                  * in, go through all of the peripheral drivers for that
1231                  * particular device and look for a passthrough driver.
1232                  */
1233                 if (periph != NULL) {
1234                         struct cam_ed *device;
1235                         int i;
1236
1237                         base_periph_found = 1;
1238                         device = periph->path->device;
1239                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
1240                              periph != NULL;
1241                              periph = SLIST_NEXT(periph, periph_links), i++) {
1242                                 /*
1243                                  * Check to see whether we have a
1244                                  * passthrough device or not. 
1245                                  */
1246                                 if (strcmp(periph->periph_name, "pass") == 0) {
1247                                         /*
1248                                          * Fill in the getdevlist fields.
1249                                          */
1250                                         strcpy(ccb->cgdl.periph_name,
1251                                                periph->periph_name);
1252                                         ccb->cgdl.unit_number =
1253                                                 periph->unit_number;
1254                                         if (SLIST_NEXT(periph, periph_links))
1255                                                 ccb->cgdl.status =
1256                                                         CAM_GDEVLIST_MORE_DEVS;
1257                                         else
1258                                                 ccb->cgdl.status =
1259                                                        CAM_GDEVLIST_LAST_DEVICE;
1260                                         ccb->cgdl.generation =
1261                                                 device->generation;
1262                                         ccb->cgdl.index = i;
1263                                         /*
1264                                          * Fill in some CCB header fields
1265                                          * that the user may want.
1266                                          */
1267                                         ccb->ccb_h.path_id =
1268                                                 periph->path->bus->path_id;
1269                                         ccb->ccb_h.target_id =
1270                                                 periph->path->target->target_id;
1271                                         ccb->ccb_h.target_lun =
1272                                                 periph->path->device->lun_id;
1273                                         ccb->ccb_h.status = CAM_REQ_CMP;
1274                                         break;
1275                                 }
1276                         }
1277                 }
1278
1279                 /*
1280                  * If the periph is null here, one of two things has
1281                  * happened.  The first possibility is that we couldn't
1282                  * find the unit number of the particular peripheral driver
1283                  * that the user is asking about.  e.g. the user asks for
1284                  * the passthrough driver for "da11".  We find the list of
1285                  * "da" peripherals all right, but there is no unit 11.
1286                  * The other possibility is that we went through the list
1287                  * of peripheral drivers attached to the device structure,
1288                  * but didn't find one with the name "pass".  Either way,
1289                  * we return ENOENT, since we couldn't find something.
1290                  */
1291                 if (periph == NULL) {
1292                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1293                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1294                         *ccb->cgdl.periph_name = '\0';
1295                         ccb->cgdl.unit_number = 0;
1296                         error = ENOENT;
1297                         /*
1298                          * It is unfortunate that this is even necessary,
1299                          * but there are many, many clueless users out there.
1300                          * If this is true, the user is looking for the
1301                          * passthrough driver, but doesn't have one in his
1302                          * kernel.
1303                          */
1304                         if (base_periph_found == 1) {
1305                                 kprintf("xptioctl: pass driver is not in the "
1306                                        "kernel\n");
1307                                 kprintf("xptioctl: put \"device pass0\" in "
1308                                        "your kernel config file\n");
1309                         }
1310                 }
1311                 crit_exit();
1312                 break;
1313                 }
1314         default:
1315                 error = ENOTTY;
1316                 break;
1317         }
1318
1319         return(error);
1320 }
1321
1322 static int
1323 cam_module_event_handler(module_t mod, int what, void *arg)
1324 {
1325         if (what == MOD_LOAD) {
1326                 xpt_init(NULL);
1327         } else if (what == MOD_UNLOAD) {
1328                 return EBUSY;
1329         } else {
1330                 return EOPNOTSUPP;
1331         }
1332
1333         return 0;
1334 }
1335
1336 /* Functions accessed by the peripheral drivers */
/*
 * One-time initialization of the transport layer, invoked at module
 * load time (see cam_module_event_handler).  Sets up the global bus,
 * bio, CCB-free, and high-power queues, registers the XPT's own SIM
 * and peripheral, and defers bus probing (xpt_config) until
 * interrupts are enabled.
 */
static void
xpt_init(void *dummy)
{
        struct cam_sim *xpt_sim;
        struct cam_path *path;
        struct cam_devq *devq;
        cam_status status;

        TAILQ_INIT(&xpt_busses);
        TAILQ_INIT(&cam_bioq);
        SLIST_INIT(&ccb_freeq);
        STAILQ_INIT(&highpowerq);

        /*
         * The xpt layer is, itself, the equivalent of a SIM.
         * Allow 16 ccbs in the ccb pool for it.  This should
         * give decent parallelism when we probe busses and
         * perform other XPT functions.
         */
        devq = cam_simq_alloc(16);
        xpt_sim = cam_sim_alloc(xptaction,
                                xptpoll,
                                "xpt",
                                /*softc*/NULL,
                                /*unit*/0,
                                /*max_dev_transactions*/0,
                                /*max_tagged_dev_transactions*/0,
                                devq);
        /* NOTE(review): presumably cam_sim_alloc holds its own devq
         * reference, making this release safe -- confirm. */
        cam_simq_release(devq);
        xpt_max_ccbs = 16;

        xpt_bus_register(xpt_sim, /*bus #*/0);

        /*
         * Looking at the XPT from the SIM layer, the XPT is
         * the equivalent of a peripheral driver.  Allocate
         * a peripheral driver entry for us.
         */
        if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                      CAM_TARGET_WILDCARD,
                                      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                kprintf("xpt_init: xpt_create_path failed with status %#x,"
                       " failing attach\n", status);
                return;
        }

        cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
                         path, NULL, 0, NULL);
        xpt_free_path(path);

        /* xptregister (called above) stored the periph in xpt_periph */
        xpt_sim->softc = xpt_periph;

        /*
         * Register a callback for when interrupts are enabled.
         */
        xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
                                  M_TEMP, M_INTWAIT | M_ZERO);
        xpt_config_hook->ich_func = xpt_config;
        xpt_config_hook->ich_desc = "xpt";
        xpt_config_hook->ich_order = 1000;
        if (config_intrhook_establish(xpt_config_hook) != 0) {
                kfree (xpt_config_hook, M_TEMP);
                kprintf("xpt_init: config_intrhook_establish failed "
                       "- failing attach\n");
                /*
                 * NOTE(review): despite the "failing attach" message,
                 * execution falls through and xpt_config_hook is left
                 * pointing at freed memory -- confirm whether an early
                 * return was intended here.
                 */
        }

        /* Install our software interrupt handlers */
        register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
}
1406
1407 static cam_status
1408 xptregister(struct cam_periph *periph, void *arg)
1409 {
1410         if (periph == NULL) {
1411                 kprintf("xptregister: periph was NULL!!\n");
1412                 return(CAM_REQ_CMP_ERR);
1413         }
1414
1415         periph->softc = NULL;
1416
1417         xpt_periph = periph;
1418
1419         return(CAM_REQ_CMP);
1420 }
1421
1422 int32_t
1423 xpt_add_periph(struct cam_periph *periph)
1424 {
1425         struct cam_ed *device;
1426         int32_t  status;
1427         struct periph_list *periph_head;
1428
1429         device = periph->path->device;
1430
1431         periph_head = &device->periphs;
1432
1433         status = CAM_REQ_CMP;
1434
1435         if (device != NULL) {
1436                 /*
1437                  * Make room for this peripheral
1438                  * so it will fit in the queue
1439                  * when it's scheduled to run
1440                  */
1441                 crit_enter();
1442                 status = camq_resize(&device->drvq,
1443                                      device->drvq.array_size + 1);
1444
1445                 device->generation++;
1446
1447                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1448                 crit_exit();
1449         }
1450
1451         xsoftc.generation++;
1452
1453         return (status);
1454 }
1455
1456 void
1457 xpt_remove_periph(struct cam_periph *periph)
1458 {
1459         struct cam_ed *device;
1460
1461         device = periph->path->device;
1462
1463         if (device != NULL) {
1464                 struct periph_list *periph_head;
1465
1466                 periph_head = &device->periphs;
1467                 
1468                 /* Release the slot for this peripheral */
1469                 crit_enter();
1470                 camq_resize(&device->drvq, device->drvq.array_size - 1);
1471
1472                 device->generation++;
1473
1474                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1475                 crit_exit();
1476         }
1477
1478         xsoftc.generation++;
1479
1480 }
1481
1482 #ifdef CAM_NEW_TRAN_CODE
1483
1484 void
1485 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1486 {
1487         struct  ccb_pathinq cpi;
1488         struct  ccb_trans_settings cts;
1489         struct  cam_path *path;
1490         u_int   speed;
1491         u_int   freq;
1492         u_int   mb;
1493
1494         path = periph->path;
1495         /*
1496          * To ensure that this is printed in one piece,
1497          * mask out CAM interrupts.
1498          */
1499         crit_enter();
1500         printf("%s%d at %s%d bus %d target %d lun %d\n",
1501                periph->periph_name, periph->unit_number,
1502                path->bus->sim->sim_name,
1503                path->bus->sim->unit_number,
1504                path->bus->sim->bus_id,
1505                path->target->target_id,
1506                path->device->lun_id);
1507         printf("%s%d: ", periph->periph_name, periph->unit_number);
1508         scsi_print_inquiry(&path->device->inq_data);
1509         if (bootverbose && path->device->serial_num_len > 0) {
1510                 /* Don't wrap the screen  - print only the first 60 chars */
1511                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1512                        periph->unit_number, path->device->serial_num);
1513         }
1514         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1515         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1516         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1517         xpt_action((union ccb*)&cts);
1518
1519         /* Ask the SIM for its base transfer speed */
1520         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1521         cpi.ccb_h.func_code = XPT_PATH_INQ;
1522         xpt_action((union ccb *)&cpi);
1523
1524         speed = cpi.base_transfer_speed;
1525         freq = 0;
1526         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1527                 struct  ccb_trans_settings_spi *spi;
1528
1529                 spi = &cts.xport_specific.spi;
1530                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1531                   && spi->sync_offset != 0) {
1532                         freq = scsi_calc_syncsrate(spi->sync_period);
1533                         speed = freq;
1534                 }
1535
1536                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1537                         speed *= (0x01 << spi->bus_width);
1538         }
1539         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1540                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1541                 if (fc->valid & CTS_FC_VALID_SPEED) {
1542                         speed = fc->bitrate;
1543                 }
1544         }
1545
1546         mb = speed / 1000;
1547         if (mb > 0)
1548                 printf("%s%d: %d.%03dMB/s transfers",
1549                        periph->periph_name, periph->unit_number,
1550                        mb, speed % 1000);
1551         else
1552                 printf("%s%d: %dKB/s transfers", periph->periph_name,
1553                        periph->unit_number, speed);
1554         /* Report additional information about SPI connections */
1555         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1556                 struct  ccb_trans_settings_spi *spi;
1557
1558                 spi = &cts.xport_specific.spi;
1559                 if (freq != 0) {
1560                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1561                                freq % 1000,
1562                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1563                              ? " DT" : "",
1564                                spi->sync_offset);
1565                 }
1566                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1567                  && spi->bus_width > 0) {
1568                         if (freq != 0) {
1569                                 printf(", ");
1570                         } else {
1571                                 printf(" (");
1572                         }
1573                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
1574                 } else if (freq != 0) {
1575                         printf(")");
1576                 }
1577         }
1578         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1579                 struct  ccb_trans_settings_fc *fc;
1580
1581                 fc = &cts.xport_specific.fc;
1582                 if (fc->valid & CTS_FC_VALID_WWNN)
1583                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
1584                 if (fc->valid & CTS_FC_VALID_WWPN)
1585                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
1586                 if (fc->valid & CTS_FC_VALID_PORT)
1587                         printf(" PortID 0x%x", fc->port);
1588         }
1589
1590         if (path->device->inq_flags & SID_CmdQue
1591          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1592                 printf("\n%s%d: Tagged Queueing Enabled",
1593                        periph->periph_name, periph->unit_number);
1594         }
1595         printf("\n");
1596
1597         /*
1598          * We only want to print the caller's announce string if they've
1599          * passed one in..
1600          */
1601         if (announce_string != NULL)
1602                 printf("%s%d: %s\n", periph->periph_name,
1603                        periph->unit_number, announce_string);
1604         crit_exit();
1605 }
1606 #else /* CAM_NEW_TRAN_CODE */
/*
 * Announce a newly attached peripheral on the console (legacy,
 * pre-CAM_NEW_TRAN_CODE variant): prints the bus/target/lun location,
 * inquiry data, optional serial number, and the transfer rate derived
 * from the current transfer settings negotiated with the SIM.
 */
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	crit_enter();
	kprintf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	kprintf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen  - print only the first 60 chars */
		kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	/* Query the SIM for the currently negotiated transfer settings. */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		/*
		 * A valid, nonzero sync offset means the rate follows
		 * from the negotiated sync period; otherwise fall back
		 * to the SIM's base (async) transfer speed.
		 */
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		/* A wide bus multiplies the effective transfer rate. */
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		/* speed is in KB/s; print MB/s with 3 decimals if >= 1MB/s */
		mb = speed / 1000;
		if (mb > 0)
			kprintf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			kprintf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			kprintf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			/*
			 * Continue the parenthesized sync detail if one
			 * was opened above; otherwise open a new one.
			 */
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				kprintf(", ");
			} else {
				kprintf(" (");
			}
			kprintf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			/* Close the sync detail parenthesis. */
			kprintf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			kprintf(", Tagged Queueing Enabled");
		}

		kprintf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		/* No transfer settings, but tagged queueing is on. */
		kprintf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		kprintf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	crit_exit();
}
1708
1709 #endif /* CAM_NEW_TRAN_CODE */
1710
/*
 * Match a single bus against an array of user-supplied match patterns.
 *
 * Returns a dev_match_ret combining an action (DM_RET_NONE, _DESCEND,
 * _STOP, or _ERROR) with the DM_RET_COPY flag when this bus's data
 * should be copied out to the user.  DM_RET_DESCEND is accumulated
 * when at least one non-bus pattern is present, since lower EDT levels
 * must then still be examined; otherwise DM_RET_STOP is returned so
 * the traversal does not descend past this bus.
 */
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		/* Each requested field must match; any miss rejects. */
		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants 
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
1822
/*
 * Match a single device against an array of user-supplied match
 * patterns.
 *
 * Returns a dev_match_ret combining an action (DM_RET_NONE, _DESCEND,
 * _STOP, or _ERROR) with the DM_RET_COPY flag when this device's data
 * should be copied out to the user.  DM_RET_DESCEND is accumulated
 * when a peripheral pattern is present, since the periph level must
 * then still be examined; otherwise DM_RET_STOP is returned so the
 * traversal does not descend past this device.
 */
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			
			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		/* Each requested field must match; any miss rejects. */
		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		/* Inquiry patterns use the quirk-table matching engine. */
		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants 
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
1935
1936 /*
1937  * Match a single peripheral against any number of match patterns.
1938  */
1939 static dev_match_ret
1940 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1941                struct cam_periph *periph)
1942 {
1943         dev_match_ret retval;
1944         int i;
1945
1946         /*
1947          * If we aren't given something to match against, that's an error.
1948          */
1949         if (periph == NULL)
1950                 return(DM_RET_ERROR);
1951
1952         /*
1953          * If there are no match entries, then this peripheral matches no
1954          * matter what.
1955          */
1956         if ((patterns == NULL) || (num_patterns == 0))
1957                 return(DM_RET_STOP | DM_RET_COPY);
1958
1959         /*
1960          * There aren't any nodes below a peripheral node, so there's no
1961          * reason to descend the tree any further.
1962          */
1963         retval = DM_RET_STOP;
1964
1965         for (i = 0; i < num_patterns; i++) {
1966                 struct periph_match_pattern *cur_pattern;
1967
1968                 /*
1969                  * If the pattern in question isn't for a peripheral, we
1970                  * aren't interested.
1971                  */
1972                 if (patterns[i].type != DEV_MATCH_PERIPH)
1973                         continue;
1974
1975                 cur_pattern = &patterns[i].pattern.periph_pattern;
1976
1977                 /*
1978                  * If they want to match on anything, then we will do so.
1979                  */
1980                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1981                         /* set the copy flag */
1982                         retval |= DM_RET_COPY;
1983
1984                         /*
1985                          * We've already set the return action to stop,
1986                          * since there are no nodes below peripherals in
1987                          * the tree.
1988                          */
1989                         return(retval);
1990                 }
1991
1992                 /*
1993                  * Not sure why someone would do this...
1994                  */
1995                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1996                         continue;
1997
1998                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1999                  && (cur_pattern->path_id != periph->path->bus->path_id))
2000                         continue;
2001
2002                 /*
2003                  * For the target and lun id's, we have to make sure the
2004                  * target and lun pointers aren't NULL.  The xpt peripheral
2005                  * has a wildcard target and device.
2006                  */
2007                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2008                  && ((periph->path->target == NULL)
2009                  ||(cur_pattern->target_id != periph->path->target->target_id)))
2010                         continue;
2011
2012                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2013                  && ((periph->path->device == NULL)
2014                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
2015                         continue;
2016
2017                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2018                  && (cur_pattern->unit_number != periph->unit_number))
2019                         continue;
2020
2021                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2022                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
2023                              DEV_IDLEN) != 0))
2024                         continue;
2025
2026                 /*
2027                  * If we get to this point, the user definitely wants 
2028                  * information on this peripheral.  So tell the caller to
2029                  * copy the data out.
2030                  */
2031                 retval |= DM_RET_COPY;
2032
2033                 /*
2034                  * The return action has already been set to stop, since
2035                  * peripherals don't have any nodes below them in the EDT.
2036                  */
2037                 return(retval);
2038         }
2039
2040         /*
2041          * If we get to this point, the peripheral that was passed in
2042          * doesn't match any of the patterns.
2043          */
2044         return(retval);
2045 }
2046
2047 static int
2048 xptedtbusfunc(struct cam_eb *bus, void *arg)
2049 {
2050         struct ccb_dev_match *cdm;
2051         dev_match_ret retval;
2052
2053         cdm = (struct ccb_dev_match *)arg;
2054
2055         /*
2056          * If our position is for something deeper in the tree, that means
2057          * that we've already seen this node.  So, we keep going down.
2058          */
2059         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2060          && (cdm->pos.cookie.bus == bus)
2061          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2062          && (cdm->pos.cookie.target != NULL))
2063                 retval = DM_RET_DESCEND;
2064         else
2065                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2066
2067         /*
2068          * If we got an error, bail out of the search.
2069          */
2070         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2071                 cdm->status = CAM_DEV_MATCH_ERROR;
2072                 return(0);
2073         }
2074
2075         /*
2076          * If the copy flag is set, copy this bus out.
2077          */
2078         if (retval & DM_RET_COPY) {
2079                 int spaceleft, j;
2080
2081                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2082                         sizeof(struct dev_match_result));
2083
2084                 /*
2085                  * If we don't have enough space to put in another
2086                  * match result, save our position and tell the
2087                  * user there are more devices to check.
2088                  */
2089                 if (spaceleft < sizeof(struct dev_match_result)) {
2090                         bzero(&cdm->pos, sizeof(cdm->pos));
2091                         cdm->pos.position_type = 
2092                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2093
2094                         cdm->pos.cookie.bus = bus;
2095                         cdm->pos.generations[CAM_BUS_GENERATION]=
2096                                 bus_generation;
2097                         cdm->status = CAM_DEV_MATCH_MORE;
2098                         return(0);
2099                 }
2100                 j = cdm->num_matches;
2101                 cdm->num_matches++;
2102                 cdm->matches[j].type = DEV_MATCH_BUS;
2103                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2104                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2105                 cdm->matches[j].result.bus_result.unit_number =
2106                         bus->sim->unit_number;
2107                 strncpy(cdm->matches[j].result.bus_result.dev_name,
2108                         bus->sim->sim_name, DEV_IDLEN);
2109         }
2110
2111         /*
2112          * If the user is only interested in busses, there's no
2113          * reason to descend to the next level in the tree.
2114          */
2115         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2116                 return(1);
2117
2118         /*
2119          * If there is a target generation recorded, check it to
2120          * make sure the target list hasn't changed.
2121          */
2122         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2123          && (bus == cdm->pos.cookie.bus)
2124          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2125          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2126          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2127              bus->generation)) {
2128                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2129                 return(0);
2130         }
2131
2132         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2133          && (cdm->pos.cookie.bus == bus)
2134          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2135          && (cdm->pos.cookie.target != NULL))
2136                 return(xpttargettraverse(bus,
2137                                         (struct cam_et *)cdm->pos.cookie.target,
2138                                          xptedttargetfunc, arg));
2139         else
2140                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2141 }
2142
2143 static int
2144 xptedttargetfunc(struct cam_et *target, void *arg)
2145 {
2146         struct ccb_dev_match *cdm;
2147
2148         cdm = (struct ccb_dev_match *)arg;
2149
2150         /*
2151          * If there is a device list generation recorded, check it to
2152          * make sure the device list hasn't changed.
2153          */
2154         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2155          && (cdm->pos.cookie.bus == target->bus)
2156          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2157          && (cdm->pos.cookie.target == target)
2158          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2159          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2160          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2161              target->generation)) {
2162                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2163                 return(0);
2164         }
2165
2166         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2167          && (cdm->pos.cookie.bus == target->bus)
2168          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2169          && (cdm->pos.cookie.target == target)
2170          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2171          && (cdm->pos.cookie.device != NULL))
2172                 return(xptdevicetraverse(target,
2173                                         (struct cam_ed *)cdm->pos.cookie.device,
2174                                          xptedtdevicefunc, arg));
2175         else
2176                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2177 }
2178
/*
 * Per-device callback for the EDT (Existing Device Table) match traversal.
 * Matches "device" against the user-supplied pattern list in the
 * XPT_DEV_MATCH CCB, copies a device result record out when requested,
 * and then descends into this device's peripheral list.
 *
 * Returns 1 to continue the traversal, 0 to stop it; when stopping,
 * cdm->status has been set to the reason (error, buffer full, or list
 * changed).
 */
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{

	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			/*
			 * Record our exact position (bus/target/device
			 * cookies plus the generation count of each level)
			 * so a follow-up XPT_DEV_MATCH call can resume here
			 * and detect whether any of the lists changed in
			 * the meantime.
			 */
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type = 
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] = 
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)){
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/*
	 * If the saved position points at a specific peripheral of this
	 * device, resume the peripheral traversal there; otherwise start
	 * from the head of the peripheral list.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}
2299
/*
 * Per-peripheral callback for the EDT match traversal.  Matches "periph"
 * against the user's pattern list and, when the match result carries
 * DM_RET_COPY, appends a peripheral result record to the user's match
 * buffer.  Returns 1 to continue the traversal, 0 to stop it (with
 * cdm->status set to the reason).
 */
static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			/*
			 * Save a full bus/target/device/periph position,
			 * including the generation count of every level,
			 * so a later XPT_DEV_MATCH call can resume here and
			 * notice if any list changed underneath it.
			 */
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type = 
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
				CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] = 
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		/*
		 * NOTE(review): strncpy leaves periph_name unterminated if
		 * the source name is exactly DEV_IDLEN bytes long;
		 * presumably consumers treat this as a fixed-width field —
		 * verify before changing.
		 */
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}
2369
2370 static int
2371 xptedtmatch(struct ccb_dev_match *cdm)
2372 {
2373         int ret;
2374
2375         cdm->num_matches = 0;
2376
2377         /*
2378          * Check the bus list generation.  If it has changed, the user
2379          * needs to reset everything and start over.
2380          */
2381         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2382          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2383          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2384                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2385                 return(0);
2386         }
2387
2388         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2389          && (cdm->pos.cookie.bus != NULL))
2390                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2391                                      xptedtbusfunc, cdm);
2392         else
2393                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2394
2395         /*
2396          * If we get back 0, that means that we had to stop before fully
2397          * traversing the EDT.  It also means that one of the subroutines
2398          * has set the status field to the proper value.  If we get back 1,
2399          * we've fully traversed the EDT and copied out any matching entries.
2400          */
2401         if (ret == 1)
2402                 cdm->status = CAM_DEV_MATCH_LAST;
2403
2404         return(ret);
2405 }
2406
/*
 * Per-driver callback for the peripheral-driver-list match traversal.
 * Checks that any recorded peripheral-list generation for this driver is
 * still current, then walks the driver's peripheral instances — resuming
 * from a saved peripheral when the saved position points into this driver.
 * Returns 1 to continue, 0 to stop (cdm->status set).
 */
static int
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If a generation was recorded for this driver's unit list and it
	 * has changed, the caller must restart the match from scratch.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     (*pdrv)->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/* Resume at the saved peripheral if it belongs to this driver. */
	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptpdperiphtraverse(pdrv,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptplistperiphfunc, arg));
	else
		return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
}
2434
/*
 * Per-peripheral callback for the peripheral-driver-list match traversal.
 * Matches "periph" against the user's patterns and copies out a result
 * record when requested.  When the user's buffer fills, the resume
 * position saved here is pdrv-relative (driver pointer + peripheral),
 * unlike the EDT variant which records a bus/target/device position.
 * Returns 1 to continue, 0 to stop (cdm->status set).
 */
static int
xptplistperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			struct periph_driver **pdrv;

			pdrv = NULL;
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type = 
				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
				CAM_DEV_POS_PERIPH;

			/*
			 * This may look a bit non-sensical, but it is
			 * actually quite logical.  There are very few
			 * peripheral drivers, and bloating every peripheral
			 * structure with a pointer back to its parent
			 * peripheral driver linker set entry would cost
			 * more in the long run than doing this quick lookup.
			 */
			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
				if (strcmp((*pdrv)->driver_name,
				    periph->periph_name) == 0)
					break;
			}

			/* Name lookup failed: no driver entry matches. */
			if (*pdrv == NULL) {
				cdm->status = CAM_DEV_MATCH_ERROR;
				return(0);
			}

			cdm->pos.cookie.pdrv = pdrv;
			/*
			 * The periph generation slot does double duty, as
			 * does the periph pointer slot.  They are used for
			 * both edt and pdrv lookups and positioning.
			 */
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				(*pdrv)->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;

		/*
		 * The transport layer peripheral doesn't have a target or
		 * lun.
		 */
		if (periph->path->target)
			cdm->matches[j].result.periph_result.target_id =
				periph->path->target->target_id;
		else
			cdm->matches[j].result.periph_result.target_id = -1;

		if (periph->path->device)
			cdm->matches[j].result.periph_result.target_lun =
				periph->path->device->lun_id;
		else
			cdm->matches[j].result.periph_result.target_lun = -1;

		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		/*
		 * NOTE(review): strncpy leaves periph_name unterminated
		 * when the source is exactly DEV_IDLEN bytes — presumably
		 * a fixed-width field by convention; verify.
		 */
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}
2535
2536 static int
2537 xptperiphlistmatch(struct ccb_dev_match *cdm)
2538 {
2539         int ret;
2540
2541         cdm->num_matches = 0;
2542
2543         /*
2544          * At this point in the edt traversal function, we check the bus
2545          * list generation to make sure that no busses have been added or
2546          * removed since the user last sent a XPT_DEV_MATCH ccb through.
2547          * For the peripheral driver list traversal function, however, we
2548          * don't have to worry about new peripheral driver types coming or
2549          * going; they're in a linker set, and therefore can't change
2550          * without a recompile.
2551          */
2552
2553         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2554          && (cdm->pos.cookie.pdrv != NULL))
2555                 ret = xptpdrvtraverse(
2556                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2557                                 xptplistpdrvfunc, cdm);
2558         else
2559                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2560
2561         /*
2562          * If we get back 0, that means that we had to stop before fully
2563          * traversing the peripheral driver tree.  It also means that one of
2564          * the subroutines has set the status field to the proper value.  If
2565          * we get back 1, we've fully traversed the EDT and copied out any
2566          * matching entries.
2567          */
2568         if (ret == 1)
2569                 cdm->status = CAM_DEV_MATCH_LAST;
2570
2571         return(ret);
2572 }
2573
2574 static int
2575 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2576 {
2577         struct cam_eb *bus, *next_bus;
2578         int retval;
2579
2580         retval = 1;
2581
2582         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2583              bus != NULL;
2584              bus = next_bus) {
2585                 next_bus = TAILQ_NEXT(bus, links);
2586
2587                 retval = tr_func(bus, arg);
2588                 if (retval == 0)
2589                         return(retval);
2590         }
2591
2592         return(retval);
2593 }
2594
2595 static int
2596 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2597                   xpt_targetfunc_t *tr_func, void *arg)
2598 {
2599         struct cam_et *target, *next_target;
2600         int retval;
2601
2602         retval = 1;
2603         for (target = (start_target ? start_target :
2604                        TAILQ_FIRST(&bus->et_entries));
2605              target != NULL; target = next_target) {
2606
2607                 next_target = TAILQ_NEXT(target, links);
2608
2609                 retval = tr_func(target, arg);
2610
2611                 if (retval == 0)
2612                         return(retval);
2613         }
2614
2615         return(retval);
2616 }
2617
2618 static int
2619 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2620                   xpt_devicefunc_t *tr_func, void *arg)
2621 {
2622         struct cam_ed *device, *next_device;
2623         int retval;
2624
2625         retval = 1;
2626         for (device = (start_device ? start_device :
2627                        TAILQ_FIRST(&target->ed_entries));
2628              device != NULL;
2629              device = next_device) {
2630
2631                 next_device = TAILQ_NEXT(device, links);
2632
2633                 retval = tr_func(device, arg);
2634
2635                 if (retval == 0)
2636                         return(retval);
2637         }
2638
2639         return(retval);
2640 }
2641
2642 static int
2643 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2644                   xpt_periphfunc_t *tr_func, void *arg)
2645 {
2646         struct cam_periph *periph, *next_periph;
2647         int retval;
2648
2649         retval = 1;
2650
2651         for (periph = (start_periph ? start_periph :
2652                        SLIST_FIRST(&device->periphs));
2653              periph != NULL;
2654              periph = next_periph) {
2655
2656                 next_periph = SLIST_NEXT(periph, periph_links);
2657
2658                 retval = tr_func(periph, arg);
2659                 if (retval == 0)
2660                         return(retval);
2661         }
2662
2663         return(retval);
2664 }
2665
2666 static int
2667 xptpdrvtraverse(struct periph_driver **start_pdrv,
2668                 xpt_pdrvfunc_t *tr_func, void *arg)
2669 {
2670         struct periph_driver **pdrv;
2671         int retval;
2672
2673         retval = 1;
2674
2675         /*
2676          * We don't traverse the peripheral driver list like we do the
2677          * other lists, because it is a linker set, and therefore cannot be
2678          * changed during runtime.  If the peripheral driver list is ever
2679          * re-done to be something other than a linker set (i.e. it can
2680          * change while the system is running), the list traversal should
2681          * be modified to work like the other traversal functions.
2682          */
2683         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2684              *pdrv != NULL; pdrv++) {
2685                 retval = tr_func(pdrv, arg);
2686
2687                 if (retval == 0)
2688                         return(retval);
2689         }
2690
2691         return(retval);
2692 }
2693
2694 static int
2695 xptpdperiphtraverse(struct periph_driver **pdrv,
2696                     struct cam_periph *start_periph,
2697                     xpt_periphfunc_t *tr_func, void *arg)
2698 {
2699         struct cam_periph *periph, *next_periph;
2700         int retval;
2701
2702         retval = 1;
2703
2704         for (periph = (start_periph ? start_periph :
2705              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2706              periph = next_periph) {
2707
2708                 next_periph = TAILQ_NEXT(periph, unit_links);
2709
2710                 retval = tr_func(periph, arg);
2711                 if (retval == 0)
2712                         return(retval);
2713         }
2714         return(retval);
2715 }
2716
2717 static int
2718 xptdefbusfunc(struct cam_eb *bus, void *arg)
2719 {
2720         struct xpt_traverse_config *tr_config;
2721
2722         tr_config = (struct xpt_traverse_config *)arg;
2723
2724         if (tr_config->depth == XPT_DEPTH_BUS) {
2725                 xpt_busfunc_t *tr_func;
2726
2727                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2728
2729                 return(tr_func(bus, tr_config->tr_arg));
2730         } else
2731                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2732 }
2733
2734 static int
2735 xptdeftargetfunc(struct cam_et *target, void *arg)
2736 {
2737         struct xpt_traverse_config *tr_config;
2738
2739         tr_config = (struct xpt_traverse_config *)arg;
2740
2741         if (tr_config->depth == XPT_DEPTH_TARGET) {
2742                 xpt_targetfunc_t *tr_func;
2743
2744                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2745
2746                 return(tr_func(target, tr_config->tr_arg));
2747         } else
2748                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2749 }
2750
2751 static int
2752 xptdefdevicefunc(struct cam_ed *device, void *arg)
2753 {
2754         struct xpt_traverse_config *tr_config;
2755
2756         tr_config = (struct xpt_traverse_config *)arg;
2757
2758         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2759                 xpt_devicefunc_t *tr_func;
2760
2761                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2762
2763                 return(tr_func(device, tr_config->tr_arg));
2764         } else
2765                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2766 }
2767
2768 static int
2769 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2770 {
2771         struct xpt_traverse_config *tr_config;
2772         xpt_periphfunc_t *tr_func;
2773
2774         tr_config = (struct xpt_traverse_config *)arg;
2775
2776         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2777
2778         /*
2779          * Unlike the other default functions, we don't check for depth
2780          * here.  The peripheral driver level is the last level in the EDT,
2781          * so if we're here, we should execute the function in question.
2782          */
2783         return(tr_func(periph, tr_config->tr_arg));
2784 }
2785
2786 /*
2787  * Execute the given function for every bus in the EDT.
2788  */
2789 static int
2790 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2791 {
2792         struct xpt_traverse_config tr_config;
2793
2794         tr_config.depth = XPT_DEPTH_BUS;
2795         tr_config.tr_func = tr_func;
2796         tr_config.tr_arg = arg;
2797
2798         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2799 }
2800
2801 #ifdef notusedyet
2802 /*
2803  * Execute the given function for every target in the EDT.
2804  */
2805 static int
2806 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2807 {
2808         struct xpt_traverse_config tr_config;
2809
2810         tr_config.depth = XPT_DEPTH_TARGET;
2811         tr_config.tr_func = tr_func;
2812         tr_config.tr_arg = arg;
2813
2814         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2815 }
2816 #endif /* notusedyet */
2817
2818 /*
2819  * Execute the given function for every device in the EDT.
2820  */
2821 static int
2822 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2823 {
2824         struct xpt_traverse_config tr_config;
2825
2826         tr_config.depth = XPT_DEPTH_DEVICE;
2827         tr_config.tr_func = tr_func;
2828         tr_config.tr_arg = arg;
2829
2830         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2831 }
2832
2833 #ifdef notusedyet
2834 /*
2835  * Execute the given function for every peripheral in the EDT.
2836  */
2837 static int
2838 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2839 {
2840         struct xpt_traverse_config tr_config;
2841
2842         tr_config.depth = XPT_DEPTH_PERIPH;
2843         tr_config.tr_func = tr_func;
2844         tr_config.tr_arg = arg;
2845
2846         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2847 }
2848 #endif /* notusedyet */
2849
/*
 * Per-device callback used when a new async handler registers for
 * AC_FOUND_DEVICE: replays a "device found" event to the new handler
 * (cur_entry) for each already-known, configured device.  Builds a
 * temporary path to the device, fetches its getdev information via
 * XPT_GDEV_TYPE, and invokes the handler's callback.  Always returns 1
 * so the traversal visits every device.
 */
static int
xptsetasyncfunc(struct cam_ed *device, void *arg)
{
	struct cam_path path;
	struct ccb_getdev cgd;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	/*
	 * Don't report unconfigured devices (Wildcard devs,
	 * devices only for target mode, device instances
	 * that have been invalidated but are waiting for
	 * their last reference count to be released).
	 */
	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
		return (1);

	xpt_compile_path(&path,
			 NULL,
			 device->target->bus->path_id,
			 device->target->target_id,
			 device->lun_id);
	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)&cgd);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_FOUND_DEVICE,
			    &path, &cgd);
	/* The compiled path holds references; drop them before returning. */
	xpt_release_path(&path);

	return(1);
}
2883
/*
 * Per-bus callback used when a new async handler registers for
 * AC_PATH_REGISTERED: replays a "path registered" event to the new
 * handler (cur_entry) for each existing bus.  Builds a wildcard path on
 * the bus, performs an XPT_PATH_INQ to gather the bus's path inquiry
 * data, and invokes the handler's callback.  Always returns 1 so the
 * traversal visits every bus.
 */
static int
xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path path;
	struct ccb_pathinq cpi;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	xpt_compile_path(&path, /*periph*/NULL,
			 bus->sim->path_id,
			 CAM_TARGET_WILDCARD,
			 CAM_LUN_WILDCARD);
	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_PATH_REGISTERED,
			    &path, &cpi);
	/* Release the references taken by xpt_compile_path. */
	xpt_release_path(&path);

	return(1);
}
2907
2908 void
2909 xpt_action(union ccb *start_ccb)
2910 {
2911         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2912
2913         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2914
2915         crit_enter();
2916
2917         switch (start_ccb->ccb_h.func_code) {
2918         case XPT_SCSI_IO:
2919         {
2920 #ifdef CAM_NEW_TRAN_CODE
2921                 struct cam_ed *device;
2922 #endif /* CAM_NEW_TRAN_CODE */
2923 #ifdef CAMDEBUG
2924                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2925                 struct cam_path *path;
2926
2927                 path = start_ccb->ccb_h.path;
2928 #endif
2929
2930                 /*
2931                  * For the sake of compatibility with SCSI-1
2932                  * devices that may not understand the identify
2933                  * message, we include lun information in the
2934                  * second byte of all commands.  SCSI-1 specifies
2935                  * that luns are a 3 bit value and reserves only 3
2936                  * bits for lun information in the CDB.  Later
2937                  * revisions of the SCSI spec allow for more than 8
2938                  * luns, but have deprecated lun information in the
2939                  * CDB.  So, if the lun won't fit, we must omit.
2940                  *
2941                  * Also be aware that during initial probing for devices,
2942                  * the inquiry information is unknown but initialized to 0.
2943                  * This means that this code will be exercised while probing
2944                  * devices with an ANSI revision greater than 2.
2945                  */
2946 #ifdef CAM_NEW_TRAN_CODE
2947                 device = start_ccb->ccb_h.path->device;
2948                 if (device->protocol_version <= SCSI_REV_2
2949 #else /* CAM_NEW_TRAN_CODE */
2950                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2951 #endif /* CAM_NEW_TRAN_CODE */
2952                  && start_ccb->ccb_h.target_lun < 8
2953                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2954
2955                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2956                             start_ccb->ccb_h.target_lun << 5;
2957                 }
2958                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2959                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2960                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2961                                        &path->device->inq_data),
2962                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2963                                           cdb_str, sizeof(cdb_str))));
2964                 /* FALLTHROUGH */
2965         }
2966         case XPT_TARGET_IO:
2967         case XPT_CONT_TARGET_IO:
2968                 start_ccb->csio.sense_resid = 0;
2969                 start_ccb->csio.resid = 0;
2970                 /* FALLTHROUGH */
2971         case XPT_RESET_DEV:
2972         case XPT_ENG_EXEC:
2973         {
2974                 struct cam_path *path;
2975                 int runq;
2976
2977                 path = start_ccb->ccb_h.path;
2978
2979                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2980                 if (path->device->qfrozen_cnt == 0)
2981                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
2982                 else
2983                         runq = 0;
2984                 if (runq != 0)
2985                         xpt_run_dev_sendq(path->bus);
2986                 break;
2987         }
2988         case XPT_SET_TRAN_SETTINGS:
2989         {
2990                 xpt_set_transfer_settings(&start_ccb->cts,
2991                                           start_ccb->ccb_h.path->device,
2992                                           /*async_update*/FALSE);
2993                 break;
2994         }
2995         case XPT_CALC_GEOMETRY:
2996         {
2997                 struct cam_sim *sim;
2998
2999                 /* Filter out garbage */
3000                 if (start_ccb->ccg.block_size == 0
3001                  || start_ccb->ccg.volume_size == 0) {
3002                         start_ccb->ccg.cylinders = 0;
3003                         start_ccb->ccg.heads = 0;
3004                         start_ccb->ccg.secs_per_track = 0;
3005                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3006                         break;
3007                 }
3008                 sim = start_ccb->ccb_h.path->bus->sim;
3009                 (*(sim->sim_action))(sim, start_ccb);
3010                 break;
3011         }
3012         case XPT_ABORT:
3013         {
3014                 union ccb* abort_ccb;
3015
3016                 abort_ccb = start_ccb->cab.abort_ccb;
3017                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3018
3019                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
3020                                 struct cam_ccbq *ccbq;
3021
3022                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3023                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3024                                 abort_ccb->ccb_h.status =
3025                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3026                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3027                                 xpt_done(abort_ccb);
3028                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3029                                 break;
3030                         }
3031                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3032                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3033                                 /*
3034                                  * We've caught this ccb en route to
3035                                  * the SIM.  Flag it for abort and the
3036                                  * SIM will do so just before starting
3037                                  * real work on the CCB.
3038                                  */
3039                                 abort_ccb->ccb_h.status =
3040                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3041                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3042                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3043                                 break;
3044                         }
3045                 } 
3046                 if (XPT_FC_IS_QUEUED(abort_ccb)
3047                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3048                         /*
3049                          * It's already completed but waiting
3050                          * for our SWI to get to it.
3051                          */
3052                         start_ccb->ccb_h.status = CAM_UA_ABORT;
3053                         break;
3054                 }
3055                 /*
3056                  * If we weren't able to take care of the abort request
3057                  * in the XPT, pass the request down to the SIM for processing.
3058                  */
3059                 /* FALLTHROUGH */
3060         }
3061         case XPT_ACCEPT_TARGET_IO:
3062         case XPT_EN_LUN:
3063         case XPT_IMMED_NOTIFY:
3064         case XPT_NOTIFY_ACK:
3065         case XPT_GET_TRAN_SETTINGS:
3066         case XPT_RESET_BUS:
3067         {
3068                 struct cam_sim *sim;
3069
3070                 sim = start_ccb->ccb_h.path->bus->sim;
3071                 (*(sim->sim_action))(sim, start_ccb);
3072                 break;
3073         }
3074         case XPT_PATH_INQ:
3075         {
3076                 struct cam_sim *sim;
3077
3078                 sim = start_ccb->ccb_h.path->bus->sim;
3079                 (*(sim->sim_action))(sim, start_ccb);
3080                 break;
3081         }
3082         case XPT_PATH_STATS:
3083                 start_ccb->cpis.last_reset =
3084                         start_ccb->ccb_h.path->bus->last_reset;
3085                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3086                 break;
3087         case XPT_GDEV_TYPE:
3088         {
3089                 struct cam_ed *dev;
3090
3091                 dev = start_ccb->ccb_h.path->device;
3092                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3093                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3094                 } else {
3095                         struct ccb_getdev *cgd;
3096                         struct cam_eb *bus;
3097                         struct cam_et *tar;
3098
3099                         cgd = &start_ccb->cgd;
3100                         bus = cgd->ccb_h.path->bus;
3101                         tar = cgd->ccb_h.path->target;
3102                         cgd->inq_data = dev->inq_data;
3103                         cgd->ccb_h.status = CAM_REQ_CMP;
3104                         cgd->serial_num_len = dev->serial_num_len;
3105                         if ((dev->serial_num_len > 0)
3106                          && (dev->serial_num != NULL))
3107                                 bcopy(dev->serial_num, cgd->serial_num,
3108                                       dev->serial_num_len);
3109                 }
3110                 break; 
3111         }
3112         case XPT_GDEV_STATS:
3113         {
3114                 struct cam_ed *dev;
3115
3116                 dev = start_ccb->ccb_h.path->device;
3117                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3118                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3119                 } else {
3120                         struct ccb_getdevstats *cgds;
3121                         struct cam_eb *bus;
3122                         struct cam_et *tar;
3123
3124                         cgds = &start_ccb->cgds;
3125                         bus = cgds->ccb_h.path->bus;
3126                         tar = cgds->ccb_h.path->target;
3127                         cgds->dev_openings = dev->ccbq.dev_openings;
3128                         cgds->dev_active = dev->ccbq.dev_active;
3129                         cgds->devq_openings = dev->ccbq.devq_openings;
3130                         cgds->devq_queued = dev->ccbq.queue.entries;
3131                         cgds->held = dev->ccbq.held;
3132                         cgds->last_reset = tar->last_reset;
3133                         cgds->maxtags = dev->quirk->maxtags;
3134                         cgds->mintags = dev->quirk->mintags;
3135                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3136                                 cgds->last_reset = bus->last_reset;
3137                         cgds->ccb_h.status = CAM_REQ_CMP;
3138                 }
3139                 break;
3140         }
3141         case XPT_GDEVLIST:
3142         {
3143                 struct cam_periph       *nperiph;
3144                 struct periph_list      *periph_head;
3145                 struct ccb_getdevlist   *cgdl;
3146                 u_int                   i;
3147                 struct cam_ed           *device;
3148                 int                     found;
3149
3150
3151                 found = 0;
3152
3153                 /*
3154                  * Don't want anyone mucking with our data.
3155                  */
3156                 device = start_ccb->ccb_h.path->device;
3157                 periph_head = &device->periphs;
3158                 cgdl = &start_ccb->cgdl;
3159
3160                 /*
3161                  * Check and see if the list has changed since the user
3162                  * last requested a list member.  If so, tell them that the
3163                  * list has changed, and therefore they need to start over 
3164                  * from the beginning.
3165                  */
3166                 if ((cgdl->index != 0) && 
3167                     (cgdl->generation != device->generation)) {
3168                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3169                         break;
3170                 }
3171
3172                 /*
3173                  * Traverse the list of peripherals and attempt to find 
3174                  * the requested peripheral.
3175                  */
3176                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3177                      (nperiph != NULL) && (i <= cgdl->index);
3178                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3179                         if (i == cgdl->index) {
3180                                 strncpy(cgdl->periph_name,
3181                                         nperiph->periph_name,
3182                                         DEV_IDLEN);
3183                                 cgdl->unit_number = nperiph->unit_number;
3184                                 found = 1;
3185                         }
3186                 }
3187                 if (found == 0) {
3188                         cgdl->status = CAM_GDEVLIST_ERROR;
3189                         break;
3190                 }
3191
3192                 if (nperiph == NULL)
3193                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3194                 else
3195                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3196
3197                 cgdl->index++;
3198                 cgdl->generation = device->generation;
3199
3200                 cgdl->ccb_h.status = CAM_REQ_CMP;
3201                 break;
3202         }
3203         case XPT_DEV_MATCH:
3204         {
3205                 dev_pos_type position_type;
3206                 struct ccb_dev_match *cdm;
3207                 int ret;
3208
3209                 cdm = &start_ccb->cdm;
3210
3211                 /*
3212                  * Prevent EDT changes while we traverse it.
3213                  */
3214                 /*
3215                  * There are two ways of getting at information in the EDT.
3216                  * The first way is via the primary EDT tree.  It starts
3217                  * with a list of busses, then a list of targets on a bus,
3218                  * then devices/luns on a target, and then peripherals on a
3219                  * device/lun.  The "other" way is by the peripheral driver
3220                  * lists.  The peripheral driver lists are organized by
3221                  * peripheral driver.  (obviously)  So it makes sense to
3222                  * use the peripheral driver list if the user is looking
3223                  * for something like "da1", or all "da" devices.  If the
3224                  * user is looking for something on a particular bus/target
3225                  * or lun, it's generally better to go through the EDT tree.
3226                  */
3227
3228                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3229                         position_type = cdm->pos.position_type;
3230                 else {
3231                         u_int i;
3232
3233                         position_type = CAM_DEV_POS_NONE;
3234
3235                         for (i = 0; i < cdm->num_patterns; i++) {
3236                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3237                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3238                                         position_type = CAM_DEV_POS_EDT;
3239                                         break;
3240                                 }
3241                         }
3242
3243                         if (cdm->num_patterns == 0)
3244                                 position_type = CAM_DEV_POS_EDT;
3245                         else if (position_type == CAM_DEV_POS_NONE)
3246                                 position_type = CAM_DEV_POS_PDRV;
3247                 }
3248
3249                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3250                 case CAM_DEV_POS_EDT:
3251                         ret = xptedtmatch(cdm);
3252                         break;
3253                 case CAM_DEV_POS_PDRV:
3254                         ret = xptperiphlistmatch(cdm);
3255                         break;
3256                 default:
3257                         cdm->status = CAM_DEV_MATCH_ERROR;
3258                         break;
3259                 }
3260
3261                 if (cdm->status == CAM_DEV_MATCH_ERROR)
3262                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3263                 else
3264                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3265
3266                 break;
3267         }
3268         case XPT_SASYNC_CB:
3269         {
3270                 struct ccb_setasync *csa;
3271                 struct async_node *cur_entry;
3272                 struct async_list *async_head;
3273                 u_int32_t added;
3274
3275                 csa = &start_ccb->csa;
3276                 added = csa->event_enable;
3277                 async_head = &csa->ccb_h.path->device->asyncs;
3278
3279                 /*
3280                  * If there is already an entry for us, simply
3281                  * update it.
3282                  */
3283                 cur_entry = SLIST_FIRST(async_head);
3284                 while (cur_entry != NULL) {
3285                         if ((cur_entry->callback_arg == csa->callback_arg)
3286                          && (cur_entry->callback == csa->callback))
3287                                 break;
3288                         cur_entry = SLIST_NEXT(cur_entry, links);
3289                 }
3290
3291                 if (cur_entry != NULL) {
3292                         /*
3293                          * If the request has no flags set,
3294                          * remove the entry.
3295                          */
3296                         added &= ~cur_entry->event_enable;
3297                         if (csa->event_enable == 0) {
3298                                 SLIST_REMOVE(async_head, cur_entry,
3299                                              async_node, links);
3300                                 csa->ccb_h.path->device->refcount--;
3301                                 kfree(cur_entry, M_DEVBUF);
3302                         } else {
3303                                 cur_entry->event_enable = csa->event_enable;
3304                         }
3305                 } else {
3306                         cur_entry = kmalloc(sizeof(*cur_entry), 
3307                                             M_DEVBUF, M_INTWAIT);
3308                         cur_entry->event_enable = csa->event_enable;
3309                         cur_entry->callback_arg = csa->callback_arg;
3310                         cur_entry->callback = csa->callback;
3311                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3312                         csa->ccb_h.path->device->refcount++;
3313                 }
3314
3315                 if ((added & AC_FOUND_DEVICE) != 0) {
3316                         /*
3317                          * Get this peripheral up to date with all
3318                          * the currently existing devices.
3319                          */
3320                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3321                 }
3322                 if ((added & AC_PATH_REGISTERED) != 0) {
3323                         /*
3324                          * Get this peripheral up to date with all
3325                          * the currently existing busses.
3326                          */
3327                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3328                 }
3329                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3330                 break;
3331         }
3332         case XPT_REL_SIMQ:
3333         {
3334                 struct ccb_relsim *crs;
3335                 struct cam_ed *dev;
3336
3337                 crs = &start_ccb->crs;
3338                 dev = crs->ccb_h.path->device;
3339                 if (dev == NULL) {
3340
3341                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3342                         break;
3343                 }
3344
3345                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3346
3347                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3348
3349                                 /* Don't ever go below one opening */
3350                                 if (crs->openings > 0) {
3351                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
3352                                                             crs->openings);
3353
3354                                         if (bootverbose) {
3355                                                 xpt_print_path(crs->ccb_h.path);
3356                                                 kprintf("tagged openings "
3357                                                        "now %d\n",
3358                                                        crs->openings);
3359                                         }
3360                                 }
3361                         }
3362                 }
3363
3364                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3365
3366                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3367
3368                                 /*
3369                                  * Just extend the old timeout and decrement
3370                                  * the freeze count so that a single timeout
3371                                  * is sufficient for releasing the queue.
3372                                  */
3373                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3374                                 callout_stop(&dev->c_handle);
3375                         } else {
3376
3377                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3378                         }
3379
3380                         callout_reset(&dev->c_handle,
3381                                       (crs->release_timeout * hz) / 1000, 
3382                                       xpt_release_devq_timeout, dev);
3383
3384                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3385
3386                 }
3387
3388                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3389
3390                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3391                                 /*
3392                                  * Decrement the freeze count so that a single
3393                                  * completion is still sufficient to unfreeze
3394                                  * the queue.
3395                                  */
3396                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3397                         } else {
3398                                 
3399                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3400                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3401                         }
3402                 }
3403
3404                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3405
3406                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3407                          || (dev->ccbq.dev_active == 0)) {
3408
3409                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3410                         } else {
3411                                 
3412                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3413                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3414                         }
3415                 }
3416                 
3417                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3418
3419                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
3420                                          /*run_queue*/TRUE);
3421                 }
3422                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3423                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3424                 break;
3425         }
3426         case XPT_SCAN_BUS:
3427                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3428                 break;
3429         case XPT_SCAN_LUN:
3430                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3431                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
3432                              start_ccb);
3433                 break;
3434         case XPT_DEBUG: {
3435 #ifdef CAMDEBUG
3436 #ifdef CAM_DEBUG_DELAY
3437                 cam_debug_delay = CAM_DEBUG_DELAY;
3438 #endif
3439                 cam_dflags = start_ccb->cdbg.flags;
3440                 if (cam_dpath != NULL) {
3441                         xpt_free_path(cam_dpath);
3442                         cam_dpath = NULL;
3443                 }
3444
3445                 if (cam_dflags != CAM_DEBUG_NONE) {
3446                         if (xpt_create_path(&cam_dpath, xpt_periph,
3447                                             start_ccb->ccb_h.path_id,
3448                                             start_ccb->ccb_h.target_id,
3449                                             start_ccb->ccb_h.target_lun) !=
3450                                             CAM_REQ_CMP) {
3451                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3452                                 cam_dflags = CAM_DEBUG_NONE;
3453                         } else {
3454                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3455                                 xpt_print_path(cam_dpath);
3456                                 kprintf("debugging flags now %x\n", cam_dflags);
3457                         }
3458                 } else {
3459                         cam_dpath = NULL;
3460                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3461                 }
3462 #else /* !CAMDEBUG */
3463                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3464 #endif /* CAMDEBUG */
3465                 break;
3466         }
3467         case XPT_NOOP:
3468                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3469                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3470                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3471                 break;
3472         default:
3473         case XPT_SDEV_TYPE:
3474         case XPT_TERM_IO:
3475         case XPT_ENG_INQ:
3476                 /* XXX Implement */
3477                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3478                 break;
3479         }
3480         crit_exit();
3481 }
3482
/*
 * Execute a CCB by polling the SIM rather than waiting for an interrupt.
 * Intended for interrupt-free contexts (e.g. crash dumps).  The CCB's
 * ccb_h.timeout is treated as a count of 1ms polling iterations.  On
 * timeout the CCB status is set to CAM_CMD_TIMEOUT; if no opening could
 * be obtained at all, it is set to CAM_RESRC_UNAVAIL.
 */
void
xpt_polled_action(union ccb *start_ccb)
{
	u_int32_t timeout;	/* remaining 1ms polling iterations */
	struct    cam_sim *sim; 
	struct    cam_devq *devq;
	struct    cam_ed *dev;

	timeout = start_ccb->ccb_h.timeout;
	sim = start_ccb->ccb_h.path->bus->sim;
	devq = sim->devq;
	dev = start_ccb->ccb_h.path->device;

	crit_enter();

	/*
	 * Steal an opening so that no other queued requests
	 * can get it before us while we simulate interrupts.
	 */
	dev->ccbq.devq_openings--;
	dev->ccbq.dev_openings--;	
	
	/*
	 * Poll the SIM (delivering completions via swi_cambio()) until
	 * an opening frees up or the timeout expires.
	 */
	while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
	   && (--timeout > 0)) {
		DELAY(1000);
		(*(sim->sim_poll))(sim);
		swi_cambio(NULL, NULL);		
	}
	
	/* Return the opening stolen above. */
	dev->ccbq.devq_openings++;
	dev->ccbq.dev_openings++;
	
	if (timeout != 0) {
		/* Dispatch the CCB, then poll it to completion. */
		xpt_action(start_ccb);
		while(--timeout > 0) {
			(*(sim->sim_poll))(sim);
			swi_cambio(NULL, NULL);
			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
			    != CAM_REQ_INPROG)
				break;
			DELAY(1000);
		}
		if (timeout == 0) {
			/*
			 * XXX Is it worth adding a sim_timeout entry
			 * point so we can attempt recovery?  If
			 * this is only used for dumps, I don't think
			 * it is.
			 */
			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		}
	} else {
		/* Never obtained an opening; report resource shortage. */
		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	}
	crit_exit();
}
3539         
/*
 * Schedule a peripheral driver to receive a ccb when its
 * target device has space for more transactions.
 */
3544 void
3545 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3546 {
3547         struct cam_ed *device;
3548         int runq;
3549
3550         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3551         device = perph->path->device;
3552         crit_enter();
3553         if (periph_is_queued(perph)) {
3554                 /* Simply reorder based on new priority */
3555                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3556                           ("   change priority to %d\n", new_priority));
3557                 if (new_priority < perph->pinfo.priority) {
3558                         camq_change_priority(&device->drvq,
3559                                              perph->pinfo.index,
3560                                              new_priority);
3561                 }
3562                 runq = 0;
3563         } else {
3564                 /* New entry on the queue */
3565                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3566                           ("   added periph to queue\n"));
3567                 perph->pinfo.priority = new_priority;
3568                 perph->pinfo.generation = ++device->drvq.generation;
3569                 camq_insert(&device->drvq, &perph->pinfo);
3570                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3571         }
3572         crit_exit();
3573         if (runq != 0) {
3574                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3575                           ("   calling xpt_run_devq\n"));
3576                 xpt_run_dev_allocq(perph->path->bus);
3577         }
3578 }
3579
3580
3581 /*
3582  * Schedule a device to run on a given queue.
3583  * If the device was inserted as a new entry on the queue,
3584  * return 1 meaning the device queue should be run. If we
3585  * were already queued, implying someone else has already
3586  * started the queue, return 0 so the caller doesn't attempt
3587  * to run the queue.  Must be run in a critical section.
3588  */
3589 static int
3590 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3591                  u_int32_t new_priority)
3592 {
3593         int retval;
3594         u_int32_t old_priority;
3595
3596         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3597
3598         old_priority = pinfo->priority;
3599
3600         /*
3601          * Are we already queued?
3602          */
3603         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3604                 /* Simply reorder based on new priority */
3605                 if (new_priority < old_priority) {
3606                         camq_change_priority(queue, pinfo->index,
3607                                              new_priority);
3608                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3609                                         ("changed priority to %d\n",
3610                                          new_priority));
3611                 }
3612                 retval = 0;
3613         } else {
3614                 /* New entry on the queue */
3615                 if (new_priority < old_priority)
3616                         pinfo->priority = new_priority;
3617
3618                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3619                                 ("Inserting onto queue\n"));
3620                 pinfo->generation = ++queue->generation;
3621                 camq_insert(queue, pinfo);
3622                 retval = 1;
3623         }
3624         return (retval);
3625 }
3626
/*
 * Drain the bus's allocation queue: while there are queued devices,
 * free allocation openings, and the queue is not frozen by another
 * party, hand a freshly allocated CCB to the highest-priority waiting
 * peripheral's periph_start() routine.
 */
static void
xpt_run_dev_allocq(struct cam_eb *bus)
{
	struct	cam_devq *devq;

	if ((devq = bus->sim->devq) == NULL) {
		CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
		return;
	}
	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
			("   qfrozen_cnt == 0x%x, entries == %d, "
			 "openings == %d, active == %d\n",
			 devq->alloc_queue.qfrozen_cnt,
			 devq->alloc_queue.entries,
			 devq->alloc_openings,
			 devq->alloc_active));

	crit_enter();
	/*
	 * Freeze the queue while we run it so a recursive invocation
	 * (e.g. from a periph_start callout) does not also run it; the
	 * loop condition only proceeds while ours is the sole freeze.
	 */
	devq->alloc_queue.qfrozen_cnt++;
	while ((devq->alloc_queue.entries > 0)
	    && (devq->alloc_openings > 0)
	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
		struct	cam_ed_qinfo *qinfo;
		struct	cam_ed *device;
		union	ccb *work_ccb;
		struct	cam_periph *drv;
		struct	camq *drvq;
		
		/* Pop the highest-priority device waiting for a CCB. */
		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
							   CAMQ_HEAD);
		device = qinfo->device;

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("running device %p\n", device));

		drvq = &device->drvq;

#ifdef CAMDEBUG
		if (drvq->entries <= 0) {
			panic("xpt_run_dev_allocq: "
			      "Device on queue without any work to do");
		}
#endif
		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
			devq->alloc_openings--;
			devq->alloc_active++;
			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
			/* Leave the critical section across the driver call. */
			crit_exit();
			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
				      drv->pinfo.priority);
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("calling periph start\n"));
			drv->periph_start(drv, work_ccb);
		} else {
			/*
			 * Malloc failure in alloc_ccb
			 */
			/*
			 * XXX add us to a list to be run from free_ccb
			 * if we don't have any ccbs active on this
			 * device queue otherwise we may never get run
			 * again.
			 */
			break;
		}
	
		/* Raise IPL for possible insertion and test at top of loop */
		crit_enter();

		if (drvq->entries > 0) {
			/* We have more work.  Attempt to reschedule */
			xpt_schedule_dev_allocq(bus, device);
		}
	}
	/* Thaw the queue; other callers may now run it. */
	devq->alloc_queue.qfrozen_cnt--;
	crit_exit();
}
3706
3707 static void
3708 xpt_run_dev_sendq(struct cam_eb *bus)
3709 {
3710         struct  cam_devq *devq;
3711
3712         if ((devq = bus->sim->devq) == NULL) {
3713                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3714                 return;
3715         }
3716         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3717
3718         crit_enter();
3719         devq->send_queue.qfrozen_cnt++;
3720         while ((devq->send_queue.entries > 0)
3721             && (devq->send_openings > 0)) {
3722                 struct  cam_ed_qinfo *qinfo;
3723                 struct  cam_ed *device;
3724                 union ccb *work_ccb;
3725                 struct  cam_sim *sim;
3726
3727                 if (devq->send_queue.qfrozen_cnt > 1) {
3728                         break;
3729                 }
3730
3731                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3732                                                            CAMQ_HEAD);
3733                 device = qinfo->device;
3734
3735                 /*
3736                  * If the device has been "frozen", don't attempt
3737                  * to run it.
3738                  */
3739                 if (device->qfrozen_cnt > 0) {
3740                         continue;
3741                 }
3742
3743                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3744                                 ("running device %p\n", device));
3745
3746                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3747                 if (work_ccb == NULL) {
3748                         kprintf("device on run queue with no ccbs???\n");
3749                         continue;
3750                 }
3751
3752                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3753
3754                         if (num_highpower <= 0) {
3755                                 /*
3756                                  * We got a high power command, but we
3757                                  * don't have any available slots.  Freeze
3758                                  * the device queue until we have a slot
3759                                  * available.
3760                                  */
3761                                 device->qfrozen_cnt++;
3762                                 STAILQ_INSERT_TAIL(&highpowerq, 
3763                                                    &work_ccb->ccb_h, 
3764                                                    xpt_links.stqe);
3765
3766                                 continue;
3767                         } else {
3768                                 /*
3769                                  * Consume a high power slot while
3770                                  * this ccb runs.
3771                                  */
3772                                 num_highpower--;
3773                         }
3774                 }
3775                 devq->active_dev = device;
3776                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3777
3778                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3779
3780                 devq->send_openings--;
3781                 devq->send_active++;            
3782                 
3783                 if (device->ccbq.queue.entries > 0)
3784                         xpt_schedule_dev_sendq(bus, device);
3785
3786                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3787                         /*
3788                          * The client wants to freeze the queue
3789                          * after this CCB is sent.
3790                          */
3791                         device->qfrozen_cnt++;
3792                 }
3793
3794                 /* In Target mode, the peripheral driver knows best... */
3795                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3796                         if ((device->inq_flags & SID_CmdQue) != 0
3797                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3798                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3799                         else