/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.45 2007/11/28 21:29:18 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include "opt_cam.h"

/* Data structures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};
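
/*
 * Consumers do not build async_node entries directly; they register by
 * sending an XPT_SASYNC_CB CCB through xpt_action().  A hedged sketch of
 * the usual registration idiom (the mydriver_async and softc names are
 * illustrative, not from this file):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriver_async;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */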

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
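
/*
 * Because of the #ifndef guard above, a kernel config can raise this
 * limit with the standard options(5) mechanism, e.g. (illustrative):
 *
 *	options CAM_MAX_HIGHPOWER=8
 */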

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on each bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk; /* Oddities about this device */
					/* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
#endif /* CAM_NEW_TRAN_CODE */
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define CAM_QUIRK_NOLUNS	0x01
#define CAM_QUIRK_NOSERIAL	0x02
#define CAM_QUIRK_HILUNS	0x04
	u_int mintags;
	u_int maxtags;
};
#define CAM_SCSI2_MAXLUN	8

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *	   and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		  /*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

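/*
 * The table is searched first-match, so more specific entries must appear
 * before the catch-all default entry at the end.  A hedged sketch of how a
 * device's inquiry data is matched against it (xpt_find_quirk(), later in
 * this file, does essentially this via cam_quirkmatch()):
 *
 *	match = cam_quirkmatch((caddr_t)&device->inq_data,
 *			       (caddr_t)xpt_quirk_table,
 *			       xpt_quirk_table_size,
 *			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
 *	device->quirk = (struct xpt_quirk_entry *)match;
 */
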
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
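
/*
 * A dev_match_ret packs two fields: the low nibble carries flags such as
 * DM_RET_COPY, the high nibble an action.  The match code below decodes
 * it roughly like this (illustrative sketch, not a quote of that code):
 *
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR)
 *		... report the error to the caller ...
 *	if ((retval & DM_RET_FLAG_MASK & DM_RET_COPY) != 0)
 *		... copy this node into the user's match buffer ...
 */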

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
	{ "xpt", XPT_CDEV_MAJOR, 0 },
	.d_open = xptopen,
	.d_close = xptclose,
	.d_ioctl = xptioctl
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
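
/*
 * A kernel config that satisfies the constraints above would look roughly
 * like this (illustrative values; the B/T/L triple picks the path to
 * trace and the flags come from cam_debug.h):
 *
 *	options CAMDEBUG
 *	options CAM_DEBUG_BUS=0
 *	options CAM_DEBUG_TARGET=1
 *	options CAM_DEBUG_LUN=0
 *	options CAM_DEBUG_FLAGS="(CAM_DEBUG_INFO|CAM_DEBUG_CDB)"
 */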

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static void	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_camnet;
static inthand2_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void	 xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init(void)
{
	dev_ops_add(&xpt_ops, 0, 0);
	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((ap->a_oflags & O_NONBLOCK) != 0) {
		kprintf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(ap->a_cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
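	/*
	 * A hedged userland sketch of driving this path, mirroring what
	 * camcontrol(8)-style tools do (the fd handling is illustrative):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_SCAN_BUS;
	 *	ccb.ccb_h.path_id = 0;
	 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	 *	ccb.ccb_h.pinfo.priority = 5;
	 *	ccb.crcn.flags = CAM_FLAG_NONE;
	 *	if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1 ||
	 *	    (ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
	 *		... report failure ...
	 */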
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)ap->a_data;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, so we can send it on
			 * directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
	 * impossible for the device peripheral driver list to change since
	 * we look at the whole thing in one pass, and we do it within a
	 * critical section.
	 */
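	/*
	 * A hedged userland sketch, roughly what libcam does when opening
	 * a device (the "da"/0 values are illustrative): ask for the
	 * passthrough unit that shadows da0:
	 *
	 *	union ccb ccb;
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strncpy(ccb.cgdl.periph_name, "da",
	 *		sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 0;
	 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) != -1 &&
	 *	    ccb.ccb_h.status == CAM_REQ_CMP)
	 *		... ccb.cgdl.periph_name/unit_number now name passN ...
	 */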
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)ap->a_data;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to call splz() to check for
		 * and allow the software interrupt handler a chance to run.
		 *
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)ap->a_data;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		crit_enter();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			crit_exit();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splz();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
1283                 if (periph == NULL) {
1284                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1285                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1286                         *ccb->cgdl.periph_name = '\0';
1287                         ccb->cgdl.unit_number = 0;
1288                         error = ENOENT;
1289                         /*
1290                          * It is unfortunate that this is even necessary,
1291                          * but there are many, many clueless users out there.
1292                  * If base_periph_found is set, the user is looking
1293                  * for the passthrough driver, but doesn't have one
1294                  * in the kernel.
1295                          */
1296                         if (base_periph_found == 1) {
1297                                 kprintf("xptioctl: pass driver is not in the "
1298                                        "kernel\n");
1299                                 kprintf("xptioctl: put \"device pass0\" in "
1300                                        "your kernel config file\n");
1301                         }
1302                 }
1303                 crit_exit();
1304                 break;
1305                 }
1306         default:
1307                 error = ENOTTY;
1308                 break;
1309         }
1310
1311         return(error);
1312 }
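
/*
 * Illustrative sketch (not part of this file; names are hypothetical):
 * userland tools such as camcontrol(8) use the CAMGETPASSTHRU ioctl
 * handled above to map a peripheral name like "da0" to its "passN"
 * sibling before issuing pass-through commands:
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
 *	ccb.cgdl.unit_number = 0;
 *	if (ioctl(xpt_fd, CAMGETPASSTHRU, &ccb) == 0 &&
 *	    ccb.ccb_h.status == CAM_REQ_CMP)
 *		printf("%s%d\n", ccb.cgdl.periph_name,
 *		       ccb.cgdl.unit_number);
 */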
1313
1314 static int
1315 cam_module_event_handler(module_t mod, int what, void *arg)
1316 {
1317         if (what == MOD_LOAD) {
1318                 xpt_init(NULL);
1319         } else if (what == MOD_UNLOAD) {
1320                 return EBUSY;
1321         } else {
1322                 return EOPNOTSUPP;
1323         }
1324
1325         return 0;
1326 }
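
/*
 * Illustrative sketch (the actual registration may differ): the event
 * handler above is hooked into the kernel module system with a
 * moduledata_t and DECLARE_MODULE(), along these lines:
 *
 *	static moduledata_t cam_moduledata = {
 *		"cam", cam_module_event_handler, NULL
 *	};
 *	DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
 */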
1327
1328 /* Functions accessed by the peripheral drivers */
1329 static void
1330 xpt_init(void *dummy)
1331 {
1332         struct cam_sim *xpt_sim;
1333         struct cam_path *path;
1334         struct cam_devq *devq;
1335         cam_status status;
1336
1337         TAILQ_INIT(&xpt_busses);
1338         TAILQ_INIT(&cam_bioq);
1339         TAILQ_INIT(&cam_netq);
1340         SLIST_INIT(&ccb_freeq);
1341         STAILQ_INIT(&highpowerq);
1342
1343         /*
1344          * The xpt layer is, itself, the equivalent of a SIM.
1345          * Allow 16 ccbs in the ccb pool for it.  This should
1346          * give decent parallelism when we probe busses and
1347          * perform other XPT functions.
1348          */
1349         devq = cam_simq_alloc(16);
1350         xpt_sim = cam_sim_alloc(xptaction,
1351                                 xptpoll,
1352                                 "xpt",
1353                                 /*softc*/NULL,
1354                                 /*unit*/0,
1355                                 /*max_dev_transactions*/0,
1356                                 /*max_tagged_dev_transactions*/0,
1357                                 devq);
1358         cam_simq_release(devq);
1359         xpt_max_ccbs = 16;
1360                                 
1361         xpt_bus_register(xpt_sim, /*bus #*/0);
1362
1363         /*
1364          * Looking at the XPT from the SIM layer, the XPT is
1365          * the equivalent of a peripheral driver.  Allocate
1366          * a peripheral driver entry for us.
1367          */
1368         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1369                                       CAM_TARGET_WILDCARD,
1370                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1371                 kprintf("xpt_init: xpt_create_path failed with status %#x,"
1372                        " failing attach\n", status);
1373                 return;
1374         }
1375
1376         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1377                          path, NULL, 0, NULL);
1378         xpt_free_path(path);
1379
1380         xpt_sim->softc = xpt_periph;
1381
1382         /*
1383          * Register a callback for when interrupts are enabled.
1384          */
1385         xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
1386                                   M_TEMP, M_INTWAIT | M_ZERO);
1387         xpt_config_hook->ich_func = xpt_config;
1388         xpt_config_hook->ich_desc = "xpt";
1389         xpt_config_hook->ich_order = 1000;
1390         if (config_intrhook_establish(xpt_config_hook) != 0) {
1391                 kfree(xpt_config_hook, M_TEMP);
1392                 kprintf("xpt_init: config_intrhook_establish failed "
1393                        "- failing attach\n");
1394         }
1395
1396         /* Install our software interrupt handlers */
1397         register_swi(SWI_CAMNET, swi_camnet, NULL, "swi_camnet", NULL);
1398         register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
1399 }
1400
1401 static cam_status
1402 xptregister(struct cam_periph *periph, void *arg)
1403 {
1404         if (periph == NULL) {
1405                 kprintf("xptregister: periph was NULL!!\n");
1406                 return(CAM_REQ_CMP_ERR);
1407         }
1408
1409         periph->softc = NULL;
1410
1411         xpt_periph = periph;
1412
1413         return(CAM_REQ_CMP);
1414 }
1415
1416 int32_t
1417 xpt_add_periph(struct cam_periph *periph)
1418 {
1419         struct cam_ed *device;
1420         int32_t  status;
1421         struct periph_list *periph_head;
1422
1423         device = periph->path->device;
1424 
1425         status = CAM_REQ_CMP;
1426 
1427         if (device != NULL) {
1428                 periph_head = &device->periphs;
1429 
1430                 /*
1431                  * Make room for this peripheral
1432                  * so it will fit in the queue
1433                  * when it's scheduled to run.
1434                  */
1435                 crit_enter();
1436                 status = camq_resize(&device->drvq,
1437                                      device->drvq.array_size + 1);
1438
1439                 device->generation++;
1440
1441                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1442                 crit_exit();
1443         }
1444
1445         xsoftc.generation++;
1446
1447         return (status);
1448 }
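
/*
 * Note that xpt_add_periph() above and xpt_remove_periph() below are
 * paired: each resizes the device's driver queue by one slot and bumps
 * both the device and xsoftc generation counts, so that an in-progress
 * XPT_DEV_MATCH traversal can detect that the peripheral list changed
 * underneath it.
 */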
1449
1450 void
1451 xpt_remove_periph(struct cam_periph *periph)
1452 {
1453         struct cam_ed *device;
1454
1455         device = periph->path->device;
1456
1457         if (device != NULL) {
1458                 struct periph_list *periph_head;
1459
1460                 periph_head = &device->periphs;
1461                 
1462                 /* Release the slot for this peripheral */
1463                 crit_enter();
1464                 camq_resize(&device->drvq, device->drvq.array_size - 1);
1465
1466                 device->generation++;
1467
1468                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1469                 crit_exit();
1470         }
1471
1472         xsoftc.generation++;
1473
1474 }
1475
1476 #ifdef CAM_NEW_TRAN_CODE
1477
1478 void
1479 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1480 {
1481         struct  ccb_pathinq cpi;
1482         struct  ccb_trans_settings cts;
1483         struct  cam_path *path;
1484         u_int   speed;
1485         u_int   freq;
1486         u_int   mb;
1487
1488         path = periph->path;
1489         /*
1490          * To ensure that this is printed in one piece,
1491          * mask out CAM interrupts.
1492          */
1493         crit_enter();
1494         kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1495                periph->periph_name, periph->unit_number,
1496                path->bus->sim->sim_name,
1497                path->bus->sim->unit_number,
1498                path->bus->sim->bus_id,
1499                path->target->target_id,
1500                path->device->lun_id);
1501         kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1502         scsi_print_inquiry(&path->device->inq_data);
1503         if (bootverbose && path->device->serial_num_len > 0) {
1504                 /* Don't wrap the screen - print only the first 60 chars */
1505                 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1506                        periph->unit_number, path->device->serial_num);
1507         }
1508         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1509         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1510         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1511         xpt_action((union ccb*)&cts);
1512
1513         /* Ask the SIM for its base transfer speed */
1514         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1515         cpi.ccb_h.func_code = XPT_PATH_INQ;
1516         xpt_action((union ccb *)&cpi);
1517
1518         speed = cpi.base_transfer_speed;
1519         freq = 0;
1520         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1521                 struct  ccb_trans_settings_spi *spi;
1522
1523                 spi = &cts.xport_specific.spi;
1524                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1525                   && spi->sync_offset != 0) {
1526                         freq = scsi_calc_syncsrate(spi->sync_period);
1527                         speed = freq;
1528                 }
1529
1530                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1531                         speed *= (0x01 << spi->bus_width);
1532         }
1533         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1534                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1535                 if (fc->valid & CTS_FC_VALID_SPEED) {
1536                         speed = fc->bitrate;
1537                 }
1538         }
1539
1540         mb = speed / 1000;
1541         if (mb > 0)
1542                 kprintf("%s%d: %d.%03dMB/s transfers",
1543                        periph->periph_name, periph->unit_number,
1544                        mb, speed % 1000);
1545         else
1546                 kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1547                        periph->unit_number, speed);
1548         /* Report additional information about SPI connections */
1549         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1550                 struct  ccb_trans_settings_spi *spi;
1551
1552                 spi = &cts.xport_specific.spi;
1553                 if (freq != 0) {
1554                         kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1555                                freq % 1000,
1556                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1557                              ? " DT" : "",
1558                                spi->sync_offset);
1559                 }
1560                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1561                  && spi->bus_width > 0) {
1562                         if (freq != 0) {
1563                                 kprintf(", ");
1564                         } else {
1565                                 kprintf(" (");
1566                         }
1567                         kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
1568                 } else if (freq != 0) {
1569                         kprintf(")");
1570                 }
1570                 }
1571         }
1572         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1573                 struct  ccb_trans_settings_fc *fc;
1574
1575                 fc = &cts.xport_specific.fc;
1576                 if (fc->valid & CTS_FC_VALID_WWNN)
1577                         kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
1578                 if (fc->valid & CTS_FC_VALID_WWPN)
1579                         kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
1580                 if (fc->valid & CTS_FC_VALID_PORT)
1581                         kprintf(" PortID 0x%x", fc->port);
1582         }
1583
1584         if (path->device->inq_flags & SID_CmdQue
1585          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1586                 kprintf("\n%s%d: Tagged Queueing Enabled",
1587                        periph->periph_name, periph->unit_number);
1588         }
1589         kprintf("\n");
1590
1591         /*
1592          * Only print the caller's announce string if one was
1593          * passed in.
1594          */
1595         if (announce_string != NULL)
1596                 kprintf("%s%d: %s\n", periph->periph_name,
1597                        periph->unit_number, announce_string);
1598         crit_exit();
1599 }
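
/*
 * Example of the announcement produced above, with illustrative values:
 *
 *	da0 at ahc0 bus 0 target 0 lun 0
 *	da0: <SEAGATE ST39102LW 0004> Fixed Direct Access SCSI-2 device
 *	da0: 80.000MB/s transfers (40.000MHz, offset 15, 16bit)
 *	da0: Tagged Queueing Enabled
 */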
1600 #else /* CAM_NEW_TRAN_CODE */
1601 void
1602 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1603 {
1604         u_int mb;
1605         struct cam_path *path;
1606         struct ccb_trans_settings cts;
1607
1608         path = periph->path;
1609         /*
1610          * To ensure that this is printed in one piece,
1611          * mask out CAM interrupts.
1612          */
1613         crit_enter();
1614         kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1615                periph->periph_name, periph->unit_number,
1616                path->bus->sim->sim_name,
1617                path->bus->sim->unit_number,
1618                path->bus->sim->bus_id,
1619                path->target->target_id,
1620                path->device->lun_id);
1621         kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1622         scsi_print_inquiry(&path->device->inq_data);
1623         if ((bootverbose)
1624          && (path->device->serial_num_len > 0)) {
1625                 /* Don't wrap the screen - print only the first 60 chars */
1626                 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1627                        periph->unit_number, path->device->serial_num);
1628         }
1629         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1630         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1631         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1632         xpt_action((union ccb*)&cts);
1633         if (cts.ccb_h.status == CAM_REQ_CMP) {
1634                 u_int speed;
1635                 u_int freq;
1636
1637                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1638                   && cts.sync_offset != 0) {
1639                         freq = scsi_calc_syncsrate(cts.sync_period);
1640                         speed = freq;
1641                 } else {
1642                         struct ccb_pathinq cpi;
1643
1644                         /* Ask the SIM for its base transfer speed */
1645                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1646                         cpi.ccb_h.func_code = XPT_PATH_INQ;
1647                         xpt_action((union ccb *)&cpi);
1648
1649                         speed = cpi.base_transfer_speed;
1650                         freq = 0;
1651                 }
1652                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1653                         speed *= (0x01 << cts.bus_width);
1654                 mb = speed / 1000;
1655                 if (mb > 0)
1656                         kprintf("%s%d: %d.%03dMB/s transfers",
1657                                periph->periph_name, periph->unit_number,
1658                                mb, speed % 1000);
1659                 else
1660                         kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1661                                periph->unit_number, speed);
1662                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1663                  && cts.sync_offset != 0) {
1664                         kprintf(" (%d.%03dMHz, offset %d", freq / 1000,
1665                                freq % 1000, cts.sync_offset);
1666                 }
1667                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1668                  && cts.bus_width > 0) {
1669                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1670                          && cts.sync_offset != 0) {
1671                                 kprintf(", ");
1672                         } else {
1673                                 kprintf(" (");
1674                         }
1675                         kprintf("%dbit)", 8 * (0x01 << cts.bus_width));
1676                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1677                         && cts.sync_offset != 0) {
1678                         kprintf(")");
1679                 }
1680
1681                 if (path->device->inq_flags & SID_CmdQue
1682                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1683                         kprintf(", Tagged Queueing Enabled");
1684                 }
1685
1686                 kprintf("\n");
1687         } else if (path->device->inq_flags & SID_CmdQue
1688                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1689                 kprintf("%s%d: Tagged Queueing Enabled\n",
1690                        periph->periph_name, periph->unit_number);
1691         }
1692
1693         /*
1694          * Only print the caller's announce string if one was
1695          * passed in.
1696          */
1697         if (announce_string != NULL)
1698                 kprintf("%s%d: %s\n", periph->periph_name,
1699                        periph->unit_number, announce_string);
1700         crit_exit();
1701 }
1702
1703 #endif /* CAM_NEW_TRAN_CODE */
1704
1705 static dev_match_ret
1706 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1707             struct cam_eb *bus)
1708 {
1709         dev_match_ret retval;
1710         int i;
1711
1712         retval = DM_RET_NONE;
1713
1714         /*
1715          * If we aren't given something to match against, that's an error.
1716          */
1717         if (bus == NULL)
1718                 return(DM_RET_ERROR);
1719
1720         /*
1721          * If there are no match entries, then this bus matches no
1722          * matter what.
1723          */
1724         if ((patterns == NULL) || (num_patterns == 0))
1725                 return(DM_RET_DESCEND | DM_RET_COPY);
1726
1727         for (i = 0; i < num_patterns; i++) {
1728                 struct bus_match_pattern *cur_pattern;
1729
1730                 /*
1731                  * If the pattern in question isn't for a bus node, we
1732                  * aren't interested.  However, we do indicate to the
1733                  * calling routine that we should continue descending the
1734                  * tree, since the user wants to match against lower-level
1735                  * EDT elements.
1736                  */
1737                 if (patterns[i].type != DEV_MATCH_BUS) {
1738                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1739                                 retval |= DM_RET_DESCEND;
1740                         continue;
1741                 }
1742
1743                 cur_pattern = &patterns[i].pattern.bus_pattern;
1744
1745                 /*
1746                  * If they want to match any bus node, we give them any
1747                  * device node.
1748                  */
1749                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1750                         /* set the copy flag */
1751                         retval |= DM_RET_COPY;
1752
1753                         /*
1754                          * If we've already decided on an action, go ahead
1755                          * and return.
1756                          */
1757                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1758                                 return(retval);
1759                 }
1760
1761                 /*
1762                  * Not sure why someone would do this...
1763                  */
1764                 if (cur_pattern->flags == BUS_MATCH_NONE)
1765                         continue;
1766
1767                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1768                  && (cur_pattern->path_id != bus->path_id))
1769                         continue;
1770
1771                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1772                  && (cur_pattern->bus_id != bus->sim->bus_id))
1773                         continue;
1774
1775                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1776                  && (cur_pattern->unit_number != bus->sim->unit_number))
1777                         continue;
1778
1779                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1780                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1781                              DEV_IDLEN) != 0))
1782                         continue;
1783
1784                 /*
1785                  * If we get to this point, the user definitely wants 
1786                  * information on this bus.  So tell the caller to copy the
1787                  * data out.
1788                  */
1789                 retval |= DM_RET_COPY;
1790
1791                 /*
1792                  * If the return action has been set to descend, then we
1793                  * know that we've already seen a non-bus matching
1794                  * expression, therefore we need to further descend the tree.
1795                  * This won't change by continuing around the loop, so we
1796                  * go ahead and return.  If we haven't seen a non-bus
1797                  * matching expression, we keep going around the loop until
1798                  * we exhaust the matching expressions.  We'll set the stop
1799                  * flag once we fall out of the loop.
1800                  */
1801                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1802                         return(retval);
1803         }
1804
1805         /*
1806          * If the return action hasn't been set to descend yet, that means
1807          * we haven't seen anything other than bus matching patterns.  So
1808          * tell the caller to stop descending the tree -- the user doesn't
1809          * want to match against lower level tree elements.
1810          */
1811         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1812                 retval |= DM_RET_STOP;
1813
1814         return(retval);
1815 }
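
/*
 * A summary of the dev_match_ret conventions shared by xptbusmatch()
 * above and the device/peripheral matchers below: the action bits
 * (extracted with DM_RET_ACTION_MASK) tell the traversal code whether
 * to DESCEND into child nodes, STOP at this level, or abort with ERROR,
 * while the DM_RET_COPY bit independently asks that the current node be
 * copied into the user's match buffer.
 */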
1816
1817 static dev_match_ret
1818 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1819                struct cam_ed *device)
1820 {
1821         dev_match_ret retval;
1822         int i;
1823
1824         retval = DM_RET_NONE;
1825
1826         /*
1827          * If we aren't given something to match against, that's an error.
1828          */
1829         if (device == NULL)
1830                 return(DM_RET_ERROR);
1831
1832         /*
1833          * If there are no match entries, then this device matches no
1834          * matter what.
1835          */
1836         if ((patterns == NULL) || (num_patterns == 0))
1837                 return(DM_RET_DESCEND | DM_RET_COPY);
1838
1839         for (i = 0; i < num_patterns; i++) {
1840                 struct device_match_pattern *cur_pattern;
1841
1842                 /*
1843                  * If the pattern in question isn't for a device node, we
1844                  * aren't interested.
1845                  */
1846                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1847                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1848                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1849                                 retval |= DM_RET_DESCEND;
1850                         continue;
1851                 }
1852
1853                 cur_pattern = &patterns[i].pattern.device_pattern;
1854
1855                 /*
1856                  * If they want to match any device node, we give them any
1857                  * device node.
1858                  */
1859                 if (cur_pattern->flags == DEV_MATCH_ANY) {
1860                         /* set the copy flag */
1861                         retval |= DM_RET_COPY;
1862
1863                         
1864                         /*
1865                          * If we've already decided on an action, go ahead
1866                          * and return.
1867                          */
1868                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1869                                 return(retval);
1870                 }
1871
1872                 /*
1873                  * Not sure why someone would do this...
1874                  */
1875                 if (cur_pattern->flags == DEV_MATCH_NONE)
1876                         continue;
1877
1878                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1879                  && (cur_pattern->path_id != device->target->bus->path_id))
1880                         continue;
1881
1882                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1883                  && (cur_pattern->target_id != device->target->target_id))
1884                         continue;
1885
1886                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1887                  && (cur_pattern->target_lun != device->lun_id))
1888                         continue;
1889
1890                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1891                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1892                                     (caddr_t)&cur_pattern->inq_pat,
1893                                     1, sizeof(cur_pattern->inq_pat),
1894                                     scsi_static_inquiry_match) == NULL))
1895                         continue;
1896
1897                 /*
1898                  * If we get to this point, the user definitely wants 
1899                  * information on this device.  So tell the caller to copy
1900                  * the data out.
1901                  */
1902                 retval |= DM_RET_COPY;
1903
1904                 /*
1905                  * If the return action has been set to descend, then we
1906                  * know that we've already seen a peripheral matching
1907                  * expression, therefore we need to further descend the tree.
1908                  * This won't change by continuing around the loop, so we
1909                  * go ahead and return.  If we haven't seen a peripheral
1910                  * matching expression, we keep going around the loop until
1911                  * we exhaust the matching expressions.  We'll set the stop
1912                  * flag once we fall out of the loop.
1913                  */
1914                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1915                         return(retval);
1916         }
1917
1918         /*
1919          * If the return action hasn't been set to descend yet, that means
1920          * we haven't seen any peripheral matching patterns.  So tell the
1921          * caller to stop descending the tree -- the user doesn't want to
1922          * match against lower level tree elements.
1923          */
1924         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1925                 retval |= DM_RET_STOP;
1926
1927         return(retval);
1928 }
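
/*
 * Illustrative sketch (hypothetical values) of a pattern the
 * DEV_MATCH_INQUIRY case above would accept: match any fixed
 * direct-access device from a given vendor.  cam_strmatch() treats
 * '*' and '?' as wildcards in the vendor/product/revision fields.
 *
 *	struct device_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.flags = DEV_MATCH_INQUIRY;
 *	pat.inq_pat.type = T_DIRECT;
 *	pat.inq_pat.media_type = SIP_MEDIA_FIXED;
 *	strcpy(pat.inq_pat.vendor, "SEAGATE");
 *	strcpy(pat.inq_pat.product, "*");
 *	strcpy(pat.inq_pat.revision, "*");
 */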
1929
1930 /*
1931  * Match a single peripheral against any number of match patterns.
1932  */
1933 static dev_match_ret
1934 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1935                struct cam_periph *periph)
1936 {
1937         dev_match_ret retval;
1938         int i;
1939
1940         /*
1941          * If we aren't given something to match against, that's an error.
1942          */
1943         if (periph == NULL)
1944                 return(DM_RET_ERROR);
1945
1946         /*
1947          * If there are no match entries, then this peripheral matches no
1948          * matter what.
1949          */
1950         if ((patterns == NULL) || (num_patterns == 0))
1951                 return(DM_RET_STOP | DM_RET_COPY);
1952
1953         /*
1954          * There aren't any nodes below a peripheral node, so there's no
1955          * reason to descend the tree any further.
1956          */
1957         retval = DM_RET_STOP;
1958
1959         for (i = 0; i < num_patterns; i++) {
1960                 struct periph_match_pattern *cur_pattern;
1961
1962                 /*
1963                  * If the pattern in question isn't for a peripheral, we
1964                  * aren't interested.
1965                  */
1966                 if (patterns[i].type != DEV_MATCH_PERIPH)
1967                         continue;
1968
1969                 cur_pattern = &patterns[i].pattern.periph_pattern;
1970
1971                 /*
1972                  * If they want to match on anything, then we will do so.
1973                  */
1974                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1975                         /* set the copy flag */
1976                         retval |= DM_RET_COPY;
1977
1978                         /*
1979                          * We've already set the return action to stop,
1980                          * since there are no nodes below peripherals in
1981                          * the tree.
1982                          */
1983                         return(retval);
1984                 }
1985
1986                 /*
1987                  * Not sure why someone would do this...
1988                  */
1989                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1990                         continue;
1991
1992                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1993                  && (cur_pattern->path_id != periph->path->bus->path_id))
1994                         continue;
1995
1996                 /*
1997                  * For the target and lun IDs, we have to make sure the
1998                  * target and lun pointers aren't NULL.  The xpt peripheral
1999                  * has a wildcard target and device.
2000                  */
2001                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2002                  && ((periph->path->target == NULL)
2003                  || (cur_pattern->target_id != periph->path->target->target_id)))
2004                         continue;
2005
2006                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2007                  && ((periph->path->device == NULL)
2008                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
2009                         continue;
2010
2011                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2012                  && (cur_pattern->unit_number != periph->unit_number))
2013                         continue;
2014
2015                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2016                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
2017                              DEV_IDLEN) != 0))
2018                         continue;
2019
2020                 /*
2021                  * If we get to this point, the user definitely wants 
2022                  * information on this peripheral.  So tell the caller to
2023                  * copy the data out.
2024                  */
2025                 retval |= DM_RET_COPY;
2026
2027                 /*
2028                  * The return action has already been set to stop, since
2029                  * peripherals don't have any nodes below them in the EDT.
2030                  */
2031                 return(retval);
2032         }
2033
2034         /*
2035          * If we get to this point, the peripheral that was passed in
2036          * doesn't match any of the patterns.
2037          */
2038         return(retval);
2039 }
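
/*
 * Illustrative sketch (hypothetical values): a peripheral pattern that
 * xptperiphmatch() above would match against every unit of the "da"
 * driver, regardless of bus, target, or lun.
 *
 *	struct periph_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.flags = PERIPH_MATCH_NAME;
 *	strncpy(pat.periph_name, "da", DEV_IDLEN);
 */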
2040
2041 static int
2042 xptedtbusfunc(struct cam_eb *bus, void *arg)
2043 {
2044         struct ccb_dev_match *cdm;
2045         dev_match_ret retval;
2046
2047         cdm = (struct ccb_dev_match *)arg;
2048
2049         /*
2050          * If our position is for something deeper in the tree, that means
2051          * that we've already seen this node.  So, we keep going down.
2052          */
2053         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2054          && (cdm->pos.cookie.bus == bus)
2055          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2056          && (cdm->pos.cookie.target != NULL))
2057                 retval = DM_RET_DESCEND;
2058         else
2059                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2060
2061         /*
2062          * If we got an error, bail out of the search.
2063          */
2064         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2065                 cdm->status = CAM_DEV_MATCH_ERROR;
2066                 return(0);
2067         }
2068
2069         /*
2070          * If the copy flag is set, copy this bus out.
2071          */
2072         if (retval & DM_RET_COPY) {
2073                 int spaceleft, j;
2074
2075                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2076                         sizeof(struct dev_match_result));
2077
2078                 /*
2079                  * If we don't have enough space to put in another
2080                  * match result, save our position and tell the
2081                  * user there are more devices to check.
2082                  */
2083                 if (spaceleft < sizeof(struct dev_match_result)) {
2084                         bzero(&cdm->pos, sizeof(cdm->pos));
2085                         cdm->pos.position_type = 
2086                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2087
2088                         cdm->pos.cookie.bus = bus;
2089                         cdm->pos.generations[CAM_BUS_GENERATION] =
2090                                 bus_generation;
2091                         cdm->status = CAM_DEV_MATCH_MORE;
2092                         return(0);
2093                 }
2094                 j = cdm->num_matches;
2095                 cdm->num_matches++;
2096                 cdm->matches[j].type = DEV_MATCH_BUS;
2097                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2098                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2099                 cdm->matches[j].result.bus_result.unit_number =
2100                         bus->sim->unit_number;
2101                 strncpy(cdm->matches[j].result.bus_result.dev_name,
2102                         bus->sim->sim_name, DEV_IDLEN);
2103         }
2104
2105         /*
2106          * If the user is only interested in busses, there's no
2107          * reason to descend to the next level in the tree.
2108          */
2109         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2110                 return(1);
2111
2112         /*
2113          * If there is a target generation recorded, check it to
2114          * make sure the target list hasn't changed.
2115          */
2116         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2117          && (bus == cdm->pos.cookie.bus)
2118          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2119          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2120          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2121              bus->generation)) {
2122                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2123                 return(0);
2124         }
2125
2126         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2127          && (cdm->pos.cookie.bus == bus)
2128          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2129          && (cdm->pos.cookie.target != NULL))
2130                 return(xpttargettraverse(bus,
2131                                         (struct cam_et *)cdm->pos.cookie.target,
2132                                          xptedttargetfunc, arg));
2133         else
2134                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2135 }
2136
2137 static int
2138 xptedttargetfunc(struct cam_et *target, void *arg)
2139 {
2140         struct ccb_dev_match *cdm;
2141
2142         cdm = (struct ccb_dev_match *)arg;
2143
2144         /*
2145          * If there is a device list generation recorded, check it to
2146          * make sure the device list hasn't changed.
2147          */
2148         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2149          && (cdm->pos.cookie.bus == target->bus)
2150          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2151          && (cdm->pos.cookie.target == target)
2152          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2153          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2154          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2155              target->generation)) {
2156                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2157                 return(0);
2158         }
2159
2160         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2161          && (cdm->pos.cookie.bus == target->bus)
2162          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2163          && (cdm->pos.cookie.target == target)
2164          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2165          && (cdm->pos.cookie.device != NULL))
2166                 return(xptdevicetraverse(target,
2167                                         (struct cam_ed *)cdm->pos.cookie.device,
2168                                          xptedtdevicefunc, arg));
2169         else
2170                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2171 }
2172
2173 static int
2174 xptedtdevicefunc(struct cam_ed *device, void *arg)
2175 {
2176
2177         struct ccb_dev_match *cdm;
2178         dev_match_ret retval;
2179
2180         cdm = (struct ccb_dev_match *)arg;
2181
2182         /*
2183          * If our position is for something deeper in the tree, that means
2184          * that we've already seen this node.  So, we keep going down.
2185          */
2186         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2187          && (cdm->pos.cookie.device == device)
2188          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2189          && (cdm->pos.cookie.periph != NULL))
2190                 retval = DM_RET_DESCEND;
2191         else
2192                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2193                                         device);
2194
2195         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2196                 cdm->status = CAM_DEV_MATCH_ERROR;
2197                 return(0);
2198         }
2199
2200         /*
2201          * If the copy flag is set, copy this device out.
2202          */
2203         if (retval & DM_RET_COPY) {
2204                 int spaceleft, j;
2205
2206                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2207                         sizeof(struct dev_match_result));
2208
2209                 /*
2210                  * If we don't have enough space to put in another
2211                  * match result, save our position and tell the
2212                  * user there are more devices to check.
2213                  */
2214                 if (spaceleft < sizeof(struct dev_match_result)) {
2215                         bzero(&cdm->pos, sizeof(cdm->pos));
2216                         cdm->pos.position_type = 
2217                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2218                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2219
2220                         cdm->pos.cookie.bus = device->target->bus;
2221                         cdm->pos.generations[CAM_BUS_GENERATION] =
2222                                 bus_generation;
2223                         cdm->pos.cookie.target = device->target;
2224                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2225                                 device->target->bus->generation;
2226                         cdm->pos.cookie.device = device;
2227                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2228                                 device->target->generation;
2229                         cdm->status = CAM_DEV_MATCH_MORE;
2230                         return(0);
2231                 }
2232                 j = cdm->num_matches;
2233                 cdm->num_matches++;
2234                 cdm->matches[j].type = DEV_MATCH_DEVICE;
2235                 cdm->matches[j].result.device_result.path_id =
2236                         device->target->bus->path_id;
2237                 cdm->matches[j].result.device_result.target_id =
2238                         device->target->target_id;
2239                 cdm->matches[j].result.device_result.target_lun =
2240                         device->lun_id;
2241                 bcopy(&device->inq_data,
2242                       &cdm->matches[j].result.device_result.inq_data,
2243                       sizeof(struct scsi_inquiry_data));
2244
2245                 /* Let the user know whether this device is unconfigured */
2246                 if (device->flags & CAM_DEV_UNCONFIGURED)
2247                         cdm->matches[j].result.device_result.flags =
2248                                 DEV_RESULT_UNCONFIGURED;
2249                 else
2250                         cdm->matches[j].result.device_result.flags =
2251                                 DEV_RESULT_NOFLAG;
2252         }
2253
2254         /*
2255          * If the user isn't interested in peripherals, don't descend
2256          * the tree any further.
2257          */
2258         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2259                 return(1);
2260
2261         /*
2262          * If there is a peripheral list generation recorded, make sure
2263          * it hasn't changed.
2264          */
2265         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2266          && (device->target->bus == cdm->pos.cookie.bus)
2267          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2268          && (device->target == cdm->pos.cookie.target)
2269          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2270          && (device == cdm->pos.cookie.device)
2271          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2272          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2273          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2274              device->generation)){
2275                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2276                 return(0);
2277         }
2278
2279         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2280          && (cdm->pos.cookie.bus == device->target->bus)
2281          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2282          && (cdm->pos.cookie.target == device->target)
2283          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2284          && (cdm->pos.cookie.device == device)
2285          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2286          && (cdm->pos.cookie.periph != NULL))
2287                 return(xptperiphtraverse(device,
2288                                 (struct cam_periph *)cdm->pos.cookie.periph,
2289                                 xptedtperiphfunc, arg));
2290         else
2291                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2292 }
2293
2294 static int
2295 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2296 {
2297         struct ccb_dev_match *cdm;
2298         dev_match_ret retval;
2299
2300         cdm = (struct ccb_dev_match *)arg;
2301
2302         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2303
2304         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2305                 cdm->status = CAM_DEV_MATCH_ERROR;
2306                 return(0);
2307         }
2308
2309         /*
2310          * If the copy flag is set, copy this peripheral out.
2311          */
2312         if (retval & DM_RET_COPY) {
2313                 int spaceleft, j;
2314
2315                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2316                         sizeof(struct dev_match_result));
2317
2318                 /*
2319                  * If we don't have enough space to put in another
2320                  * match result, save our position and tell the
2321                  * user there are more devices to check.
2322                  */
2323                 if (spaceleft < sizeof(struct dev_match_result)) {
2324                         bzero(&cdm->pos, sizeof(cdm->pos));
2325                         cdm->pos.position_type = 
2326                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2327                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2328                                 CAM_DEV_POS_PERIPH;
2329
2330                         cdm->pos.cookie.bus = periph->path->bus;
2331                         cdm->pos.generations[CAM_BUS_GENERATION] =
2332                                 bus_generation;
2333                         cdm->pos.cookie.target = periph->path->target;
2334                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2335                                 periph->path->bus->generation;
2336                         cdm->pos.cookie.device = periph->path->device;
2337                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2338                                 periph->path->target->generation;
2339                         cdm->pos.cookie.periph = periph;
2340                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2341                                 periph->path->device->generation;
2342                         cdm->status = CAM_DEV_MATCH_MORE;
2343                         return(0);
2344                 }
2345
2346                 j = cdm->num_matches;
2347                 cdm->num_matches++;
2348                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2349                 cdm->matches[j].result.periph_result.path_id =
2350                         periph->path->bus->path_id;
2351                 cdm->matches[j].result.periph_result.target_id =
2352                         periph->path->target->target_id;
2353                 cdm->matches[j].result.periph_result.target_lun =
2354                         periph->path->device->lun_id;
2355                 cdm->matches[j].result.periph_result.unit_number =
2356                         periph->unit_number;
2357                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2358                         periph->periph_name, DEV_IDLEN);
2359         }
2360
2361         return(1);
2362 }
2363
2364 static int
2365 xptedtmatch(struct ccb_dev_match *cdm)
2366 {
2367         int ret;
2368
2369         cdm->num_matches = 0;
2370
2371         /*
2372          * Check the bus list generation.  If it has changed, the user
2373          * needs to reset everything and start over.
2374          */
2375         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2376          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2377          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2378                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2379                 return(0);
2380         }
2381
2382         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2383          && (cdm->pos.cookie.bus != NULL))
2384                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2385                                      xptedtbusfunc, cdm);
2386         else
2387                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2388
2389         /*
2390          * If we get back 0, that means that we had to stop before fully
2391          * traversing the EDT.  It also means that one of the subroutines
2392          * has set the status field to the proper value.  If we get back 1,
2393          * we've fully traversed the EDT and copied out any matching entries.
2394          */
2395         if (ret == 1)
2396                 cdm->status = CAM_DEV_MATCH_LAST;
2397
2398         return(ret);
2399 }
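
/*
 * Illustrative sketch (buffer sizes are hypothetical) of how a caller
 * drives the matcher above: fill in a ccb_dev_match, submit it with
 * xpt_action() (or via the CAMIOCOMMAND ioctl from userland), and
 * resubmit while the status comes back CAM_DEV_MATCH_MORE; the pos
 * field carries the resume point between calls.
 *
 *	ccb->ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb->cdm.num_patterns = 0;	(an empty pattern list matches all)
 *	ccb->cdm.pattern_buf_len = 0;
 *	ccb->cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
 *	ccb->cdm.matches = match_buf;
 *	do {
 *		xpt_action(ccb);
 *		(consume ccb->cdm.num_matches entries of ccb->cdm.matches)
 *	} while (ccb->cdm.status == CAM_DEV_MATCH_MORE);
 */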
2400
2401 static int
2402 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2403 {
2404         struct ccb_dev_match *cdm;
2405
2406         cdm = (struct ccb_dev_match *)arg;
2407
2408         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2409          && (cdm->pos.cookie.pdrv == pdrv)
2410          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2411          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2412          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2413              (*pdrv)->generation)) {
2414                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2415                 return(0);
2416         }
2417
2418         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2419          && (cdm->pos.cookie.pdrv == pdrv)
2420          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2421          && (cdm->pos.cookie.periph != NULL))
2422                 return(xptpdperiphtraverse(pdrv,
2423                                 (struct cam_periph *)cdm->pos.cookie.periph,
2424                                 xptplistperiphfunc, arg));
2425         else
2426                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2427 }
2428
2429 static int
2430 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2431 {
2432         struct ccb_dev_match *cdm;
2433         dev_match_ret retval;
2434
2435         cdm = (struct ccb_dev_match *)arg;
2436
2437         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2438
2439         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2440                 cdm->status = CAM_DEV_MATCH_ERROR;
2441                 return(0);
2442         }
2443
2444         /*
2445          * If the copy flag is set, copy this peripheral out.
2446          */
2447         if (retval & DM_RET_COPY) {
2448                 int spaceleft, j;
2449
2450                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2451                         sizeof(struct dev_match_result));
2452
2453                 /*
2454                  * If we don't have enough space to put in another
2455                  * match result, save our position and tell the
2456                  * user there are more devices to check.
2457                  */
2458                 if (spaceleft < sizeof(struct dev_match_result)) {
2459                         struct periph_driver **pdrv;
2460
2461                         pdrv = NULL;
2462                         bzero(&cdm->pos, sizeof(cdm->pos));
2463                         cdm->pos.position_type = 
2464                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2465                                 CAM_DEV_POS_PERIPH;
2466
2467                         /*
2468                          * This may look a bit nonsensical, but it is
2469                          * actually quite logical.  There are very few
2470                          * peripheral drivers, and bloating every peripheral
2471                          * structure with a pointer back to its parent
2472                          * peripheral driver linker set entry would cost
2473                          * more in the long run than doing this quick lookup.
2474                          */
2475                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2476                                 if (strcmp((*pdrv)->driver_name,
2477                                     periph->periph_name) == 0)
2478                                         break;
2479                         }
2480
2481                         if (*pdrv == NULL) {
2482                                 cdm->status = CAM_DEV_MATCH_ERROR;
2483                                 return(0);
2484                         }
2485
2486                         cdm->pos.cookie.pdrv = pdrv;
2487                         /*
2488                          * The periph generation slot does double duty, as
2489                          * does the periph pointer slot.  They are used for
2490                          * both edt and pdrv lookups and positioning.
2491                          */
2492                         cdm->pos.cookie.periph = periph;
2493                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2494                                 (*pdrv)->generation;
2495                         cdm->status = CAM_DEV_MATCH_MORE;
2496                         return(0);
2497                 }
2498
2499                 j = cdm->num_matches;
2500                 cdm->num_matches++;
2501                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2502                 cdm->matches[j].result.periph_result.path_id =
2503                         periph->path->bus->path_id;
2504
2505                 /*
2506                  * The transport layer peripheral doesn't have a target or
2507                  * lun.
2508                  */
2509                 if (periph->path->target)
2510                         cdm->matches[j].result.periph_result.target_id =
2511                                 periph->path->target->target_id;
2512                 else
2513                         cdm->matches[j].result.periph_result.target_id = -1;
2514
2515                 if (periph->path->device)
2516                         cdm->matches[j].result.periph_result.target_lun =
2517                                 periph->path->device->lun_id;
2518                 else
2519                         cdm->matches[j].result.periph_result.target_lun = -1;
2520
2521                 cdm->matches[j].result.periph_result.unit_number =
2522                         periph->unit_number;
2523                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2524                         periph->periph_name, DEV_IDLEN);
2525         }
2526
2527         return(1);
2528 }
2529
2530 static int
2531 xptperiphlistmatch(struct ccb_dev_match *cdm)
2532 {
2533         int ret;
2534
2535         cdm->num_matches = 0;
2536
2537         /*
2538          * At the equivalent point in the EDT traversal function above,
2539          * we check the bus list generation to make sure that no busses
2540          * have been added or removed since the user last sent an
2541          * XPT_DEV_MATCH ccb through.  For the peripheral driver list
2542          * traversal function, however, we don't have to worry about new
2543          * peripheral driver types coming or going; they're in a linker
2544          * set, and therefore can't change without a recompile.
2545          */
2546
2547         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2548          && (cdm->pos.cookie.pdrv != NULL))
2549                 ret = xptpdrvtraverse(
2550                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2551                                 xptplistpdrvfunc, cdm);
2552         else
2553                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2554
2555         /*
2556          * If we get back 0, that means that we had to stop before fully
2557          * traversing the peripheral driver tree.  It also means that one of
2558          * the subroutines has set the status field to the proper value.  If
2559          * we get back 1, we've fully traversed the peripheral driver list
2560          * and copied out any matching entries.
2561          */
2562         if (ret == 1)
2563                 cdm->status = CAM_DEV_MATCH_LAST;
2564
2565         return(ret);
2566 }
2567
2568 static int
2569 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2570 {
2571         struct cam_eb *bus, *next_bus;
2572         int retval;
2573
2574         retval = 1;
2575
2576         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2577              bus != NULL;
2578              bus = next_bus) {
2579                 next_bus = TAILQ_NEXT(bus, links);
2580
2581                 retval = tr_func(bus, arg);
2582                 if (retval == 0)
2583                         return(retval);
2584         }
2585
2586         return(retval);
2587 }
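
/*
 * All of the traversal helpers below follow the same convention as
 * xptbustraverse() above: the callback returns nonzero to continue the
 * walk and 0 to abort it, and the last callback return value is handed
 * back to the original caller.
 */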
2588
2589 static int
2590 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2591                   xpt_targetfunc_t *tr_func, void *arg)
2592 {
2593         struct cam_et *target, *next_target;
2594         int retval;
2595
2596         retval = 1;
2597         for (target = (start_target ? start_target :
2598                        TAILQ_FIRST(&bus->et_entries));
2599              target != NULL; target = next_target) {
2600
2601                 next_target = TAILQ_NEXT(target, links);
2602
2603                 retval = tr_func(target, arg);
2604
2605                 if (retval == 0)
2606                         return(retval);
2607         }
2608
2609         return(retval);
2610 }
2611
2612 static int
2613 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2614                   xpt_devicefunc_t *tr_func, void *arg)
2615 {
2616         struct cam_ed *device, *next_device;
2617         int retval;
2618
2619         retval = 1;
2620         for (device = (start_device ? start_device :
2621                        TAILQ_FIRST(&target->ed_entries));
2622              device != NULL;
2623              device = next_device) {
2624
2625                 next_device = TAILQ_NEXT(device, links);
2626
2627                 retval = tr_func(device, arg);
2628
2629                 if (retval == 0)
2630                         return(retval);
2631         }
2632
2633         return(retval);
2634 }
2635
2636 static int
2637 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2638                   xpt_periphfunc_t *tr_func, void *arg)
2639 {
2640         struct cam_periph *periph, *next_periph;
2641         int retval;
2642
2643         retval = 1;
2644
2645         for (periph = (start_periph ? start_periph :
2646                        SLIST_FIRST(&device->periphs));
2647              periph != NULL;
2648              periph = next_periph) {
2649
2650                 next_periph = SLIST_NEXT(periph, periph_links);
2651
2652                 retval = tr_func(periph, arg);
2653                 if (retval == 0)
2654                         return(retval);
2655         }
2656
2657         return(retval);
2658 }
2659
2660 static int
2661 xptpdrvtraverse(struct periph_driver **start_pdrv,
2662                 xpt_pdrvfunc_t *tr_func, void *arg)
2663 {
2664         struct periph_driver **pdrv;
2665         int retval;
2666
2667         retval = 1;
2668
2669         /*
2670          * We don't traverse the peripheral driver list like we do the
2671          * other lists, because it is a linker set, and therefore cannot be
2672          * changed during runtime.  If the peripheral driver list is ever
2673          * re-done to be something other than a linker set (i.e. it can
2674          * change while the system is running), the list traversal should
2675          * be modified to work like the other traversal functions.
2676          */
2677         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2678              *pdrv != NULL; pdrv++) {
2679                 retval = tr_func(pdrv, arg);
2680
2681                 if (retval == 0)
2682                         return(retval);
2683         }
2684
2685         return(retval);
2686 }
2687
2688 static int
2689 xptpdperiphtraverse(struct periph_driver **pdrv,
2690                     struct cam_periph *start_periph,
2691                     xpt_periphfunc_t *tr_func, void *arg)
2692 {
2693         struct cam_periph *periph, *next_periph;
2694         int retval;
2695
2696         retval = 1;
2697
2698         for (periph = (start_periph ? start_periph :
2699              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2700              periph = next_periph) {
2701
2702                 next_periph = TAILQ_NEXT(periph, unit_links);
2703
2704                 retval = tr_func(periph, arg);
2705                 if (retval == 0)
2706                         return(retval);
2707         }
2708         return(retval);
2709 }
2710
2711 static int
2712 xptdefbusfunc(struct cam_eb *bus, void *arg)
2713 {
2714         struct xpt_traverse_config *tr_config;
2715
2716         tr_config = (struct xpt_traverse_config *)arg;
2717
2718         if (tr_config->depth == XPT_DEPTH_BUS) {
2719                 xpt_busfunc_t *tr_func;
2720
2721                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2722
2723                 return(tr_func(bus, tr_config->tr_arg));
2724         } else
2725                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2726 }
2727
2728 static int
2729 xptdeftargetfunc(struct cam_et *target, void *arg)
2730 {
2731         struct xpt_traverse_config *tr_config;
2732
2733         tr_config = (struct xpt_traverse_config *)arg;
2734
2735         if (tr_config->depth == XPT_DEPTH_TARGET) {
2736                 xpt_targetfunc_t *tr_func;
2737
2738                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2739
2740                 return(tr_func(target, tr_config->tr_arg));
2741         } else
2742                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2743 }
2744
2745 static int
2746 xptdefdevicefunc(struct cam_ed *device, void *arg)
2747 {
2748         struct xpt_traverse_config *tr_config;
2749
2750         tr_config = (struct xpt_traverse_config *)arg;
2751
2752         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2753                 xpt_devicefunc_t *tr_func;
2754
2755                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2756
2757                 return(tr_func(device, tr_config->tr_arg));
2758         } else
2759                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2760 }
2761
2762 static int
2763 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2764 {
2765         struct xpt_traverse_config *tr_config;
2766         xpt_periphfunc_t *tr_func;
2767
2768         tr_config = (struct xpt_traverse_config *)arg;
2769
2770         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2771
2772         /*
2773          * Unlike the other default functions, we don't check for depth
2774          * here.  The peripheral driver level is the last level in the EDT,
2775          * so if we're here, we should execute the function in question.
2776          */
2777         return(tr_func(periph, tr_config->tr_arg));
2778 }
2779
2780 /*
2781  * Execute the given function for every bus in the EDT.
2782  */
2783 static int
2784 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2785 {
2786         struct xpt_traverse_config tr_config;
2787
2788         tr_config.depth = XPT_DEPTH_BUS;
2789         tr_config.tr_func = tr_func;
2790         tr_config.tr_arg = arg;
2791
2792         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2793 }
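/*
 * Illustrative usage (not compiled in; a sketch with hypothetical
 * function names): count the busses in the EDT.  The callback returns
 * non-zero so the traversal visits every bus.
 */
#if 0
static int
xptcountbusfunc(struct cam_eb *bus, void *arg)
{
        int *count = (int *)arg;

        (*count)++;
        return (1);
}

static int
xpt_count_busses(void)
{
        int count = 0;

        xpt_for_all_busses(xptcountbusfunc, &count);
        return (count);
}
#endif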
2794
2795 #ifdef notusedyet
2796 /*
2797  * Execute the given function for every target in the EDT.
2798  */
2799 static int
2800 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2801 {
2802         struct xpt_traverse_config tr_config;
2803
2804         tr_config.depth = XPT_DEPTH_TARGET;
2805         tr_config.tr_func = tr_func;
2806         tr_config.tr_arg = arg;
2807
2808         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2809 }
2810 #endif /* notusedyet */
2811
2812 /*
2813  * Execute the given function for every device in the EDT.
2814  */
2815 static int
2816 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2817 {
2818         struct xpt_traverse_config tr_config;
2819
2820         tr_config.depth = XPT_DEPTH_DEVICE;
2821         tr_config.tr_func = tr_func;
2822         tr_config.tr_arg = arg;
2823
2824         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2825 }
2826
2827 #ifdef notusedyet
2828 /*
2829  * Execute the given function for every peripheral in the EDT.
2830  */
2831 static int
2832 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2833 {
2834         struct xpt_traverse_config tr_config;
2835
2836         tr_config.depth = XPT_DEPTH_PERIPH;
2837         tr_config.tr_func = tr_func;
2838         tr_config.tr_arg = arg;
2839
2840         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2841 }
2842 #endif /* notusedyet */
2843
2844 static int
2845 xptsetasyncfunc(struct cam_ed *device, void *arg)
2846 {
2847         struct cam_path path;
2848         struct ccb_getdev cgd;
2849         struct async_node *cur_entry;
2850
2851         cur_entry = (struct async_node *)arg;
2852
2853         /*
2854          * Don't report unconfigured devices (Wildcard devs,
2855          * devices only for target mode, device instances
2856          * that have been invalidated but are waiting for
2857          * their last reference count to be released).
2858          */
2859         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2860                 return (1);
2861
2862         xpt_compile_path(&path,
2863                          NULL,
2864                          device->target->bus->path_id,
2865                          device->target->target_id,
2866                          device->lun_id);
2867         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2868         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2869         xpt_action((union ccb *)&cgd);
2870         cur_entry->callback(cur_entry->callback_arg,
2871                             AC_FOUND_DEVICE,
2872                             &path, &cgd);
2873         xpt_release_path(&path);
2874
2875         return(1);
2876 }
2877
2878 static int
2879 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2880 {
2881         struct cam_path path;
2882         struct ccb_pathinq cpi;
2883         struct async_node *cur_entry;
2884
2885         cur_entry = (struct async_node *)arg;
2886
2887         xpt_compile_path(&path, /*periph*/NULL,
2888                          bus->sim->path_id,
2889                          CAM_TARGET_WILDCARD,
2890                          CAM_LUN_WILDCARD);
2891         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2892         cpi.ccb_h.func_code = XPT_PATH_INQ;
2893         xpt_action((union ccb *)&cpi);
2894         cur_entry->callback(cur_entry->callback_arg,
2895                             AC_PATH_REGISTERED,
2896                             &path, &cpi);
2897         xpt_release_path(&path);
2898
2899         return(1);
2900 }
2901
2902 void
2903 xpt_action(union ccb *start_ccb)
2904 {
2905         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2906
2907         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2908
2909         crit_enter();
2910
2911         switch (start_ccb->ccb_h.func_code) {
2912         case XPT_SCSI_IO:
2913         {
2914 #ifdef CAM_NEW_TRAN_CODE
2915                 struct cam_ed *device;
2916 #endif /* CAM_NEW_TRAN_CODE */
2917 #ifdef CAMDEBUG
2918                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2919                 struct cam_path *path;
2920
2921                 path = start_ccb->ccb_h.path;
2922 #endif
2923
2924                 /*
2925                  * For the sake of compatibility with SCSI-1
2926                  * devices that may not understand the identify
2927                  * message, we include lun information in the
2928                  * second byte of all commands.  SCSI-1 specifies
2929                  * that luns are a 3 bit value and reserves only 3
2930                  * bits for lun information in the CDB.  Later
2931                  * revisions of the SCSI spec allow for more than 8
2932                  * luns, but have deprecated lun information in the
2933          * CDB.  So, if the lun won't fit, we must omit it.
2934                  *
2935                  * Also be aware that during initial probing for devices,
2936                  * the inquiry information is unknown but initialized to 0.
2937                  * This means that this code will be exercised while probing
2938                  * devices with an ANSI revision greater than 2.
2939                  */
2940 #ifdef CAM_NEW_TRAN_CODE
2941                 device = start_ccb->ccb_h.path->device;
2942                 if (device->protocol_version <= SCSI_REV_2
2943 #else /* CAM_NEW_TRAN_CODE */
2944                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2945 #endif /* CAM_NEW_TRAN_CODE */
2946                  && start_ccb->ccb_h.target_lun < 8
2947                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2948
2949                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2950                             start_ccb->ccb_h.target_lun << 5;
2951                 }
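                /*
                 * For example (illustrative): with target_lun == 3 the
                 * value masked in above is 3 << 5 == 0x60, so a CDB
                 * whose second byte was 0 now carries the lun in its
                 * top three bits, as SCSI-1 expects.
                 */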
2952                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2953                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2954                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2955                                        &path->device->inq_data),
2956                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2957                                           cdb_str, sizeof(cdb_str))));
2958                 /* FALLTHROUGH */
2959         }
2960         case XPT_TARGET_IO:
2961         case XPT_CONT_TARGET_IO:
2962                 start_ccb->csio.sense_resid = 0;
2963                 start_ccb->csio.resid = 0;
2964                 /* FALLTHROUGH */
2965         case XPT_RESET_DEV:
2966         case XPT_ENG_EXEC:
2967         {
2968                 struct cam_path *path;
2969                 int runq;
2970
2971                 path = start_ccb->ccb_h.path;
2972
2973                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2974                 if (path->device->qfrozen_cnt == 0)
2975                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
2976                 else
2977                         runq = 0;
2978                 if (runq != 0)
2979                         xpt_run_dev_sendq(path->bus);
2980                 break;
2981         }
2982         case XPT_SET_TRAN_SETTINGS:
2983         {
2984                 xpt_set_transfer_settings(&start_ccb->cts,
2985                                           start_ccb->ccb_h.path->device,
2986                                           /*async_update*/FALSE);
2987                 break;
2988         }
2989         case XPT_CALC_GEOMETRY:
2990         {
2991                 struct cam_sim *sim;
2992
2993                 /* Filter out garbage */
2994                 if (start_ccb->ccg.block_size == 0
2995                  || start_ccb->ccg.volume_size == 0) {
2996                         start_ccb->ccg.cylinders = 0;
2997                         start_ccb->ccg.heads = 0;
2998                         start_ccb->ccg.secs_per_track = 0;
2999                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3000                         break;
3001                 }
3002                 sim = start_ccb->ccb_h.path->bus->sim;
3003                 (*(sim->sim_action))(sim, start_ccb);
3004                 break;
3005         }
3006         case XPT_ABORT:
3007         {
3008                 union ccb *abort_ccb;
3009
3010                 abort_ccb = start_ccb->cab.abort_ccb;
3011                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3012
3013                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
3014                                 struct cam_ccbq *ccbq;
3015
3016                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3017                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3018                                 abort_ccb->ccb_h.status =
3019                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3020                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3021                                 xpt_done(abort_ccb);
3022                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3023                                 break;
3024                         }
3025                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3026                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3027                                 /*
3028                                  * We've caught this ccb en route to
3029                                  * the SIM.  Flag it for abort and the
3030                                  * SIM will do so just before starting
3031                                  * real work on the CCB.
3032                                  */
3033                                 abort_ccb->ccb_h.status =
3034                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3035                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3036                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3037                                 break;
3038                         }
3039                 } 
3040                 if (XPT_FC_IS_QUEUED(abort_ccb)
3041                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3042                         /*
3043                          * It's already completed but waiting
3044                          * for our SWI to get to it.
3045                          */
3046                         start_ccb->ccb_h.status = CAM_UA_ABORT;
3047                         break;
3048                 }
3049                 /*
3050                  * If we weren't able to take care of the abort request
3051                  * in the XPT, pass the request down to the SIM for processing.
3052                  */
3053                 /* FALLTHROUGH */
3054         }
3055         case XPT_ACCEPT_TARGET_IO:
3056         case XPT_EN_LUN:
3057         case XPT_IMMED_NOTIFY:
3058         case XPT_NOTIFY_ACK:
3059         case XPT_GET_TRAN_SETTINGS:
3060         case XPT_RESET_BUS:
3061         {
3062                 struct cam_sim *sim;
3063
3064                 sim = start_ccb->ccb_h.path->bus->sim;
3065                 (*(sim->sim_action))(sim, start_ccb);
3066                 break;
3067         }
3068         case XPT_PATH_INQ:
3069         {
3070                 struct cam_sim *sim;
3071
3072                 sim = start_ccb->ccb_h.path->bus->sim;
3073                 (*(sim->sim_action))(sim, start_ccb);
3074                 break;
3075         }
3076         case XPT_PATH_STATS:
3077                 start_ccb->cpis.last_reset =
3078                         start_ccb->ccb_h.path->bus->last_reset;
3079                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3080                 break;
3081         case XPT_GDEV_TYPE:
3082         {
3083                 struct cam_ed *dev;
3084
3085                 dev = start_ccb->ccb_h.path->device;
3086                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3087                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3088                 } else {
3089                         struct ccb_getdev *cgd;
3090                         struct cam_eb *bus;
3091                         struct cam_et *tar;
3092
3093                         cgd = &start_ccb->cgd;
3094                         bus = cgd->ccb_h.path->bus;
3095                         tar = cgd->ccb_h.path->target;
3096                         cgd->inq_data = dev->inq_data;
3097                         cgd->ccb_h.status = CAM_REQ_CMP;
3098                         cgd->serial_num_len = dev->serial_num_len;
3099                         if ((dev->serial_num_len > 0)
3100                          && (dev->serial_num != NULL))
3101                                 bcopy(dev->serial_num, cgd->serial_num,
3102                                       dev->serial_num_len);
3103                 }
3104                 break; 
3105         }
3106         case XPT_GDEV_STATS:
3107         {
3108                 struct cam_ed *dev;
3109
3110                 dev = start_ccb->ccb_h.path->device;
3111                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3112                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3113                 } else {
3114                         struct ccb_getdevstats *cgds;
3115                         struct cam_eb *bus;
3116                         struct cam_et *tar;
3117
3118                         cgds = &start_ccb->cgds;
3119                         bus = cgds->ccb_h.path->bus;
3120                         tar = cgds->ccb_h.path->target;
3121                         cgds->dev_openings = dev->ccbq.dev_openings;
3122                         cgds->dev_active = dev->ccbq.dev_active;
3123                         cgds->devq_openings = dev->ccbq.devq_openings;
3124                         cgds->devq_queued = dev->ccbq.queue.entries;
3125                         cgds->held = dev->ccbq.held;
3126                         cgds->last_reset = tar->last_reset;
3127                         cgds->maxtags = dev->quirk->maxtags;
3128                         cgds->mintags = dev->quirk->mintags;
3129                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3130                                 cgds->last_reset = bus->last_reset;
3131                         cgds->ccb_h.status = CAM_REQ_CMP;
3132                 }
3133                 break;
3134         }
3135         case XPT_GDEVLIST:
3136         {
3137                 struct cam_periph       *nperiph;
3138                 struct periph_list      *periph_head;
3139                 struct ccb_getdevlist   *cgdl;
3140                 u_int                   i;
3141                 struct cam_ed           *device;
3142                 int                     found;
3143
3145                 found = 0;
3146
3147                 /*
3148                  * Don't want anyone mucking with our data.
3149                  */
3150                 device = start_ccb->ccb_h.path->device;
3151                 periph_head = &device->periphs;
3152                 cgdl = &start_ccb->cgdl;
3153
3154                 /*
3155                  * Check and see if the list has changed since the user
3156                  * last requested a list member.  If so, tell them that the
3157                  * list has changed, and therefore they need to start over 
3158                  * from the beginning.
3159                  */
3160                 if ((cgdl->index != 0) && 
3161                     (cgdl->generation != device->generation)) {
3162                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3163                         break;
3164                 }
3165
3166                 /*
3167                  * Traverse the list of peripherals and attempt to find 
3168                  * the requested peripheral.
3169                  */
3170                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3171                      (nperiph != NULL) && (i <= cgdl->index);
3172                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3173                         if (i == cgdl->index) {
3174                                 strncpy(cgdl->periph_name,
3175                                         nperiph->periph_name,
3176                                         DEV_IDLEN);
3177                                 cgdl->unit_number = nperiph->unit_number;
3178                                 found = 1;
3179                         }
3180                 }
3181                 if (found == 0) {
3182                         cgdl->status = CAM_GDEVLIST_ERROR;
3183                         break;
3184                 }
3185
3186                 if (nperiph == NULL)
3187                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3188                 else
3189                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3190
3191                 cgdl->index++;
3192                 cgdl->generation = device->generation;
3193
3194                 cgdl->ccb_h.status = CAM_REQ_CMP;
3195                 break;
3196         }
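        /*
         * Illustrative consumer loop (sketch): the caller zeroes the
         * index and generation fields, then resubmits the same ccb
         * while the status comes back CAM_GDEVLIST_MORE_DEVS.  The
         * index and generation are advanced here on each pass, and a
         * status of CAM_GDEVLIST_LIST_CHANGED tells the caller to zero
         * them and start over.
         */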
3197         case XPT_DEV_MATCH:
3198         {
3199                 dev_pos_type position_type;
3200                 struct ccb_dev_match *cdm;
3201                 int ret;
3202
3203                 cdm = &start_ccb->cdm;
3204
3205                 /*
3206                  * Prevent EDT changes while we traverse it.
3207                  *
3209                  * There are two ways of getting at information in the EDT.
3210                  * The first way is via the primary EDT tree.  It starts
3211                  * with a list of busses, then a list of targets on a bus,
3212                  * then devices/luns on a target, and then peripherals on a
3213                  * device/lun.  The "other" way is by the peripheral driver
3214                  * lists.  The peripheral driver lists are organized by
3215                  * peripheral driver.  (obviously)  So it makes sense to
3216                  * use the peripheral driver list if the user is looking
3217                  * for something like "da1", or all "da" devices.  If the
3218                  * user is looking for something on a particular bus/target
3219                  * or lun, it's generally better to go through the EDT tree.
3220                  */
3221
3222                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3223                         position_type = cdm->pos.position_type;
3224                 else {
3225                         u_int i;
3226
3227                         position_type = CAM_DEV_POS_NONE;
3228
3229                         for (i = 0; i < cdm->num_patterns; i++) {
3230                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3231                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3232                                         position_type = CAM_DEV_POS_EDT;
3233                                         break;
3234                                 }
3235                         }
3236
3237                         if (cdm->num_patterns == 0)
3238                                 position_type = CAM_DEV_POS_EDT;
3239                         else if (position_type == CAM_DEV_POS_NONE)
3240                                 position_type = CAM_DEV_POS_PDRV;
3241                 }
3242
3243                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3244                 case CAM_DEV_POS_EDT:
3245                         ret = xptedtmatch(cdm);
3246                         break;
3247                 case CAM_DEV_POS_PDRV:
3248                         ret = xptperiphlistmatch(cdm);
3249                         break;
3250                 default:
3251                         cdm->status = CAM_DEV_MATCH_ERROR;
3252                         break;
3253                 }
3254
3255                 if (cdm->status == CAM_DEV_MATCH_ERROR)
3256                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3257                 else
3258                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3259
3260                 break;
3261         }
3262         case XPT_SASYNC_CB:
3263         {
3264                 struct ccb_setasync *csa;
3265                 struct async_node *cur_entry;
3266                 struct async_list *async_head;
3267                 u_int32_t added;
3268
3269                 csa = &start_ccb->csa;
3270                 added = csa->event_enable;
3271                 async_head = &csa->ccb_h.path->device->asyncs;
3272
3273                 /*
3274                  * If there is already an entry for us, simply
3275                  * update it.
3276                  */
3277                 cur_entry = SLIST_FIRST(async_head);
3278                 while (cur_entry != NULL) {
3279                         if ((cur_entry->callback_arg == csa->callback_arg)
3280                          && (cur_entry->callback == csa->callback))
3281                                 break;
3282                         cur_entry = SLIST_NEXT(cur_entry, links);
3283                 }
3284
3285                 if (cur_entry != NULL) {
3286                         /*
3287                          * If the request has no flags set,
3288                          * remove the entry.
3289                          */
3290                         added &= ~cur_entry->event_enable;
3291                         if (csa->event_enable == 0) {
3292                                 SLIST_REMOVE(async_head, cur_entry,
3293                                              async_node, links);
3294                                 csa->ccb_h.path->device->refcount--;
3295                                 kfree(cur_entry, M_DEVBUF);
3296                         } else {
3297                                 cur_entry->event_enable = csa->event_enable;
3298                         }
3299                 } else {
3300                         cur_entry = kmalloc(sizeof(*cur_entry), 
3301                                             M_DEVBUF, M_INTWAIT);
3302                         cur_entry->event_enable = csa->event_enable;
3303                         cur_entry->callback_arg = csa->callback_arg;
3304                         cur_entry->callback = csa->callback;
3305                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3306                         csa->ccb_h.path->device->refcount++;
3307                 }
3308
3309                 if ((added & AC_FOUND_DEVICE) != 0) {
3310                         /*
3311                          * Get this peripheral up to date with all
3312                          * the currently existing devices.
3313                          */
3314                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3315                 }
3316                 if ((added & AC_PATH_REGISTERED) != 0) {
3317                         /*
3318                          * Get this peripheral up to date with all
3319                          * the currently existing busses.
3320                          */
3321                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3322                 }
3323                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3324                 break;
3325         }
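        /*
         * Illustrative registration (sketch; "mycallback" is a
         * hypothetical handler): a peripheral hooks AC_FOUND_DEVICE
         * events by filling in a ccb_setasync and pushing it through
         * xpt_action():
         *
         *      struct ccb_setasync csa;
         *
         *      xpt_setup_ccb(&csa.ccb_h, path, 5);
         *      csa.ccb_h.func_code = XPT_SASYNC_CB;
         *      csa.event_enable = AC_FOUND_DEVICE;
         *      csa.callback = mycallback;
         *      csa.callback_arg = NULL;
         *      xpt_action((union ccb *)&csa);
         */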
3326         case XPT_REL_SIMQ:
3327         {
3328                 struct ccb_relsim *crs;
3329                 struct cam_ed *dev;
3330
3331                 crs = &start_ccb->crs;
3332                 dev = crs->ccb_h.path->device;
3333                 if (dev == NULL) {
3334
3335                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3336                         break;
3337                 }
3338
3339                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3340
3341                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3342
3343                                 /* Don't ever go below one opening */
3344                                 if (crs->openings > 0) {
3345                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
3346                                                             crs->openings);
3347
3348                                         if (bootverbose) {
3349                                                 xpt_print_path(crs->ccb_h.path);
3350                                                 kprintf("tagged openings "
3351                                                        "now %d\n",
3352                                                        crs->openings);
3353                                         }
3354                                 }
3355                         }
3356                 }
3357
3358                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3359
3360                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3361
3362                                 /*
3363                                  * Just extend the old timeout and decrement
3364                                  * the freeze count so that a single timeout
3365                                  * is sufficient for releasing the queue.
3366                                  */
3367                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3368                                 callout_stop(&dev->c_handle);
3369                         } else {
3370
3371                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3372                         }
3373
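                        /*
                         * (release_timeout * hz) / 1000 below converts
                         * the caller's milliseconds into clock ticks;
                         * e.g. with hz == 100, a 500ms timeout becomes
                         * 50 ticks.
                         */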
3374                         callout_reset(&dev->c_handle,
3375                                       (crs->release_timeout * hz) / 1000, 
3376                                       xpt_release_devq_timeout, dev);
3377
3378                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3379
3380                 }
3381
3382                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3383
3384                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3385                                 /*
3386                                  * Decrement the freeze count so that a single
3387                                  * completion is still sufficient to unfreeze
3388                                  * the queue.
3389                                  */
3390                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3391                         } else {
3392                                 
3393                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3394                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3395                         }
3396                 }
3397
3398                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3399
3400                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3401                          || (dev->ccbq.dev_active == 0)) {
3402
3403                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3404                         } else {
3405                                 
3406                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3407                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3408                         }
3409                 }
3410                 
3411                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3412
3413                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
3414                                          /*run_queue*/TRUE);
3415                 }
3416                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3417                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3418                 break;
3419         }
3420         case XPT_SCAN_BUS:
3421                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3422                 break;
3423         case XPT_SCAN_LUN:
3424                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3425                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
3426                              start_ccb);
3427                 break;
3428         case XPT_DEBUG: {
3429 #ifdef CAMDEBUG
3430 #ifdef CAM_DEBUG_DELAY
3431                 cam_debug_delay = CAM_DEBUG_DELAY;
3432 #endif
3433                 cam_dflags = start_ccb->cdbg.flags;
3434                 if (cam_dpath != NULL) {
3435                         xpt_free_path(cam_dpath);
3436                         cam_dpath = NULL;
3437                 }
3438
3439                 if (cam_dflags != CAM_DEBUG_NONE) {
3440                         if (xpt_create_path(&cam_dpath, xpt_periph,
3441                                             start_ccb->ccb_h.path_id,
3442                                             start_ccb->ccb_h.target_id,
3443                                             start_ccb->ccb_h.target_lun) !=
3444                                             CAM_REQ_CMP) {
3445                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3446                                 cam_dflags = CAM_DEBUG_NONE;
3447                         } else {
3448                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3449                                 xpt_print_path(cam_dpath);
3450                                 kprintf("debugging flags now %x\n", cam_dflags);
3451                         }
3452                 } else {
3453                         cam_dpath = NULL;
3454                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3455                 }
3456 #else /* !CAMDEBUG */
3457                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3458 #endif /* CAMDEBUG */
3459                 break;
3460         }
3461         case XPT_NOOP:
3462                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3463                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3464                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3465                 break;
3466         default:
3467         case XPT_SDEV_TYPE:
3468         case XPT_TERM_IO:
3469         case XPT_ENG_INQ:
3470                 /* XXX Implement */
3471                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3472                 break;
3473         }
3474         crit_exit();
3475 }
3476
3477 void
3478 xpt_polled_action(union ccb *start_ccb)
3479 {
3480         u_int32_t timeout;
3481         struct    cam_sim *sim; 
3482         struct    cam_devq *devq;
3483         struct    cam_ed *dev;
3484
3485         timeout = start_ccb->ccb_h.timeout;
3486         sim = start_ccb->ccb_h.path->bus->sim;
3487         devq = sim->devq;
3488         dev = start_ccb->ccb_h.path->device;
3489
3490         crit_enter();
3491
3492         /*
3493          * Steal an opening so that no other queued requests
3494          * can get it before us while we simulate interrupts.
3495          */
3496         dev->ccbq.devq_openings--;
3497         dev->ccbq.dev_openings--;       
3498         
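        /*
         * Each polling pass below delays 1000us, so the ccb's timeout
         * value, given in milliseconds, doubles as the loop bound.
         */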
3499         while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3500            && (--timeout > 0)) {
3501                 DELAY(1000);
3502                 (*(sim->sim_poll))(sim);
3503                 swi_camnet(NULL, NULL);
3504                 swi_cambio(NULL, NULL);         
3505         }
3506         
3507         dev->ccbq.devq_openings++;
3508         dev->ccbq.dev_openings++;
3509         
3510         if (timeout != 0) {
3511                 xpt_action(start_ccb);
3512                 while(--timeout > 0) {
3513                         (*(sim->sim_poll))(sim);
3514                         swi_camnet(NULL, NULL);
3515                         swi_cambio(NULL, NULL);
3516                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3517                             != CAM_REQ_INPROG)
3518                                 break;
3519                         DELAY(1000);
3520                 }
3521                 if (timeout == 0) {
3522                         /*
3523                          * XXX Is it worth adding a sim_timeout entry
3524                          * point so we can attempt recovery?  If
3525                          * this is only used for dumps, I don't think
3526                          * it is.
3527                          */
3528                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3529                 }
3530         } else {
3531                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3532         }
3533         crit_exit();
3534 }
3535         
3536 /*
3537  * Schedule a peripheral driver to receive a ccb when its
3538  * target device has space for more transactions.
3539  */
3540 void
3541 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3542 {
3543         struct cam_ed *device;
3544         int runq;
3545
3546         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3547         device = perph->path->device;
3548         crit_enter();
3549         if (periph_is_queued(perph)) {
3550                 /* Simply reorder based on new priority */
3551                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3552                           ("   change priority to %d\n", new_priority));
3553                 if (new_priority < perph->pinfo.priority) {
3554                         camq_change_priority(&device->drvq,
3555                                              perph->pinfo.index,
3556                                              new_priority);
3557                 }
3558                 runq = 0;
3559         } else {
3560                 /* New entry on the queue */
3561                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3562                           ("   added periph to queue\n"));
3563                 perph->pinfo.priority = new_priority;
3564                 perph->pinfo.generation = ++device->drvq.generation;
3565                 camq_insert(&device->drvq, &perph->pinfo);
3566                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3567         }
3568         crit_exit();
3569         if (runq != 0) {
3570                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3571                           ("   calling xpt_run_devq\n"));
3572                 xpt_run_dev_allocq(perph->path->bus);
3573         }
3574 }
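/*
 * Illustrative flow (sketch): a peripheral with queued work calls
 * xpt_schedule(periph, prio); once the device reaches the head of the
 * bus' alloc queue, xpt_run_dev_allocq() below allocates a CCB and
 * hands it to the peripheral's periph_start() entry point.
 */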
3575
3577 /*
3578  * Schedule a device to run on a given queue.
3579  * If the device was inserted as a new entry on the queue,
3580  * return 1 meaning the device queue should be run. If we
3581  * were already queued, implying someone else has already
3582  * started the queue, return 0 so the caller doesn't attempt
3583  * to run the queue.  Must be run in a critical section.
3584  */
3585 static int
3586 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3587                  u_int32_t new_priority)
3588 {
3589         int retval;
3590         u_int32_t old_priority;
3591
3592         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3593
3594         old_priority = pinfo->priority;
3595
3596         /*
3597          * Are we already queued?
3598          */
3599         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3600                 /* Simply reorder based on new priority */
3601                 if (new_priority < old_priority) {
3602                         camq_change_priority(queue, pinfo->index,
3603                                              new_priority);
3604                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3605                                         ("changed priority to %d\n",
3606                                          new_priority));
3607                 }
3608                 retval = 0;
3609         } else {
3610                 /* New entry on the queue */
3611                 if (new_priority < old_priority)
3612                         pinfo->priority = new_priority;
3613
3614                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3615                                 ("Inserting onto queue\n"));
3616                 pinfo->generation = ++queue->generation;
3617                 camq_insert(queue, pinfo);
3618                 retval = 1;
3619         }
3620         return (retval);
3621 }
3622
3623 static void
3624 xpt_run_dev_allocq(struct cam_eb *bus)
3625 {
3626         struct  cam_devq *devq;
3627
3628         if ((devq = bus->sim->devq) == NULL) {
3629                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
3630                 return;
3631         }
3632         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3633
3634         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3635                         ("   qfrozen_cnt == 0x%x, entries == %d, "
3636                          "openings == %d, active == %d\n",
3637                          devq->alloc_queue.qfrozen_cnt,
3638                          devq->alloc_queue.entries,
3639                          devq->alloc_openings,
3640                          devq->alloc_active));
3641
3642         crit_enter();
3643         devq->alloc_queue.qfrozen_cnt++;
3644         while ((devq->alloc_queue.entries > 0)
3645             && (devq->alloc_openings > 0)
3646             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3647                 struct  cam_ed_qinfo *qinfo;
3648                 struct  cam_ed *device;
3649                 union   ccb *work_ccb;
3650                 struct  cam_periph *drv;
3651                 struct  camq *drvq;
3652                 
3653                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3654                                                            CAMQ_HEAD);
3655                 device = qinfo->device;
3656
3657                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3658                                 ("running device %p\n", device));
3659
3660                 drvq = &device->drvq;
3661
3662 #ifdef CAMDEBUG
3663                 if (drvq->entries <= 0) {
3664                         panic("xpt_run_dev_allocq: "
3665                               "Device on queue without any work to do");
3666                 }
3667 #endif
3668                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3669                         devq->alloc_openings--;
3670                         devq->alloc_active++;
3671                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3672                         crit_exit();
3673                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3674                                       drv->pinfo.priority);
3675                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3676                                         ("calling periph start\n"));
3677                         drv->periph_start(drv, work_ccb);
3678                 } else {
3679                         /*
3680                          * Malloc failure in alloc_ccb
3681                          *
3683                          * XXX add us to a list to be run from free_ccb
3684                          * if we don't have any ccbs active on this
3685                          * device queue otherwise we may never get run
3686                          * again.
3687                          */
3688                         break;
3689                 }
3690         
3691                 /* Re-enter critical section for insertion and loop-top test */
3692                 crit_enter();
3693
3694                 if (drvq->entries > 0) {
3695                         /* We have more work.  Attempt to reschedule */
3696                         xpt_schedule_dev_allocq(bus, device);
3697                 }
3698         }
3699         devq->alloc_queue.qfrozen_cnt--;
3700         crit_exit();
3701 }
3702
3703 static void
3704 xpt_run_dev_sendq(struct cam_eb *bus)
3705 {
3706         struct  cam_devq *devq;
3707
3708         if ((devq = bus->sim->devq) == NULL) {
3709                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3710                 return;
3711         }
3712         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3713
3714         crit_enter();
3715         devq->send_queue.qfrozen_cnt++;
3716         while ((devq->send_queue.entries > 0)
3717             && (devq->send_openings > 0)) {
3718                 struct  cam_ed_qinfo *qinfo;
3719                 struct  cam_ed *device;
3720                 union ccb *work_ccb;
3721                 struct  cam_sim *sim;
3722
3723                 if (devq->send_queue.qfrozen_cnt > 1) {
3724                         break;
3725                 }
3726
3727                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3728                                                            CAMQ_HEAD);
3729                 device = qinfo->device;
3730
3731                 /*
3732                  * If the device has been "frozen", don't attempt
3733                  * to run it.
3734                  */
3735                 if (device->qfrozen_cnt > 0) {
3736                         continue;
3737                 }
3738
3739                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3740                                 ("running device %p\n", device));
3741
3742                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3743                 if (work_ccb == NULL) {
3744                         kprintf("device on run queue with no ccbs???\n");
3745                         continue;
3746                 }
3747
3748                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3749
3750                         if (num_highpower <= 0) {
3751                                 /*
3752                                  * We got a high power command, but we
3753                                  * don't have any available slots.  Freeze
3754                                  * the device queue until we have a slot
3755                                  * available.
3756                                  */
3757                                 device->qfrozen_cnt++;
3758                                 STAILQ_INSERT_TAIL(&highpowerq, 
3759                                                    &work_ccb->ccb_h, 
3760                                                    xpt_links.stqe);
3761
3762                                 continue;
3763                         } else {
3764                                 /*
3765                                  * Consume a high power slot while
3766                                  * this ccb runs.
3767                                  */
3768                                 num_highpower--;
3769                         }
3770                 }
3771                 devq->active_dev = device;
3772                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3773
3774                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3775
3776                 devq->send_openings--;
3777                 devq->send_active++;            
3778                 
3779                 if (device->ccbq.queue.entries > 0)
3780                         xpt_schedule_dev_sendq(bus, device);
3781
3782                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3783                         /*
3784                          * The client wants to freeze the queue
3785                          * after this CCB is sent.
3786                          */
3787                         device->qfrozen_cnt++;
3788                 }
3789
3790                 /* In Target mode, the peripheral driver knows best... */
3791                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3792                         if ((device->inq_flags & SID_CmdQue) != 0
3793                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3794                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3795                         else
3796                                 /*
3797                                  * Clear this in case of a retried CCB that
3798                                  * failed due to a rejected tag.
3799                                  */
3800                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3801                 }
3802
3803                 /*
3804                  * Device queues can be shared among multiple sim instances
3805                  * that reside on different busses.  Use the SIM in the queue
3806                  * CCB's path, rather than the one in the bus that was passed
3807                  * into this function.
3808                  */
3809                 sim = work_ccb->ccb_h.path->bus->sim;
3810                 (*(sim->sim_action))(sim, work_ccb);
3811
3812                 devq->active_dev = NULL;
3813                 /* Critical section still held for the test at top of loop */
3814         }
3815         devq->send_queue.qfrozen_cnt--;
3816         crit_exit();
3817 }
3818
3819 /*
3820  * This function merges stuff from the slave ccb into the master ccb, while
3821  * keeping important fields in the master ccb constant.
3822  */
3823 void
3824 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3825 {
3826         /*
3827          * Pull fields that are valid for peripheral drivers to set
3828          * into the master CCB along with the CCB "payload".
3829          */
3830         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3831         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3832         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3833         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
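        /*
         * &(&ccb->ccb_h)[1] is the first byte past the header, so this
         * copies the function-specific payload from slave to master
         * while the master's header, apart from the four fields pulled
         * over above, stays put.
         */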
3834         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3835               sizeof(union ccb) - sizeof(struct ccb_hdr));
3836 }
3837
3838 void
3839 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3840 {
3841         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3842         callout_init(&ccb_h->timeout_ch);
3843         ccb_h->pinfo.priority = priority;
3844         ccb_h->path = path;
3845         ccb_h->path_id = path->bus->path_id;
3846         if (path->target)
3847                 ccb_h->target_id = path->target->target_id;
3848         else
3849                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3850         if (path->device) {
3851                 ccb_h->target_lun = path->device->lun_id;
3852                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3853         } else {
3854                 ccb_h->target_lun = CAM_LUN_WILDCARD;
3855         }
3856         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3857         ccb_h->flags = 0;
3858 }
3859
3860 /* Path manipulation functions */
3861 cam_status
3862 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3863                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3864 {
3865         struct     cam_path *path;
3866         cam_status status;
3867
3868         path = kmalloc(sizeof(*path), M_DEVBUF, M_INTWAIT);
3869         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3870         if (status != CAM_REQ_CMP) {
3871                 kfree(path, M_DEVBUF);
3872                 path = NULL;
3873         }
3874         *new_path_ptr = path;
3875         return (status);
3876 }
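/*
 * Typical usage (illustrative sketch): build a path to a specific
 * bus/target/lun, use it, then release it.
 *
 *      struct cam_path *path;
 *
 *      if (xpt_create_path(&path, NULL, path_id, target_id, lun_id)
 *          == CAM_REQ_CMP) {
 *              ... issue ccbs against the path ...
 *              xpt_free_path(path);
 *      }
 */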
3877
3878 static cam_status
3879 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3880                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3881 {
3882         struct       cam_eb *bus;
3883         struct       cam_et *target;
3884         struct       cam_ed *device;
3885         cam_status   status;
3886
3887         status = CAM_REQ_CMP;   /* Completed without error */
3888         target = NULL;          /* Wildcarded */
3889         device = NULL;          /* Wildcarded */
3890
3891         /*
3892          * We will potentially modify the EDT, so block interrupts
3893          * that may attempt to create cam paths.
3894          */
3895         crit_enter();
3896         bus = xpt_find_bus(path_id);
3897         if (bus == NULL) {
3898                 status = CAM_PATH_INVALID;
3899         } else {
3900                 target = xpt_find_target(bus, target_id);
3901                 if (target == NULL) {
3902                         /* Create one */
3903                         struct cam_et *new_target;
3904
3905                         new_target = xpt_alloc_target(bus, target_id);
3906                         if (new_target == NULL) {
3907                                 status = CAM_RESRC_UNAVAIL;
3908                         } else {
3909                                 target = new_target;
3910                         }
3911                 }
3912                 if (target != NULL) {
3913                         device = xpt_find_device(target, lun_id);
3914                         if (device == NULL) {
3915                                 /* Create one */
3916                                 struct cam_ed *new_device;
3917
3918                                 new_device = xpt_alloc_device(bus,
3919                                                               target,
3920                                                               lun_id);
3921                                 if (new_device == NULL) {
3922                                         status = CAM_RESRC_UNAVAIL;
3923                                 } else {
3924                                         device = new_device;
3925                                 }
3926                         }
3927                 }
3928         }
3929         crit_exit();
3930
3931         /*
3932          * Only touch the user's data if we are successful.
3933          */
3934         if (status == CAM_REQ_CMP) {
3935                 new_path->periph = perph;
3936                 new_path->bus = bus;
3937                 new_path->target = target;
3938                 new_path->device = device;
3939                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3940         } else {
3941                 if (device != NULL)
3942                         xpt_release_device(bus, target, device);
3943                 if (target != NULL)
3944                         xpt_release_target(bus, target);
3945                 if (bus != NULL)
3946                         xpt_release_bus(bus);
3947         }
3948         return (status);
3949 }
3950
3951 static void
3952 xpt_release_path(struct cam_path *path)
3953 {
3954         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3955         if (path->device != NULL) {
3956                 xpt_release_device(path->bus, path->target, path->device);
3957                 path->device = NULL;
3958         }
3959         if (path->target != NULL) {
3960                 xpt_release_target(path->bus, path->target);
3961                 path->target = NULL;
3962         }
3963         if (path->bus != NULL) {
3964                 xpt_release_bus(path->bus);
3965                 path->bus = NULL;
3966         }
3967 }
3968
3969 void
3970 xpt_free_path(struct cam_path *path)
3971 {
3972         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3973         xpt_release_path(path);
3974         kfree(path, M_DEVBUF);
3975 }
3976
3978 /*
3979  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3980  * in path1, 2 for match with wildcards in path2.
3981  */
3982 int
3983 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3984 {
3985         int retval = 0;
3986
3987         if (path1->bus != path2->bus) {
3988                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3989                         retval = 1;
3990                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3991                         retval = 2;
3992                 else
3993                         return (-1);
3994         }
3995         if (path1->target != path2->target) {
3996                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3997                         if (retval == 0)
3998                                 retval = 1;
3999                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4000                         retval = 2;
4001                 else
4002                         return (-1);
4003         }
4004         if (path1->device != path2->device) {
4005                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4006                         if (retval == 0)
4007                                 retval = 1;
4008                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4009                         retval = 2;
4010                 else
4011                         return (-1);
4012         }
4013         return (retval);
4014 }
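/*
 * Illustrative sketch: callers that only care whether an event path
 * refers to their device usually treat any non-negative result as a
 * match, since 1 and 2 merely report which side held the wildcards.
 * event_path and handle_event below are hypothetical.
 *
 *	if (xpt_path_comp(periph->path, event_path) >= 0)
 *		handle_event(periph);
 */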
4015
4016 void
4017 xpt_print_path(struct cam_path *path)
4018 {
4019         if (path == NULL)
4020                 kprintf("(nopath): ");
4021         else {
4022                 if (path->periph != NULL)
4023                         kprintf("(%s%d:", path->periph->periph_name,
4024                                path->periph->unit_number);
4025                 else
4026                         kprintf("(noperiph:");
4027
4028                 if (path->bus != NULL)
4029                         kprintf("%s%d:%d:", path->bus->sim->sim_name,
4030                                path->bus->sim->unit_number,
4031                                path->bus->sim->bus_id);
4032                 else
4033                         kprintf("nobus:");
4034
4035                 if (path->target != NULL)
4036                         kprintf("%d:", path->target->target_id);
4037                 else
4038                         kprintf("X:");
4039
4040                 if (path->device != NULL)
4041                         kprintf("%d): ", path->device->lun_id);
4042                 else
4043                         kprintf("X): ");
4044         }
4045 }
4046
4047 int
4048 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4049 {
4050         struct sbuf sb;
4051
4052         sbuf_new(&sb, str, str_len, 0);
4053
4054         if (path == NULL)
4055                 sbuf_printf(&sb, "(nopath): ");
4056         else {
4057                 if (path->periph != NULL)
4058                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4059                                     path->periph->unit_number);
4060                 else
4061                         sbuf_printf(&sb, "(noperiph:");
4062
4063                 if (path->bus != NULL)
4064                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4065                                     path->bus->sim->unit_number,
4066                                     path->bus->sim->bus_id);
4067                 else
4068                         sbuf_printf(&sb, "nobus:");
4069
4070                 if (path->target != NULL)
4071                         sbuf_printf(&sb, "%d:", path->target->target_id);
4072                 else
4073                         sbuf_printf(&sb, "X:");
4074
4075                 if (path->device != NULL)
4076                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
4077                 else
4078                         sbuf_printf(&sb, "X): ");
4079         }
4080         sbuf_finish(&sb);
4081
4082         return(sbuf_len(&sb));
4083 }
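/*
 * Illustrative sketch: xpt_path_string() is the sbuf-backed analogue of
 * xpt_print_path() for callers that want the prefix in a buffer instead
 * of on the console; the buffer size below is an arbitrary choice.
 *
 *	char pathstr[64];
 *
 *	xpt_path_string(path, pathstr, sizeof(pathstr));
 *	kprintf("%stimed out\n", pathstr);
 */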
4084
4085 path_id_t
4086 xpt_path_path_id(struct cam_path *path)
4087 {
4088         return(path->bus->path_id);
4089 }
4090
4091 target_id_t
4092 xpt_path_target_id(struct cam_path *path)
4093 {
4094         if (path->target != NULL)
4095                 return (path->target->target_id);
4096         else
4097                 return (CAM_TARGET_WILDCARD);
4098 }
4099
4100 lun_id_t
4101 xpt_path_lun_id(struct cam_path *path)
4102 {
4103         if (path->device != NULL)
4104                 return (path->device->lun_id);
4105         else
4106                 return (CAM_LUN_WILDCARD);
4107 }
4108
4109 struct cam_sim *
4110 xpt_path_sim(struct cam_path *path)
4111 {
4112         return (path->bus->sim);
4113 }
4114
4115 struct cam_periph*
4116 xpt_path_periph(struct cam_path *path)
4117 {
4118         return (path->periph);
4119 }
4120
4121 /*
4122  * Release a CAM control block for the caller.  Remit the cost of the structure
4123  * to the device referenced by the path.  If this device had no 'credits'
4124  * and peripheral drivers have registered async callbacks for this
4125  * notification, call them now.
4126  */
4127 void
4128 xpt_release_ccb(union ccb *free_ccb)
4129 {
4130         struct   cam_path *path;
4131         struct   cam_ed *device;
4132         struct   cam_eb *bus;
4133
4134         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4135         path = free_ccb->ccb_h.path;
4136         device = path->device;
4137         bus = path->bus;
4138         crit_enter();
4139         cam_ccbq_release_opening(&device->ccbq);
4140         if (xpt_ccb_count > xpt_max_ccbs) {
4141                 xpt_free_ccb(free_ccb);
4142                 xpt_ccb_count--;
4143         } else {
4144                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4145         }
4146         if (bus->sim->devq) {
4147                 bus->sim->devq->alloc_openings++;
4148                 bus->sim->devq->alloc_active--;
4149         }
4150         /* XXX Turn this into an inline function - xpt_run_device?? */
4151         if ((device_is_alloc_queued(device) == 0)
4152          && (device->drvq.entries > 0)) {
4153                 xpt_schedule_dev_allocq(bus, device);
4154         }
4155         crit_exit();
4156         if (bus->sim->devq && dev_allocq_is_runnable(bus->sim->devq))
4157                 xpt_run_dev_allocq(bus);
4158 }
4159
4160 /* Functions accessed by SIM drivers */
4161
4162 /*
4163  * A sim structure, listing the SIM entry points and instance
4164  * identification info, is passed to xpt_bus_register to hook the SIM
4165  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4166  * for this new bus, places it in the list of busses, and assigns
4167  * it a path_id.  The path_id may be influenced by "hard wiring"
4168  * information specified by the user.  Once interrupt services are
4169  * available, the bus will be probed.
4170  */
4171 int32_t
4172 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4173 {
4174         struct cam_eb *new_bus;
4175         struct cam_eb *old_bus;
4176         struct ccb_pathinq cpi;
4177
4178         sim->bus_id = bus;
4179         new_bus = kmalloc(sizeof(*new_bus), M_DEVBUF, M_INTWAIT);
4180
4181         if (strcmp(sim->sim_name, "xpt") != 0) {
4182                 sim->path_id =
4183                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4184         }
4185
4186         TAILQ_INIT(&new_bus->et_entries);
4187         new_bus->path_id = sim->path_id;
4188         new_bus->sim = sim;
4189         ++sim->refcount;
4190         timevalclear(&new_bus->last_reset);
4191         new_bus->flags = 0;
4192         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4193         new_bus->generation = 0;
4194         crit_enter();
4195         old_bus = TAILQ_FIRST(&xpt_busses);
4196         while (old_bus != NULL
4197             && old_bus->path_id < new_bus->path_id)
4198                 old_bus = TAILQ_NEXT(old_bus, links);
4199         if (old_bus != NULL)
4200                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4201         else
4202                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4203         bus_generation++;
4204         crit_exit();
4205
4206         /* Notify interested parties */
4207         if (sim->path_id != CAM_XPT_PATH_ID) {
4208                 struct cam_path path;
4209
4210                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4211                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4212                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4213                 cpi.ccb_h.func_code = XPT_PATH_INQ;
4214                 xpt_action((union ccb *)&cpi);
4215                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4216                 xpt_release_path(&path);
4217         }
4218         return (CAM_SUCCESS);
4219 }
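/*
 * Illustrative sketch of the attach-time sequence a SIM driver runs
 * (arguments abbreviated; see cam_sim.h for the exact cam_sim_alloc()
 * signature in this tree, and note the mydrv_* names are hypothetical):
 *
 *	devq = cam_simq_alloc(MYDRV_MAX_TRANSACTIONS);
 *	sim = cam_sim_alloc(mydrv_action, mydrv_poll, "mydrv", softc,
 *			    unit, 1, MYDRV_MAX_TAGGED, devq);
 *	if (sim == NULL || xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		... fail the attach, freeing sim and devq ...
 */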
4220
4221 /*
4222  * Deregister a bus.  We must clean out all transactions pending on the bus.
4223  * This routine is typically called prior to cam_sim_free() (e.g. see
4224  * dev/usbmisc/umass/umass.c)
4225  */
4226 int32_t
4227 xpt_bus_deregister(path_id_t pathid)
4228 {
4229         struct cam_path bus_path;
4230         cam_status status;
4231
4232         status = xpt_compile_path(&bus_path, NULL, pathid,
4233                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4234         if (status != CAM_REQ_CMP)
4235                 return (status);
4236
4237         /*
4238          * This should clear out all pending requests and timeouts, but
4239          * the ccb's may be queued to a software interrupt.
4240          *
4241          * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4242          * and it really ought to.
4243          */
4244         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4245         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4246
4247         /* make sure all responses have been processed */
4248         camisr(&cam_netq);
4249         camisr(&cam_bioq);
4250         
4251         /* Release the reference count held while registered. */
4252         xpt_release_bus(bus_path.bus);
4253         xpt_release_path(&bus_path);
4254
4255         return (CAM_REQ_CMP);
4256 }
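/*
 * Illustrative sketch of the teardown order described above, as used by
 * hot-unpluggable SIMs such as umass (cam_sim_free() arguments omitted;
 * they differ between trees):
 *
 *	xpt_bus_deregister(cam_sim_path(sim));
 *	cam_sim_free(sim, ...);
 */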
4257
4258 static path_id_t
4259 xptnextfreepathid(void)
4260 {
4261         struct cam_eb *bus;
4262         path_id_t pathid;
4263         char *strval;
4264
4265         pathid = 0;
4266         bus = TAILQ_FIRST(&xpt_busses);
4267 retry:
4268         /* Find an unoccupied pathid */
4269         while (bus != NULL
4270             && bus->path_id <= pathid) {
4271                 if (bus->path_id == pathid)
4272                         pathid++;
4273                 bus = TAILQ_NEXT(bus, links);
4274         }
4275
4276         /*
4277          * Ensure that this pathid is not reserved for
4278          * a bus that may be registered in the future.
4279          */
4280         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4281                 ++pathid;
4282                 /* Start the search over */
4283                 goto retry;
4284         }
4285         return (pathid);
4286 }
4287
4288 static path_id_t
4289 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4290 {
4291         path_id_t pathid;
4292         int i, dunit, val;
4293         char buf[32];
4294
4295         pathid = CAM_XPT_PATH_ID;
4296         ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4297         i = -1;
4298         while ((i = resource_query_string(i, "at", buf)) != -1) {
4299                 if (strcmp(resource_query_name(i), "scbus")) {
4300                         /* Avoid a bit of foot shooting. */
4301                         continue;
4302                 }
4303                 dunit = resource_query_unit(i);
4304                 if (dunit < 0)          /* unwired?! */
4305                         continue;
4306                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4307                         if (sim_bus == val) {
4308                                 pathid = dunit;
4309                                 break;
4310                         }
4311                 } else if (sim_bus == 0) {
4312                         /* Unspecified matches bus 0 */
4313                         pathid = dunit;
4314                         break;
4315                 } else {
4316                         kprintf("Ambiguous scbus configuration for %s%d "
4317                                "bus %d, cannot wire down.  The kernel "
4318                                "config entry for scbus%d should "
4319                                "specify a controller bus.\n"
4320                                "Scbus will be assigned dynamically.\n",
4321                                sim_name, sim_unit, sim_bus, dunit);
4322                         break;
4323                 }
4324         }
4325
4326         if (pathid == CAM_XPT_PATH_ID)
4327                 pathid = xptnextfreepathid();
4328         return (pathid);
4329 }
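/*
 * Illustrative example of the "hard wiring" consulted above: a kernel
 * config line such as
 *
 *	device scbus0 at ahc0 bus 0
 *
 * reserves path id 0 for bus 0 of ahc0.  An entry without the "bus"
 * keyword only wires cleanly against a controller's bus 0; wiring any
 * other bus of a multi-bus controller needs the explicit keyword, which
 * is what the kprintf() above asks for.
 */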
4330
4331 void
4332 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4333 {
4334         struct cam_eb *bus;
4335         struct cam_et *target, *next_target;
4336         struct cam_ed *device, *next_device;
4337
4338         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4339
4340         /*
4341          * Most async events come from a CAM interrupt context.  In
4342          * a few cases, the error recovery code at the peripheral layer,
4343          * which may run from our SWI or a process context, may signal
4344          * deferred events with a call to xpt_async. Ensure async
4345          * notifications are serialized by blocking cam interrupts.
4346          */
4347         crit_enter();
4348
4349         bus = path->bus;
4350
4351         if (async_code == AC_BUS_RESET) { 
4352                 /* Update our notion of when the last reset occurred */
4353                 microuptime(&bus->last_reset);
4354         }
4355
4356         for (target = TAILQ_FIRST(&bus->et_entries);
4357              target != NULL;
4358              target = next_target) {
4359
4360                 next_target = TAILQ_NEXT(target, links);
4361
4362                 if (path->target != target
4363                  && path->target->target_id != CAM_TARGET_WILDCARD
4364                  && target->target_id != CAM_TARGET_WILDCARD)
4365                         continue;
4366
4367                 if (async_code == AC_SENT_BDR) {
4368                         /* Update our notion of when the last reset occurred */
4369                         microuptime(&path->target->last_reset);
4370                 }
4371
4372                 for (device = TAILQ_FIRST(&target->ed_entries);
4373                      device != NULL;
4374                      device = next_device) {
4375
4376                         next_device = TAILQ_NEXT(device, links);
4377
4378                         if (path->device != device 
4379                          && path->device->lun_id != CAM_LUN_WILDCARD
4380                          && device->lun_id != CAM_LUN_WILDCARD)
4381                                 continue;
4382
4383                         xpt_dev_async(async_code, bus, target,
4384                                       device, async_arg);
4385
4386                         xpt_async_bcast(&device->asyncs, async_code,
4387                                         path, async_arg);
4388                 }
4389         }
4390         
4391         /*
4392          * If this wasn't a fully wildcarded async, tell all
4393          * clients that want all async events.
4394          */
4395         if (bus != xpt_periph->path->bus)
4396                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4397                                 path, async_arg);
4398         crit_exit();
4399 }
4400
4401 static void
4402 xpt_async_bcast(struct async_list *async_head,
4403                 u_int32_t async_code,
4404                 struct cam_path *path, void *async_arg)
4405 {
4406         struct async_node *cur_entry;
4407
4408         cur_entry = SLIST_FIRST(async_head);
4409         while (cur_entry != NULL) {
4410                 struct async_node *next_entry;
4411                 /*
4412                  * Grab the next list entry before we call the current
4413                  * entry's callback.  This is because the callback function
4414                  * can delete its async callback entry.
4415                  */
4416                 next_entry = SLIST_NEXT(cur_entry, links);
4417                 if ((cur_entry->event_enable & async_code) != 0)
4418                         cur_entry->callback(cur_entry->callback_arg,
4419                                             async_code, path,
4420                                             async_arg);
4421                 cur_entry = next_entry;
4422         }
4423 }
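/*
 * Illustrative sketch of how a peripheral driver lands on one of these
 * async lists in the first place: it registers interest with an
 * XPT_SASYNC_CB ccb (mydrv_async and softc are hypothetical):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydrv_async;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */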
4424
4425 /*
4426  * Handle any per-device event notifications that require action by the XPT.
4427  */
4428 static void
4429 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4430               struct cam_ed *device, void *async_arg)
4431 {
4432         cam_status status;
4433         struct cam_path newpath;
4434
4435         /*
4436          * We only need to handle events for real devices.
4437          */
4438         if (target->target_id == CAM_TARGET_WILDCARD
4439          || device->lun_id == CAM_LUN_WILDCARD)
4440                 return;
4441
4442         /*
4443          * We need our own path with wildcards expanded to
4444          * handle certain types of events.
4445          */
4446         if ((async_code == AC_SENT_BDR)
4447          || (async_code == AC_BUS_RESET)
4448          || (async_code == AC_INQ_CHANGED))
4449                 status = xpt_compile_path(&newpath, NULL,
4450                                           bus->path_id,
4451                                           target->target_id,
4452                                           device->lun_id);
4453         else
4454                 status = CAM_REQ_CMP_ERR;
4455
4456         if (status == CAM_REQ_CMP) {
4457
4458                 /*
4459                  * Allow transfer negotiation to occur in a
4460                  * tag free environment.
4461                  */
4462                 if (async_code == AC_SENT_BDR
4463                  || async_code == AC_BUS_RESET)
4464                         xpt_toggle_tags(&newpath);
4465
4466                 if (async_code == AC_INQ_CHANGED) {
4467                         /*
4468                          * We've sent a start unit command, or
4469                          * something similar, to a device that
4470                          * may have caused its inquiry data to
4471                          * change. So we re-scan the device to
4472                          * refresh the inquiry data for it.
4473                          */
4474                         xpt_scan_lun(newpath.periph, &newpath,
4475                                      CAM_EXPECT_INQ_CHANGE, NULL);
4476                 }
4477                 xpt_release_path(&newpath);
4478         } else if (async_code == AC_LOST_DEVICE) {
4479                 /*
4480                  * When we lose a device, its sim may be about to detach;
4481                  * we have to clear out all pending timeouts and
4482                  * requests before that happens.  XXX it would be nice if
4483                  * we could abort the requests pertaining to the device.
4484                  */
4485                 xpt_release_devq_timeout(device);
4486                 if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4487                         device->flags |= CAM_DEV_UNCONFIGURED;
4488                         xpt_release_device(bus, target, device);
4489                 }
4490         } else if (async_code == AC_TRANSFER_NEG) {
4491                 struct ccb_trans_settings *settings;
4492
4493                 settings = (struct ccb_trans_settings *)async_arg;
4494                 xpt_set_transfer_settings(settings, device,
4495                                           /*async_update*/TRUE);
4496         }
4497 }
4498
4499 u_int32_t
4500 xpt_freeze_devq(struct cam_path *path, u_int count)
4501 {
4502         struct ccb_hdr *ccbh;
4503
4504         crit_enter();
4505         path->device->qfrozen_cnt += count;
4506
4507         /*
4508          * Mark the last CCB in the queue as needing
4509          * to be requeued if the driver hasn't
4510          * changed its state yet.  This fixes a race
4511          * where a ccb is just about to be queued to
4512          * a controller driver when its interrupt routine
4513          * freezes the queue.  To completely close the
4514          * hole, controller drivers must check to see
4515          * if a ccb's status is still CAM_REQ_INPROG
4516          * under critical section protection just before they queue
4517          * the CCB.  See ahc_action/ahc_freeze_devq for
4518          * an example.
4519          */
4520         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4521         if (ccbh && ccbh->status == CAM_REQ_INPROG)
4522                 ccbh->status = CAM_REQUEUE_REQ;
4523         crit_exit();
4524         return (path->device->qfrozen_cnt);
4525 }
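/*
 * Illustrative sketch of the controller-driver side of the race
 * described above: just before committing a ccb to the hardware, the
 * SIM re-checks the status under critical-section protection (cf.
 * ahc_action()):
 *
 *	crit_enter();
 *	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
 *		crit_exit();
 *		xpt_done(ccb);		(it was marked CAM_REQUEUE_REQ)
 *		return;
 *	}
 *	... queue the ccb to the controller ...
 *	crit_exit();
 */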
4526
4527 u_int32_t
4528 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4529 {
4530         if (sim->devq == NULL)
4531                 return(count);
4532         sim->devq->send_queue.qfrozen_cnt += count;
4533         if (sim->devq->active_dev != NULL) {
4534                 struct ccb_hdr *ccbh;
4535                 
4536                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4537                                   ccb_hdr_tailq);
4538                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4539                         ccbh->status = CAM_REQUEUE_REQ;
4540         }
4541         return (sim->devq->send_queue.qfrozen_cnt);
4542 }
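/*
 * Illustrative sketch: a SIM that must pause the whole controller, e.g.
 * during error recovery, brackets the recovery with a freeze/release
 * pair:
 *
 *	xpt_freeze_simq(sim, 1);
 *	... recover ...
 *	xpt_release_simq(sim, TRUE);
 */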
4543
4544 /*
4545  * WARNING: most devices, especially USB/UMASS, may detach their sim early.
4546  * We ref-count the sim (and the bus only NULLs it out when the bus has been
4547  * freed, which is not the case here), but the device queue is also freed
4548  * (XXX) and we have to check for that here.
4549  *
4550  * XXX fixme: could we simply not null-out the device queue via 
4551  * cam_sim_free()?
4552  */
4553 static void
4554 xpt_release_devq_timeout(void *arg)
4555 {
4556         struct cam_ed *device;
4557
4558         device = (struct cam_ed *)arg;
4559
4560         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4561 }
4562
4563 void
4564 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4565 {
4566         xpt_release_devq_device(path->device, count, run_queue);
4567 }
4568
4569 static void
4570 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4571 {
4572         int     rundevq;
4573
4574         rundevq = 0;
4575         crit_enter();
4576
4577         if (dev->qfrozen_cnt > 0) {
4578
4579                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4580                 dev->qfrozen_cnt -= count;
4581                 if (dev->qfrozen_cnt == 0) {
4582
4583                         /*
4584                          * No longer need to wait for a successful
4585                          * command completion.
4586                          */
4587                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4588
4589                         /*
4590                          * Remove any timeouts that might be scheduled
4591                          * to release this queue.
4592                          */
4593                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4594                                 callout_stop(&dev->c_handle);
4595                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4596                         }
4597
4598                         /*
4599                          * Now that we are unfrozen schedule the
4600                          * device so any pending transactions are
4601                          * run.
4602                          */
4603                         if ((dev->ccbq.queue.entries > 0)
4604                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4605                          && (run_queue != 0)) {
4606                                 rundevq = 1;
4607                         }
4608                 }
4609         }
4610         if (rundevq != 0)
4611                 xpt_run_dev_sendq(dev->target->bus);
4612         crit_exit();
4613 }
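/*
 * Illustrative sketch of the usual periph-side pairing for the devq: an
 * error handler freezes the device queue while it scrubs outstanding
 * work, then releases it and lets anything queued run:
 *
 *	xpt_freeze_devq(ccb->ccb_h.path, 1);
 *	... requeue or abort outstanding work ...
 *	xpt_release_devq(ccb->ccb_h.path, 1, TRUE);
 */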
4614
4615 void
4616 xpt_release_simq(struct cam_sim *sim, int run_queue)
4617 {
4618         struct  camq *sendq;
4619
4620         if (sim->devq == NULL)
4621                 return;
4622
4623         sendq = &(sim->devq->send_queue);
4624         crit_enter();
4625
4626         if (sendq->qfrozen_cnt > 0) {
4627                 sendq->qfrozen_cnt--;
4628                 if (sendq->qfrozen_cnt == 0) {
4629                         struct cam_eb *bus;
4630
4631                         /*
4632                          * If there is a timeout scheduled to release this
4633                          * sim queue, remove it.  The queue frozen count is
4634                          * already at 0.
4635                          */
4636                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4637                                 callout_stop(&sim->c_handle);
4638                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4639                         }
4640                         bus = xpt_find_bus(sim->path_id);
4641                         crit_exit();
4642
4643                         if (run_queue) {
4644                                 /*
4645                                  * Now that we are unfrozen run the send queue.
4646                                  */
4647                                 xpt_run_dev_sendq(bus);
4648                         }
4649                         xpt_release_bus(bus);
4650                 } else {
4651                         crit_exit();
4652                 }
4653         } else {
4654                 crit_exit();
4655         }
4656 }
4657
4658 void
4659 xpt_done(union ccb *done_ccb)
4660 {
4661         crit_enter();
4662
4663         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4664         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4665                 /*
4666                  * Queue up the request for handling by our SWI handler;
4667                  * this applies to any of the "non-immediate" types of ccbs.
4668                  */
4669                 switch (done_ccb->ccb_h.path->periph->type) {
4670                 case CAM_PERIPH_BIO:
4671                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4672                                           sim_links.tqe);
4673                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4674                         setsoftcambio();
4675                         break;
4676                 case CAM_PERIPH_NET:
4677                         TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4678                                           sim_links.tqe);
4679                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4680                         setsoftcamnet();
4681                         break;
4682                 }
4683         }
4684         crit_exit();
4685 }
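/*
 * Illustrative sketch of the SIM side: a controller's interrupt handler
 * completes a ccb by filling in the status and handing it back here,
 * typically from interrupt context:
 *
 *	ccb->csio.scsi_status = SCSI_STATUS_OK;
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 */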
4686
4687 union ccb *
4688 xpt_alloc_ccb(void)
4689 {
4690         union ccb *new_ccb;
4691
4692         new_ccb = kmalloc(sizeof(*new_ccb), M_DEVBUF, M_INTWAIT);
4693         return (new_ccb);
4694 }
4695
4696 void
4697 xpt_free_ccb(union ccb *free_ccb)
4698 {
4699         kfree(free_ccb, M_DEVBUF);
4700 }
4701
4702
4703
4704 /* Private XPT functions */
4705
4706 /*
4707  * Get a CAM control block for the caller. Charge the structure to the device
4708  * referenced by the path.  If this device has no 'credits' then the
4709  * device already has the maximum number of outstanding operations under way
4710  * and we return NULL. If we don't have sufficient resources to allocate more
4711  * ccbs, we also return NULL.
4712  */
4713 static union ccb *
4714 xpt_get_ccb(struct cam_ed *device)
4715 {
4716         union ccb *new_ccb;
4717
4718         crit_enter();
4719         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4720                 new_ccb = kmalloc(sizeof(*new_ccb), M_DEVBUF, M_INTWAIT);
4721                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4722                                   xpt_links.sle);
4723                 xpt_ccb_count++;
4724         }
4725         cam_ccbq_take_opening(&device->ccbq);
4726         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4727         crit_exit();
4728         return (new_ccb);
4729 }
4730
4731 static void
4732 xpt_release_bus(struct cam_eb *bus)
4733 {
4734
4735         crit_enter();
4736         if (bus->refcount == 1) {
4737                 KKASSERT(TAILQ_FIRST(&bus->et_entries) == NULL);
4738                 TAILQ_REMOVE(&xpt_busses, bus, links);
4739                 if (bus->sim) {
4740                         cam_sim_release(bus->sim, 0);
4741                         bus->sim = NULL;
4742                 }
4743                 bus_generation++;
4744                 KKASSERT(bus->refcount == 1);
4745                 kfree(bus, M_DEVBUF);
4746         } else {
4747                 --bus->refcount;
4748         }
4749         crit_exit();
4750 }
4751
4752 static struct cam_et *
4753 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4754 {
4755         struct cam_et *target;
4756         struct cam_et *cur_target;
4757
4758         target = kmalloc(sizeof(*target), M_DEVBUF, M_INTWAIT);
4759
4760         TAILQ_INIT(&target->ed_entries);
4761         target->bus = bus;
4762         target->target_id = target_id;
4763         target->refcount = 1;
4764         target->generation = 0;
4765         timevalclear(&target->last_reset);
4766         /*
4767          * Hold a reference to our parent bus so it
4768          * will not go away before we do.
4769          */
4770         bus->refcount++;
4771
4772         /* Insertion sort into our bus's target list */
4773         cur_target = TAILQ_FIRST(&bus->et_entries);
4774         while (cur_target != NULL && cur_target->target_id < target_id)
4775                 cur_target = TAILQ_NEXT(cur_target, links);
4776
4777         if (cur_target != NULL) {
4778                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4779         } else {
4780                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4781         }
4782         bus->generation++;
4783         return (target);
4784 }
4785
4786 static void
4787 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4788 {
4789         crit_enter();
4790         if (target->refcount == 1) {
4791                 KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
4792                 TAILQ_REMOVE(&bus->et_entries, target, links);
4793                 bus->generation++;
4794                 xpt_release_bus(bus);
4795                 KKASSERT(target->refcount == 1);
4796                 kfree(target, M_DEVBUF);
4797         } else {
4798                 --target->refcount;
4799         }
4800         crit_exit();
4801 }
4802
4803 static struct cam_ed *
4804 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4805 {
4806 #ifdef CAM_NEW_TRAN_CODE
4807         struct     cam_path path;
4808 #endif /* CAM_NEW_TRAN_CODE */
4809         struct     cam_ed *device;
4810         struct     cam_devq *devq;
4811         cam_status status;
4812
4813         /* Make space for us in the device queue on our bus */
4814         if (bus->sim->devq == NULL)
4815                 return(NULL);
4816         devq = bus->sim->devq;
4817         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4818
4819         if (status != CAM_REQ_CMP) {
4820                 device = NULL;
4821         } else {
4822                 device = kmalloc(sizeof(*device), M_DEVBUF, M_INTWAIT);
4823         }
4824
4825         if (device != NULL) {
4826                 struct cam_ed *cur_device;
4827
4828                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4829                 device->alloc_ccb_entry.device = device;
4830                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4831                 device->send_ccb_entry.device = device;
4832                 device->target = target;
4833                 device->lun_id = lun_id;
4834                 /* Initialize our queues */
4835                 if (camq_init(&device->drvq, 0) != 0) {
4836                         kfree(device, M_DEVBUF);
4837                         return (NULL);
4838                 }
4839                 if (cam_ccbq_init(&device->ccbq,
4840                                   bus->sim->max_dev_openings) != 0) {
4841                         camq_fini(&device->drvq);
4842                         kfree(device, M_DEVBUF);
4843                         return (NULL);
4844                 }
4845                 SLIST_INIT(&device->asyncs);
4846                 SLIST_INIT(&device->periphs);
4847                 device->generation = 0;
4848                 device->owner = NULL;
4849                 /*
4850                  * Take the default quirk entry until we have inquiry
4851                  * data and can determine a better quirk to use.
4852                  */
4853                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4854                 bzero(&device->inq_data, sizeof(device->inq_data));
4855                 device->inq_flags = 0;
4856                 device->queue_flags = 0;
4857                 device->serial_num = NULL;
4858                 device->serial_num_len = 0;
4859                 device->qfrozen_cnt = 0;
4860                 device->flags = CAM_DEV_UNCONFIGURED;
4861                 device->tag_delay_count = 0;
4862                 device->refcount = 1;
4863                 callout_init(&device->c_handle);
4864
4865                 /*
4866                  * Hold a reference to our parent target so it
4867                  * will not go away before we do.
4868                  */
4869                 target->refcount++;
4870
4871                 /*
4872                  * XXX should be limited by number of CCBs this bus can
4873                  * do.
4874                  */
4875                 xpt_max_ccbs += device->ccbq.devq_openings;
4876                 /* Insertion sort into our target's device list */
4877                 cur_device = TAILQ_FIRST(&target->ed_entries);
4878                 while (cur_device != NULL && cur_device->lun_id < lun_id)
4879                         cur_device = TAILQ_NEXT(cur_device, links);
4880                 if (cur_device != NULL) {
4881                         TAILQ_INSERT_BEFORE(cur_device, device, links);
4882                 } else {
4883                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4884                 }
4885                 target->generation++;
4886 #ifdef CAM_NEW_TRAN_CODE
4887                 if (lun_id != CAM_LUN_WILDCARD) {
4888                         xpt_compile_path(&path,
4889                                          NULL,
4890                                          bus->path_id,
4891                                          target->target_id,
4892                                          lun_id);
4893                         xpt_devise_transport(&path);
4894                         xpt_release_path(&path);
4895                 }
4896 #endif /* CAM_NEW_TRAN_CODE */
4897         }
4898         return (device);
4899 }
4900
4901 static void
4902 xpt_reference_device(struct cam_ed *device)
4903 {
4904         ++device->refcount;
4905 }
4906
4907 static void
4908 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
4909                    struct cam_ed *device)
4910 {
4911         struct cam_devq *devq;
4912
4913         crit_enter();
4914         if (device->refcount == 1) {
4915                 KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);
4916
4917                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
4918                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4919                         panic("Removing device while still queued for ccbs");
4920
4921                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4922                         device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4923                         callout_stop(&device->c_handle);
4924                 }
4925
4926                 TAILQ_REMOVE(&target->ed_entries, device, links);
4927                 target->generation++;
4928                 xpt_max_ccbs -= device->ccbq.devq_openings;
4929                 /* Release our slot in the devq */
4930                 devq = bus->sim->devq;
4931                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4932                 xpt_release_target(bus, target);
4933                 KKASSERT(device->refcount == 1);
4934                 kfree(device, M_DEVBUF);
4935         } else {
4936                 --device->refcount;
4937         }
4938         crit_exit();
4939 }
4940
4941 static u_int32_t
4942 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4943 {
4944         int     diff;
4945         int     result;
4946         struct  cam_ed *dev;
4947
4948         dev = path->device;
4949
4950         crit_enter();
4951
4952         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4953         result = cam_ccbq_resize(&dev->ccbq, newopenings);
4954         if (result == CAM_REQ_CMP && (diff < 0)) {
4955                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4956         }
4957         /* Adjust the global limit */
4958         xpt_max_ccbs += diff;
4959         crit_exit();
4960         return (result);
4961 }
4962
4963 static struct cam_eb *
4964 xpt_find_bus(path_id_t path_id)
4965 {
4966         struct cam_eb *bus;
4967
4968         TAILQ_FOREACH(bus, &xpt_busses, links) {
4969                 if (bus->path_id == path_id) {
4970                         bus->refcount++;
4971                         break;
4972                 }
4973         }
4974         return (bus);
4975 }
4976
4977 static struct cam_et *
4978 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
4979 {
4980         struct cam_et *target;
4981
4982         TAILQ_FOREACH(target, &bus->et_entries, links) {
4983                 if (target->target_id == target_id) {
4984                         target->refcount++;
4985                         break;
4986                 }
4987         }
4988         return (target);
4989 }
4990
4991 static struct cam_ed *
4992 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4993 {
4994         struct cam_ed *device;
4995
4996         TAILQ_FOREACH(device, &target->ed_entries, links) {
4997                 if (device->lun_id == lun_id) {
4998                         device->refcount++;
4999                         break;
5000                 }
5001         }
5002         return (device);
5003 }
5004
5005 typedef struct {
5006         union   ccb *request_ccb;
5007         struct  ccb_pathinq *cpi;
5008         int     pending_count;
5009 } xpt_scan_bus_info;
5010
5011 /*
5012  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5013  * As the scan progresses, xpt_scan_bus is used as the
5014  * completion callback function.
5015  */
5016 static void
5017 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5018 {
5019         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5020                   ("xpt_scan_bus\n"));
5021         switch (request_ccb->ccb_h.func_code) {
5022         case XPT_SCAN_BUS:
5023         {
5024                 xpt_scan_bus_info *scan_info;
5025                 union   ccb *work_ccb;
5026                 struct  cam_path *path;
5027                 u_int   i;
5028                 u_int   max_target;
5029                 u_int   initiator_id;
5030
5031                 /* Find out the characteristics of the bus */
5032                 work_ccb = xpt_alloc_ccb();
5033                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5034                               request_ccb->ccb_h.pinfo.priority);
5035                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5036                 xpt_action(work_ccb);
5037                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5038                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5039                         xpt_free_ccb(work_ccb);
5040                         xpt_done(request_ccb);
5041                         return;
5042                 }
5043
5044                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5045                         /*
5046                          * Can't scan the bus on an adapter that
5047                          * cannot perform the initiator role.
5048                          */
5049                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5050                         xpt_free_ccb(work_ccb);
5051                         xpt_done(request_ccb);
5052                         return;
5053                 }
5054
5055                 /* Save some state for use while we probe for devices */
5056                 scan_info = (xpt_scan_bus_info *)
5057                     kmalloc(sizeof(xpt_scan_bus_info), M_TEMP, M_INTWAIT);
5058                 scan_info->request_ccb = request_ccb;
5059                 scan_info->cpi = &work_ccb->cpi;
5060
5061                 /* Cache on our stack so we can work asynchronously */
5062                 max_target = scan_info->cpi->max_target;
5063                 initiator_id = scan_info->cpi->initiator_id;
5064
5065                 /*
5066                  * Don't count the initiator if the
5067                  * initiator is addressable.
5068                  */
5069                 scan_info->pending_count = max_target + 1;
5070                 if (initiator_id <= max_target)
5071                         scan_info->pending_count--;
5072
5073                 for (i = 0; i <= max_target; i++) {
5074                         cam_status status;
5075                         if (i == initiator_id)
5076                                 continue;
5077
5078                         status = xpt_create_path(&path, xpt_periph,
5079                                                  request_ccb->ccb_h.path_id,
5080                                                  i, 0);
5081                         if (status != CAM_REQ_CMP) {
5082                                 kprintf("xpt_scan_bus: xpt_create_path failed"
5083                                        " with status %#x, bus scan halted\n",
5084                                        status);
5085                                 break;
5086                         }
5087                         work_ccb = xpt_alloc_ccb();
5088                         xpt_setup_ccb(&work_ccb->ccb_h, path,
5089                                       request_ccb->ccb_h.pinfo.priority);
5090                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5091                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5092                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5093                         work_ccb->crcn.flags = request_ccb->crcn.flags;
5094                         xpt_action(work_ccb);
5095                 }
5096                 break;
5097         }
5098         case XPT_SCAN_LUN:
5099         {
5100                 xpt_scan_bus_info *scan_info;
5101                 path_id_t path_id;
5102                 target_id_t target_id;
5103                 lun_id_t lun_id;
5104
5105                 /* Reuse the same CCB to query if a device was really found */
5106                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5107                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5108                               request_ccb->ccb_h.pinfo.priority);
5109                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5110
5111                 path_id = request_ccb->ccb_h.path_id;
5112                 target_id = request_ccb->ccb_h.target_id;
5113                 lun_id = request_ccb->ccb_h.target_lun;
5114                 xpt_action(request_ccb);
5115
5116                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5117                         struct cam_ed *device;
5118                         struct cam_et *target;
5119                         int phl;
5120
5121                         /*
5122                          * If we already probed lun 0 successfully, or
5123                          * we have additional configured luns on this
5124                          * target that might have "gone away", go on to
5125                          * the next lun.
5126                          */
5127                         target = request_ccb->ccb_h.path->target;
5128                         /*
5129                          * We may touch devices that we don't
5130                          * hold references to, so ensure they
5131                          * don't disappear out from under us.
5132                          * The target above is referenced by the
5133                          * path in the request ccb.
5134                          */
5135                         phl = 0;
5136                         crit_enter();
5137                         device = TAILQ_FIRST(&target->ed_entries);
5138                         if (device != NULL) {
5139                                 phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
5140                                 if (device->lun_id == 0)
5141                                         device = TAILQ_NEXT(device, links);
5142                         }
5143                         crit_exit();
5144                         if ((lun_id != 0) || (device != NULL)) {
5145                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5146                                         lun_id++;
5147                         }
5148                 } else {
5149                         struct cam_ed *device;
5150                         
5151                         device = request_ccb->ccb_h.path->device;
5152
5153                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5154                                 /* Try the next lun */
5155                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
5156                                     (device->quirk->quirks & CAM_QUIRK_HILUNS))
5157                                         lun_id++;
5158                         }
5159                 }
5160
5161                 xpt_free_path(request_ccb->ccb_h.path);
5162
5163                 /* Check Bounds */
5164                 if ((lun_id == request_ccb->ccb_h.target_lun)
5165                  || lun_id > scan_info->cpi->max_lun) {
5166                         /* We're done */
5167
5168                         xpt_free_ccb(request_ccb);
5169                         scan_info->pending_count--;
5170                         if (scan_info->pending_count == 0) {
5171                                 xpt_free_ccb((union ccb *)scan_info->cpi);
5172                                 request_ccb = scan_info->request_ccb;
5173                                 kfree(scan_info, M_TEMP);
5174                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
5175                                 xpt_done(request_ccb);
5176                         }
5177                 } else {
5178                         /* Try the next device */
5179                         struct cam_path *path;
5180                         cam_status status;
5181
5182                         path = request_ccb->ccb_h.path;
5183                         status = xpt_create_path(&path, xpt_periph,
5184                                                  path_id, target_id, lun_id);
5185                         if (status != CAM_REQ_CMP) {
5186                                 kprintf("xpt_scan_bus: xpt_create_path failed "
5187                                        "with status %#x, halting LUN scan\n",
5188                                        status);
5189                                 xpt_free_ccb(request_ccb);
5190                                 scan_info->pending_count--;
5191                                 if (scan_info->pending_count == 0) {
5192                                         xpt_free_ccb(
5193                                                 (union ccb *)scan_info->cpi);
5194                                         request_ccb = scan_info->request_ccb;
5195                                         kfree(scan_info, M_TEMP);
5196                                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5197                                         xpt_done(request_ccb);
5198                                         break;
5199                                 }
5200                         }
5201                         xpt_setup_ccb(&request_ccb->ccb_h, path,
5202                                       request_ccb->ccb_h.pinfo.priority);
5203                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5204                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5205                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5206                         request_ccb->crcn.flags =
5207                                 scan_info->request_ccb->crcn.flags;
5208                         xpt_action(request_ccb);
5209                 }
5210                 break;
5211         }
5212         default:
5213                 break;
5214         }
5215 }
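/*
 * Illustrative sketch of how the scan above is kicked off: the caller
 * builds a wildcard path for the bus and queues an XPT_SCAN_BUS ccb
 * (scan_done is hypothetical):
 *
 *	ccb = xpt_alloc_ccb();
 *	xpt_setup_ccb(&ccb->ccb_h, path, 1);
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = scan_done;
 *	ccb->crcn.flags = CAM_FLAG_NONE;
 *	xpt_action(ccb);
 */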
5216
5217 typedef enum {
5218         PROBE_TUR,
5219         PROBE_INQUIRY,
5220         PROBE_FULL_INQUIRY,
5221         PROBE_MODE_SENSE,
5222         PROBE_SERIAL_NUM,
5223         PROBE_TUR_FOR_NEGOTIATION
5224 } probe_action;
5225
5226 typedef enum {
5227         PROBE_INQUIRY_CKSUM     = 0x01,
5228         PROBE_SERIAL_CKSUM      = 0x02,
5229         PROBE_NO_ANNOUNCE       = 0x04
5230 } probe_flags;
5231
5232 typedef struct {
5233         TAILQ_HEAD(, ccb_hdr) request_ccbs;
5234         probe_action    action;
5235         union ccb       saved_ccb;
5236         probe_flags     flags;
5237         MD5_CTX         context;
5238         u_int8_t        digest[16];
5239 } probe_softc;
5240
5241 static void
5242 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5243              cam_flags flags, union ccb *request_ccb)
5244 {
5245         struct ccb_pathinq cpi;
5246         cam_status status;
5247         struct cam_path *new_path;
5248         struct cam_periph *old_periph;
5249         
5250         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5251                   ("xpt_scan_lun\n"));
5252         
5253         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5254         cpi.ccb_h.func_code = XPT_PATH_INQ;
5255         xpt_action((union ccb *)&cpi);
5256
5257         if (cpi.ccb_h.status != CAM_REQ_CMP) {
5258                 if (request_ccb != NULL) {
5259                         request_ccb->ccb_h.status = cpi.ccb_h.status;
5260                         xpt_done(request_ccb);
5261                 }
5262                 return;
5263         }
5264
5265         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5266                 /*
5267                  * Can't scan the bus on an adapter that
5268                  * cannot perform the initiator role.
5269                  */
5270                 if (request_ccb != NULL) {
5271                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5272                         xpt_done(request_ccb);
5273                 }
5274                 return;
5275         }
5276
5277         if (request_ccb == NULL) {
5278                 request_ccb = kmalloc(sizeof(union ccb), M_TEMP, M_INTWAIT);
5279                 new_path = kmalloc(sizeof(*new_path), M_TEMP, M_INTWAIT);
5280                 status = xpt_compile_path(new_path, xpt_periph,
5281                                           path->bus->path_id,
5282                                           path->target->target_id,
5283                                           path->device->lun_id);
5284
5285                 if (status != CAM_REQ_CMP) {
5286                         xpt_print_path(path);
5287                         kprintf("xpt_scan_lun: can't compile path, can't "
5288                                "continue\n");
5289                         kfree(request_ccb, M_TEMP);
5290                         kfree(new_path, M_TEMP);
5291                         return;
5292                 }
5293                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5294                 request_ccb->ccb_h.cbfcnp = xptscandone;
5295                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5296                 request_ccb->crcn.flags = flags;
5297         }
5298
5299         crit_enter();
5300         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5301                 probe_softc *softc;
5302
5303                 softc = (probe_softc *)old_periph->softc;
5304                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5305                                   periph_links.tqe);
5306         } else {
5307                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5308                                           probestart, "probe",
5309                                           CAM_PERIPH_BIO,
5310                                           request_ccb->ccb_h.path, NULL, 0,
5311                                           request_ccb);
5312
5313                 if (status != CAM_REQ_CMP) {
5314                         xpt_print_path(path);
5315                         kprintf("xpt_scan_lun: cam_periph_alloc returned an "
5316                                "error, can't continue probe\n");
5317                         request_ccb->ccb_h.status = status;
5318                         xpt_done(request_ccb);
5319                 }
5320         }
5321         crit_exit();
5322 }
5323
5324 static void
5325 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5326 {
5327         xpt_release_path(done_ccb->ccb_h.path);
5328         kfree(done_ccb->ccb_h.path, M_TEMP);
5329         kfree(done_ccb, M_TEMP);
5330 }
5331
5332 static cam_status
5333 proberegister(struct cam_periph *periph, void *arg)
5334 {
5335         union ccb *request_ccb; /* CCB representing the probe request */
5336         probe_softc *softc;
5337
5338         request_ccb = (union ccb *)arg;
5339         if (periph == NULL) {
5340                 kprintf("proberegister: periph was NULL!!\n");
5341                 return(CAM_REQ_CMP_ERR);
5342         }
5343
5344         if (request_ccb == NULL) {
5345                 kprintf("proberegister: no probe CCB, "
5346                        "can't register device\n");
5347                 return(CAM_REQ_CMP_ERR);
5348         }
5349
5350         softc = kmalloc(sizeof(*softc), M_TEMP, M_INTWAIT | M_ZERO);
5351         TAILQ_INIT(&softc->request_ccbs);
5352         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5353                           periph_links.tqe);
5354         softc->flags = 0;
5355         periph->softc = softc;
5356         cam_periph_acquire(periph);
5357         /*
5358          * Ensure we've waited at least a bus settle
5359          * delay before attempting to probe the device.
5360          * For HBAs that don't do bus resets, this won't make a difference.
5361          */
5362         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5363                                       scsi_delay);
5364         probeschedule(periph);
5365         return(CAM_REQ_CMP);
5366 }
5367
5368 static void
5369 probeschedule(struct cam_periph *periph)
5370 {
5371         struct ccb_pathinq cpi;
5372         union ccb *ccb;
5373         probe_softc *softc;
5374
5375         softc = (probe_softc *)periph->softc;
5376         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5377
5378         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5379         cpi.ccb_h.func_code = XPT_PATH_INQ;
5380         xpt_action((union ccb *)&cpi);
5381
5382         /*
5383          * If a device has gone away and another device, or the same one,
5384          * is back in the same place, it should have a unit attention
5385          * condition pending.  It will not report the unit attention in
5386          * response to an inquiry, which may leave invalid transfer
5387          * negotiations in effect.  The TUR will reveal the unit attention
5388          * condition.  Only send the TUR for lun 0, since some devices 
5389          * will get confused by commands other than inquiry to non-existent
5390          * luns.  If you think a device has gone away start your scan from
5391          * lun 0.  This will insure that any bogus transfer settings are
5392          * invalidated.
5393          *
5394          * If we haven't seen the device before and the controller supports
5395          * some kind of transfer negotiation, negotiate with the first
5396          * sent command if no bus reset was performed at startup.  This
5397          * ensures that the device is not confused by transfer negotiation
5398          * settings left over by loader or BIOS action.
5399          */
5400         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5401          && (ccb->ccb_h.target_lun == 0)) {
5402                 softc->action = PROBE_TUR;
5403         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5404               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5405                 proberequestdefaultnegotiation(periph);
5406                 softc->action = PROBE_INQUIRY;
5407         } else {
5408                 softc->action = PROBE_INQUIRY;
5409         }
5410
5411         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5412                 softc->flags |= PROBE_NO_ANNOUNCE;
5413         else
5414                 softc->flags &= ~PROBE_NO_ANNOUNCE;
5415
5416         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5417 }
5418
5419 static void
5420 probestart(struct cam_periph *periph, union ccb *start_ccb)
5421 {
5422         /* Probe the device that our peripheral driver points to */
5423         struct ccb_scsiio *csio;
5424         probe_softc *softc;
5425
5426         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5427
5428         softc = (probe_softc *)periph->softc;
5429         csio = &start_ccb->csio;
5430
5431         switch (softc->action) {
5432         case PROBE_TUR:
5433         case PROBE_TUR_FOR_NEGOTIATION:
5434         {
5435                 scsi_test_unit_ready(csio,
5436                                      /*retries*/4,
5437                                      probedone,
5438                                      MSG_SIMPLE_Q_TAG,
5439                                      SSD_FULL_SIZE,
5440                                      /*timeout*/60000);
5441                 break;
5442         }
5443         case PROBE_INQUIRY:
5444         case PROBE_FULL_INQUIRY:
5445         {
5446                 u_int inquiry_len;
5447                 struct scsi_inquiry_data *inq_buf;
5448
5449                 inq_buf = &periph->path->device->inq_data;
5450                 /*
5451                  * If the device is currently configured, we calculate an
5452                  * MD5 checksum of the inquiry data, and if the serial number
5453                  * length is greater than 0, add the serial number data
5454                  * into the checksum as well.  Once the inquiry and the
5455                  * serial number check finish, we attempt to figure out
5456                  * whether we still have the same device.
5457                  */
5458                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5459                         
5460                         MD5Init(&softc->context);
5461                         MD5Update(&softc->context, (unsigned char *)inq_buf,
5462                                   sizeof(struct scsi_inquiry_data));
5463                         softc->flags |= PROBE_INQUIRY_CKSUM;
5464                         if (periph->path->device->serial_num_len > 0) {
5465                                 MD5Update(&softc->context,
5466                                           periph->path->device->serial_num,
5467                                           periph->path->device->serial_num_len);
5468                                 softc->flags |= PROBE_SERIAL_CKSUM;
5469                         }
5470                         MD5Final(softc->digest, &softc->context);
5471                 } 
5472
5473                 if (softc->action == PROBE_INQUIRY)
5474                         inquiry_len = SHORT_INQUIRY_LENGTH;
5475                 else
5476                         inquiry_len = inq_buf->additional_length + 5;
5477         
5478                 scsi_inquiry(csio,
5479                              /*retries*/4,
5480                              probedone,
5481                              MSG_SIMPLE_Q_TAG,
5482                              (u_int8_t *)inq_buf,
5483                              inquiry_len,
5484                              /*evpd*/FALSE,
5485                              /*page_code*/0,
5486                              SSD_MIN_SIZE,
5487                              /*timeout*/60 * 1000);
5488                 break;
5489         }
5490         case PROBE_MODE_SENSE:
5491         {
5492                 void  *mode_buf;
5493                 int    mode_buf_len;
5494
5495                 mode_buf_len = sizeof(struct scsi_mode_header_6)
5496                              + sizeof(struct scsi_mode_blk_desc)
5497                              + sizeof(struct scsi_control_page);
5498                 mode_buf = kmalloc(mode_buf_len, M_TEMP, M_INTWAIT);
5499                 scsi_mode_sense(csio,
5500                                 /*retries*/4,
5501                                 probedone,
5502                                 MSG_SIMPLE_Q_TAG,
5503                                 /*dbd*/FALSE,
5504                                 SMS_PAGE_CTRL_CURRENT,
5505                                 SMS_CONTROL_MODE_PAGE,
5506                                 mode_buf,
5507                                 mode_buf_len,
5508                                 SSD_FULL_SIZE,
5509                                 /*timeout*/60000);
5510                 break;
5511         }
5512         case PROBE_SERIAL_NUM:
5513         {
5514                 struct scsi_vpd_unit_serial_number *serial_buf;
5515                 struct cam_ed* device;
5516
5517                 serial_buf = NULL;
5518                 device = periph->path->device;
5519                 device->serial_num = NULL;
5520                 device->serial_num_len = 0;
5521
5522                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5523                         serial_buf = kmalloc(sizeof(*serial_buf), M_TEMP,
5524                                             M_INTWAIT | M_ZERO);
5525                         scsi_inquiry(csio,
5526                                      /*retries*/4,
5527                                      probedone,
5528                                      MSG_SIMPLE_Q_TAG,
5529                                      (u_int8_t *)serial_buf,
5530                                      sizeof(*serial_buf),
5531                                      /*evpd*/TRUE,
5532                                      SVPD_UNIT_SERIAL_NUMBER,
5533                                      SSD_MIN_SIZE,
5534                                      /*timeout*/60 * 1000);
5535                         break;
5536                 }
5537                 /*
5538                  * We'll have to do without, let our probedone
5539                  * routine finish up for us.
5540                  */
5541                 start_ccb->csio.data_ptr = NULL;
5542                 probedone(periph, start_ccb);
5543                 return;
5544         }
5545         }
5546         xpt_action(start_ccb);
5547 }
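
     /*
      * For reference, the state machine driven by probestart()/probedone()
      * advances roughly as follows on a responsive device:
      *
      *	PROBE_TUR -> PROBE_INQUIRY [-> PROBE_FULL_INQUIRY]
      *	    -> PROBE_MODE_SENSE (only if SID_CmdQue is set)
      *	    -> PROBE_SERIAL_NUM
      *	    -> PROBE_TUR_FOR_NEGOTIATION (only if the device looks new)
      *
      * PROBE_TUR itself is skipped for unconfigured devices and for luns
      * other than 0 (see probeschedule() above).
      */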
5548
5549 static void
5550 proberequestdefaultnegotiation(struct cam_periph *periph)
5551 {
5552         struct ccb_trans_settings cts;
5553
5554         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5555         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5556 #ifdef CAM_NEW_TRAN_CODE
5557         cts.type = CTS_TYPE_USER_SETTINGS;
5558 #else /* CAM_NEW_TRAN_CODE */
5559         cts.flags = CCB_TRANS_USER_SETTINGS;
5560 #endif /* CAM_NEW_TRAN_CODE */
5561         xpt_action((union ccb *)&cts);
5562         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5563 #ifdef CAM_NEW_TRAN_CODE
5564         cts.type = CTS_TYPE_CURRENT_SETTINGS;
5565 #else /* CAM_NEW_TRAN_CODE */
5566         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5567         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5568 #endif /* CAM_NEW_TRAN_CODE */
5569         xpt_action((union ccb *)&cts);
5570 }
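
     /*
      * The get-user/set-current round trip above is the stock way to drop
      * a device back to its default (user) negotiation.  A caller that
      * wanted something specific would fill the CCB in by hand instead;
      * a minimal sketch for the new transport code, forcing async/narrow
      * (the protocol/transport fields would need the same defaulting as
      * in xpt_toggle_tags() below):
      *
      *	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
      *	cts.type = CTS_TYPE_CURRENT_SETTINGS;
      *	cts.xport_specific.spi.sync_period = 0;
      *	cts.xport_specific.spi.sync_offset = 0;
      *	cts.xport_specific.spi.bus_width = MSG_EXT_WDTR_BUS_8_BIT;
      *	cts.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE
      *				     | CTS_SPI_VALID_SYNC_OFFSET
      *				     | CTS_SPI_VALID_BUS_WIDTH;
      *	xpt_action((union ccb *)&cts);
      */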
5571
5572 static void
5573 probedone(struct cam_periph *periph, union ccb *done_ccb)
5574 {
5575         probe_softc *softc;
5576         struct cam_path *path;
5577         u_int32_t  priority;
5578
5579         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5580
5581         softc = (probe_softc *)periph->softc;
5582         path = done_ccb->ccb_h.path;
5583         priority = done_ccb->ccb_h.pinfo.priority;
5584
5585         switch (softc->action) {
5586         case PROBE_TUR:
5587         {
5588                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5589
5590                         if (cam_periph_error(done_ccb, 0,
5591                                              SF_NO_PRINT, NULL) == ERESTART)
5592                                 return;
5593                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5594                                 /* Don't wedge the queue */
5595                                 xpt_release_devq(done_ccb->ccb_h.path,
5596                                                  /*count*/1,
5597                                                  /*run_queue*/TRUE);
5598                 }
5599                 softc->action = PROBE_INQUIRY;
5600                 xpt_release_ccb(done_ccb);
5601                 xpt_schedule(periph, priority);
5602                 return;
5603         }
5604         case PROBE_INQUIRY:
5605         case PROBE_FULL_INQUIRY:
5606         {
5607                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5608                         struct scsi_inquiry_data *inq_buf;
5609                         u_int8_t periph_qual;
5610
5611                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5612                         inq_buf = &path->device->inq_data;
5613
5614                         periph_qual = SID_QUAL(inq_buf);
5615                         
5616                         switch(periph_qual) {
5617                         case SID_QUAL_LU_CONNECTED:
5618                         {
5619                                 u_int8_t alen;
5620
5621                                 /*
5622                                  * We conservatively request only
5623                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
5624                                  * information during our first try
5625                                  * at sending an INQUIRY. If the device
5626                                  * has more information to give,
5627                                  * perform a second request specifying
5628                                  * the amount of information the device
5629                                  * is willing to give.
5630                                  */
5631                                 alen = inq_buf->additional_length;
5632                                 if (softc->action == PROBE_INQUIRY
5633                                  && alen > (SHORT_INQUIRY_LENGTH - 5)) {
5634                                         softc->action = PROBE_FULL_INQUIRY;
5635                                         xpt_release_ccb(done_ccb);
5636                                         xpt_schedule(periph, priority);
5637                                         return;
5638                                 }
5639
5640                                 xpt_find_quirk(path->device);
5641
5642 #ifdef CAM_NEW_TRAN_CODE
5643                                 xpt_devise_transport(path);
5644 #endif /* CAM_NEW_TRAN_CODE */
5645                                 if ((inq_buf->flags & SID_CmdQue) != 0)
5646                                         softc->action = PROBE_MODE_SENSE;
5647                                 else
5648                                         softc->action = PROBE_SERIAL_NUM;
5649
5650                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5651                                 xpt_reference_device(path->device);
5652
5653                                 xpt_release_ccb(done_ccb);
5654                                 xpt_schedule(periph, priority);
5655                                 return;
5656                         }
5657                         default:
5658                                 break;
5659                         }
5660                 } else if (cam_periph_error(done_ccb, 0,
5661                                             done_ccb->ccb_h.target_lun > 0
5662                                             ? SF_RETRY_UA|SF_QUIET_IR
5663                                             : SF_RETRY_UA,
5664                                             &softc->saved_ccb) == ERESTART) {
5665                         return;
5666                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5667                         /* Don't wedge the queue */
5668                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5669                                          /*run_queue*/TRUE);
5670                 }
5671                 /*
5672                  * If we get to this point, we got an error status back
5673                  * from the inquiry and the error status doesn't require
5674                  * automatically retrying the command.  Therefore, the
5675                  * inquiry failed.  If we had inquiry information before
5676                  * for this device, but this latest inquiry command failed,
5677                  * the device has probably gone away.  If this device isn't
5678                  * already marked unconfigured, notify the peripheral
5679                  * drivers that this device is no more.
5680                  */
5681                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5682                         /* Send the async notification. */
5683                         xpt_async(AC_LOST_DEVICE, path, NULL);
5684                 }
5685
5686                 xpt_release_ccb(done_ccb);
5687                 break;
5688         }
5689         case PROBE_MODE_SENSE:
5690         {
5691                 struct ccb_scsiio *csio;
5692                 struct scsi_mode_header_6 *mode_hdr;
5693
5694                 csio = &done_ccb->csio;
5695                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5696                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5697                         struct scsi_control_page *page;
5698                         u_int8_t *offset;
5699
5700                         offset = ((u_int8_t *)&mode_hdr[1])
5701                             + mode_hdr->blk_desc_len;
5702                         page = (struct scsi_control_page *)offset;
5703                         path->device->queue_flags = page->queue_flags;
5704                 } else if (cam_periph_error(done_ccb, 0,
5705                                             SF_RETRY_UA|SF_NO_PRINT,
5706                                             &softc->saved_ccb) == ERESTART) {
5707                         return;
5708                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5709                         /* Don't wedge the queue */
5710                         xpt_release_devq(done_ccb->ccb_h.path,
5711                                          /*count*/1, /*run_queue*/TRUE);
5712                 }
5713                 xpt_release_ccb(done_ccb);
5714                 kfree(mode_hdr, M_TEMP);
5715                 softc->action = PROBE_SERIAL_NUM;
5716                 xpt_schedule(periph, priority);
5717                 return;
5718         }
5719         case PROBE_SERIAL_NUM:
5720         {
5721                 struct ccb_scsiio *csio;
5722                 struct scsi_vpd_unit_serial_number *serial_buf;
5723                 u_int32_t  priority;
5724                 int changed;
5725                 int have_serialnum;
5726
5727                 changed = 1;
5728                 have_serialnum = 0;
5729                 csio = &done_ccb->csio;
5730                 priority = done_ccb->ccb_h.pinfo.priority;
5731                 serial_buf =
5732                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5733
5734                 /* Clean up from previous instance of this device */
5735                 if (path->device->serial_num != NULL) {
5736                         kfree(path->device->serial_num, M_DEVBUF);
5737                         path->device->serial_num = NULL;
5738                         path->device->serial_num_len = 0;
5739                 }
5740
5741                 if (serial_buf == NULL) {
5742                         /*
5743                          * Don't process the command as it was never sent
5744                          */
5745                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5746                         && (serial_buf->length > 0)) {
5747
5748                         have_serialnum = 1;
5749                         path->device->serial_num =
5750                                 kmalloc((serial_buf->length + 1),
5751                                        M_DEVBUF, M_INTWAIT);
5752                         bcopy(serial_buf->serial_num,
5753                               path->device->serial_num,
5754                               serial_buf->length);
5755                         path->device->serial_num_len = serial_buf->length;
5756                         path->device->serial_num[serial_buf->length] = '\0';
5757                 } else if (cam_periph_error(done_ccb, 0,
5758                                             SF_RETRY_UA|SF_NO_PRINT,
5759                                             &softc->saved_ccb) == ERESTART) {
5760                         return;
5761                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5762                         /* Don't wedge the queue */
5763                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5764                                          /*run_queue*/TRUE);
5765                 }
5766                 
5767                 /*
5768                  * Let's see if we have seen this device before.
5769                  */
5770                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5771                         MD5_CTX context;
5772                         u_int8_t digest[16];
5773
5774                         MD5Init(&context);
5775                         
5776                         MD5Update(&context,
5777                                   (unsigned char *)&path->device->inq_data,
5778                                   sizeof(struct scsi_inquiry_data));
5779
5780                         if (have_serialnum)
5781                                 MD5Update(&context, serial_buf->serial_num,
5782                                           serial_buf->length);
5783
5784                         MD5Final(digest, &context);
5785                         if (bcmp(softc->digest, digest, 16) == 0)
5786                                 changed = 0;
5787
5788                         /*
5789                          * XXX Do we need to do a TUR in order to ensure
5790                          *     that the device really hasn't changed???
5791                          */
5792                         if ((changed != 0)
5793                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5794                                 xpt_async(AC_LOST_DEVICE, path, NULL);
5795                 }
5796                 if (serial_buf != NULL)
5797                         kfree(serial_buf, M_TEMP);
5798
5799                 if (changed != 0) {
5800                         /*
5801                          * Now that we have all the necessary
5802                          * information to safely perform transfer
5803                          * negotiations... Controllers don't perform
5804                          * any negotiation or tagged queuing until
5805                          * after the first XPT_SET_TRAN_SETTINGS ccb is
5806          * received.  So, on a new device, just retrieve the user settings,
5807                          * the user settings, and set them as the current
5808                          * settings to set the device up.
5809                          */
5810                         proberequestdefaultnegotiation(periph);
5811                         xpt_release_ccb(done_ccb);
5812
5813                         /*
5814                          * Perform a TUR to allow the controller to
5815                          * perform any necessary transfer negotiation.
5816                          */
5817                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
5818                         xpt_schedule(periph, priority);
5819                         return;
5820                 }
5821                 xpt_release_ccb(done_ccb);
5822                 break;
5823         }
5824         case PROBE_TUR_FOR_NEGOTIATION:
5825                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5826                         /* Don't wedge the queue */
5827                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5828                                          /*run_queue*/TRUE);
5829                 }
5830
5831                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5832                 xpt_reference_device(path->device);
5833
5834                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5835                         /* Inform the XPT that a new device has been found */
5836                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5837                         xpt_action(done_ccb);
5838
5839                         xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5840                 }
5841                 xpt_release_ccb(done_ccb);
5842                 break;
5843         }
5844         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5845         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5846         done_ccb->ccb_h.status = CAM_REQ_CMP;
5847         xpt_done(done_ccb);
5848         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5849                 cam_periph_invalidate(periph);
5850                 cam_periph_release(periph);
5851         } else {
5852                 probeschedule(periph);
5853         }
5854 }
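
     /*
      * Change-detection recap: before reprobing a configured device,
      * probestart() snapshots an MD5 digest of the inquiry data (plus the
      * serial number, when one is known) into softc->digest.  The
      * PROBE_SERIAL_NUM case above recomputes the digest from the fresh
      * data; a mismatch means the lun is treated as a new device, so
      * AC_LOST_DEVICE is broadcast (unless PROBE_NO_ANNOUNCE is set),
      * default negotiation is requested, and a final TUR gives the
      * controller a chance to renegotiate.
      */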
5855
5856 static void
5857 probecleanup(struct cam_periph *periph)
5858 {
5859         kfree(periph->softc, M_TEMP);
5860 }
5861
5862 static void
5863 xpt_find_quirk(struct cam_ed *device)
5864 {
5865         caddr_t match;
5866
5867         match = cam_quirkmatch((caddr_t)&device->inq_data,
5868                                (caddr_t)xpt_quirk_table,
5869                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5870                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
5871
5872         if (match == NULL)
5873                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
5874
5875         device->quirk = (struct xpt_quirk_entry *)match;
5876 }
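
     /*
      * xpt_quirk_table is matched against the inquiry vendor, product and
      * revision strings via scsi_inquiry_match(); its final entry is a
      * wildcard, hence the panic above if nothing matches at all.  Judging
      * by the accesses in this file (quirk->quirks, quirk->mintags,
      * quirk->maxtags), an entry pairs a match pattern with quirk flags
      * and tag limits, along these lines (values purely illustrative):
      *
      *	{ { T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "PRODUCT", "*" },
      *	  CAM_QUIRK_NOSERIAL, 0, 0 },
      */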
5877
5878 #ifdef CAM_NEW_TRAN_CODE
5879
5880 static void
5881 xpt_devise_transport(struct cam_path *path)
5882 {
5883         struct ccb_pathinq cpi;
5884         struct ccb_trans_settings cts;
5885         struct scsi_inquiry_data *inq_buf;
5886
5887         /* Get transport information from the SIM */
5888         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5889         cpi.ccb_h.func_code = XPT_PATH_INQ;
5890         xpt_action((union ccb *)&cpi);
5891
5892         inq_buf = NULL;
5893         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
5894                 inq_buf = &path->device->inq_data;
5895         path->device->protocol = PROTO_SCSI;
5896         path->device->protocol_version =
5897             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
5898         path->device->transport = cpi.transport;
5899         path->device->transport_version = cpi.transport_version;
5900
5901         /*
5902          * Any device not using SPI3 features should
5903          * be considered SPI2 or lower.
5904          */
5905         if (inq_buf != NULL) {
5906                 if (path->device->transport == XPORT_SPI
5907                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
5908                  && path->device->transport_version > 2)
5909                         path->device->transport_version = 2;
5910         } else {
5911                 struct cam_ed* otherdev;
5912
5913                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
5914                      otherdev != NULL;
5915                      otherdev = TAILQ_NEXT(otherdev, links)) {
5916                         if (otherdev != path->device)
5917                                 break;
5918                 }
5919
5920                 if (otherdev != NULL) {
5921                         /*
5922                          * Initially assume the same versioning as
5923                          * prior luns for this target.
5924                          */
5925                         path->device->protocol_version =
5926                             otherdev->protocol_version;
5927                         path->device->transport_version =
5928                             otherdev->transport_version;
5929                 } else {
5930                         /* Until we know better, opt for safety */
5931                         path->device->protocol_version = 2;
5932                         if (path->device->transport == XPORT_SPI)
5933                                 path->device->transport_version = 2;
5934                         else
5935                                 path->device->transport_version = 0;
5936                 }
5937         }
5938
5939         /*
5940          * XXX
5941          * For a device compliant with SPC-2 we should be able
5942          * to determine the transport version supported by
5943          * scrutinizing the version descriptors in the
5944          * inquiry buffer.
5945          */
5946
5947         /* Tell the controller what we think */
5948         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
5949         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5950         cts.type = CTS_TYPE_CURRENT_SETTINGS;
5951         cts.transport = path->device->transport;
5952         cts.transport_version = path->device->transport_version;
5953         cts.protocol = path->device->protocol;
5954         cts.protocol_version = path->device->protocol_version;
5955         cts.proto_specific.valid = 0;
5956         cts.xport_specific.valid = 0;
5957         xpt_action((union ccb *)&cts);
5958 }
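
     /*
      * Version selection above, in short: with valid inquiry data the
      * device's own SID_ANSI_REV and spi3data bits win (an SPI device
      * advertising no SPI3 features is clamped to transport version 2);
      * without inquiry data we inherit from a sibling lun on the same
      * target, and failing that fall back to protocol version 2 and
      * transport version 2 (SPI) or 0 (anything else).
      */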
5959
5960 static void
5961 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
5962                           int async_update)
5963 {
5964         struct  ccb_pathinq cpi;
5965         struct  ccb_trans_settings cur_cts;
5966         struct  ccb_trans_settings_scsi *scsi;
5967         struct  ccb_trans_settings_scsi *cur_scsi;
5968         struct  cam_sim *sim;
5969         struct  scsi_inquiry_data *inq_data;
5970
5971         if (device == NULL) {
5972                 cts->ccb_h.status = CAM_PATH_INVALID;
5973                 xpt_done((union ccb *)cts);
5974                 return;
5975         }
5976
5977         if (cts->protocol == PROTO_UNKNOWN
5978          || cts->protocol == PROTO_UNSPECIFIED) {
5979                 cts->protocol = device->protocol;
5980                 cts->protocol_version = device->protocol_version;
5981         }
5982
5983         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
5984          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
5985                 cts->protocol_version = device->protocol_version;
5986
5987         if (cts->protocol != device->protocol) {
5988                 xpt_print_path(cts->ccb_h.path);
5989                 kprintf("Uninitialized Protocol %x:%x?\n",
5990                         cts->protocol, device->protocol);
5991                 cts->protocol = device->protocol;
5992         }
5993
5994         if (cts->protocol_version > device->protocol_version) {
5995                 if (bootverbose) {
5996                         xpt_print_path(cts->ccb_h.path);
5997                         kprintf("Down reving Protocol Version from %d to %d?\n",
5998                                 cts->protocol_version, device->protocol_version);
5999                 }
6000                 cts->protocol_version = device->protocol_version;
6001         }
6002
6003         if (cts->transport == XPORT_UNKNOWN
6004          || cts->transport == XPORT_UNSPECIFIED) {
6005                 cts->transport = device->transport;
6006                 cts->transport_version = device->transport_version;
6007         }
6008
6009         if (cts->transport_version == XPORT_VERSION_UNKNOWN
6010          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6011                 cts->transport_version = device->transport_version;
6012
6013         if (cts->transport != device->transport) {
6014                 xpt_print_path(cts->ccb_h.path);
6015                 kprintf("Uninitialized Transport %x:%x?\n",
6016                         cts->transport, device->transport);
6017                 cts->transport = device->transport;
6018         }
6019
6020         if (cts->transport_version > device->transport_version) {
6021                 if (bootverbose) {
6022                         xpt_print_path(cts->ccb_h.path);
6023                         kprintf("Down reving Transport Version from %d to %d?\n",
6024                                 cts->transport_version,
6025                                 device->transport_version);
6026                 }
6027                 cts->transport_version = device->transport_version;
6028         }
6029
6030         sim = cts->ccb_h.path->bus->sim;
6031
6032         /*
6033          * Nothing more of interest to do unless
6034          * this is a device connected via the
6035          * SCSI protocol.
6036          */
6037         if (cts->protocol != PROTO_SCSI) {
6038                 if (async_update == FALSE)
6039                         (*(sim->sim_action))(sim, (union ccb *)cts);
6040                 return;
6041         }
6042
6043         inq_data = &device->inq_data;
6044         scsi = &cts->proto_specific.scsi;
6045         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6046         cpi.ccb_h.func_code = XPT_PATH_INQ;
6047         xpt_action((union ccb *)&cpi);
6048
6049         /* SCSI specific sanity checking */
6050         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6051          || (inq_data->flags & SID_CmdQue) == 0
6052          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6053          || (device->quirk->mintags == 0)) {
6054                 /*
6055                  * Can't tag on hardware that doesn't support tags,
6056                  * doesn't have it enabled, or has broken tag support.
6057                  */
6058                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6059         }
6060
6061         if (async_update == FALSE) {
6062                 /*
6063                  * Perform sanity checking against what the
6064                  * controller and device can do.
6065                  */
6066                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6067                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6068                 cur_cts.type = cts->type;
6069                 xpt_action((union ccb *)&cur_cts);
6070
6071                 cur_scsi = &cur_cts.proto_specific.scsi;
6072                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6073                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6074                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6075                 }
6076                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6077                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6078         }
6079
6080         /* SPI specific sanity checking */
6081         if (cts->transport == XPORT_SPI && async_update == FALSE) {
6082                 u_int spi3caps;
6083                 struct ccb_trans_settings_spi *spi;
6084                 struct ccb_trans_settings_spi *cur_spi;
6085
6086                 spi = &cts->xport_specific.spi;
6087
6088                 cur_spi = &cur_cts.xport_specific.spi;
6089
6090                 /* Fill in any gaps in what the user gave us */
6091                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6092                         spi->sync_period = cur_spi->sync_period;
6093                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6094                         spi->sync_period = 0;
6095                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6096                         spi->sync_offset = cur_spi->sync_offset;
6097                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6098                         spi->sync_offset = 0;
6099                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6100                         spi->ppr_options = cur_spi->ppr_options;
6101                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6102                         spi->ppr_options = 0;
6103                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6104                         spi->bus_width = cur_spi->bus_width;
6105                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6106                         spi->bus_width = 0;
6107                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6108                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6109                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6110                 }
6111                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6112                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6113                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6114                   && (inq_data->flags & SID_Sync) == 0
6115                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6116                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6117                  || (cur_spi->sync_offset == 0)
6118                  || (cur_spi->sync_period == 0)) {
6119                         /* Force async */
6120                         spi->sync_period = 0;
6121                         spi->sync_offset = 0;
6122                 }
6123
6124                 switch (spi->bus_width) {
6125                 case MSG_EXT_WDTR_BUS_32_BIT:
6126                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6127                           || (inq_data->flags & SID_WBus32) != 0
6128                           || cts->type == CTS_TYPE_USER_SETTINGS)
6129                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6130                                 break;
6131                         /* Fall Through to 16-bit */
6132                 case MSG_EXT_WDTR_BUS_16_BIT:
6133                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6134                           || (inq_data->flags & SID_WBus16) != 0
6135                           || cts->type == CTS_TYPE_USER_SETTINGS)
6136                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6137                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6138                                 break;
6139                         }
6140                         /* Fall Through to 8-bit */
6141                 default: /* New bus width?? */
6142                 case MSG_EXT_WDTR_BUS_8_BIT:
6143                         /* All targets can do this */
6144                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6145                         break;
6146                 }
6147
6148                 spi3caps = cpi.xport_specific.spi.ppr_options;
6149                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6150                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6151                         spi3caps &= inq_data->spi3data;
6152
6153                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6154                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6155
6156                 if ((spi3caps & SID_SPI_IUS) == 0)
6157                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6158
6159                 if ((spi3caps & SID_SPI_QAS) == 0)
6160                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6161
6162                 /* No SPI Transfer settings are allowed unless we are wide */
6163                 if (spi->bus_width == 0)
6164                         spi->ppr_options = 0;
6165
6166                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6167                         /*
6168                          * Can't tag queue without disconnection.
6169                          */
6170                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6171                         scsi->valid |= CTS_SCSI_VALID_TQ;
6172                 }
6173
6174                 /*
6175                  * If we are currently performing tagged transactions to
6176                  * this device and want to change its negotiation parameters,
6177                  * go non-tagged for a bit to give the controller a chance to
6178                  * negotiate unhampered by tag messages.
6179                  */
6180                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6181                  && (device->inq_flags & SID_CmdQue) != 0
6182                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6183                  && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
6184                                    CTS_SPI_VALID_SYNC_OFFSET|
6185                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
6186                         xpt_toggle_tags(cts->ccb_h.path);
6187         }
6188
6189         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6190          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6191                 int device_tagenb;
6192
6193                 /*
6194                  * If we are transitioning from tags to no-tags or
6195                  * vice-versa, we need to carefully freeze and restart
6196                  * the queue so that we don't overlap tagged and non-tagged
6197                  * commands.  We also temporarily stop tags if there is
6198                  * a change in transfer negotiation settings to allow
6199                  * "tag-less" negotiation.
6200                  */
6201                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6202                  || (device->inq_flags & SID_CmdQue) != 0)
6203                         device_tagenb = TRUE;
6204                 else
6205                         device_tagenb = FALSE;
6206
6207                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6208                   && device_tagenb == FALSE)
6209                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6210                   && device_tagenb == TRUE)) {
6211
6212                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6213                                 /*
6214                                  * Delay change to use tags until after a
6215                                  * few commands have gone to this device so
6216                                  * the controller has time to perform transfer
6217                                  * negotiations without tagged messages getting
6218                                  * in the way.
6219                                  */
6220                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6221                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6222                         } else {
6223                                 struct ccb_relsim crs;
6224
6225                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6226                                 device->inq_flags &= ~SID_CmdQue;
6227                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
6228                                                     sim->max_dev_openings);
6229                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6230                                 device->tag_delay_count = 0;
6231
6232                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6233                                               /*priority*/1);
6234                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
6235                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6236                                 crs.openings
6237                                     = crs.release_timeout
6238                                     = crs.qfrozen_cnt
6239                                     = 0;
6240                                 xpt_action((union ccb *)&crs);
6241                         }
6242                 }
6243         }
6244         if (async_update == FALSE)
6245                 (*(sim->sim_action))(sim, (union ccb *)cts);
6246 }
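
     /*
      * A typical internal caller builds the CCB first and lets the
      * sanity checks above pare the request down to what the controller,
      * device, and quirk entry actually allow.  Sketch, mirroring
      * xpt_toggle_tags() below, to enable tagged queuing:
      *
      *	struct ccb_trans_settings cts;
      *
      *	xpt_setup_ccb(&cts.ccb_h, path, 1);
      *	cts.type = CTS_TYPE_CURRENT_SETTINGS;
      *	cts.protocol = PROTO_SCSI;
      *	cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
      *	cts.transport = XPORT_UNSPECIFIED;
      *	cts.transport_version = XPORT_VERSION_UNSPECIFIED;
      *	cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
      *	cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
      *	xpt_set_transfer_settings(&cts, path->device, FALSE);
      */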
6247
6248 #else /* CAM_NEW_TRAN_CODE */
6249
6250 static void
6251 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6252                           int async_update)
6253 {
6254         struct  cam_sim *sim;
6255         int     qfrozen;
6256
6257         sim = cts->ccb_h.path->bus->sim;
6258         if (async_update == FALSE) {
6259                 struct  scsi_inquiry_data *inq_data;
6260                 struct  ccb_pathinq cpi;
6261                 struct  ccb_trans_settings cur_cts;
6262
6263                 if (device == NULL) {
6264                         cts->ccb_h.status = CAM_PATH_INVALID;
6265                         xpt_done((union ccb *)cts);
6266                         return;
6267                 }
6268
6269                 /*
6270                  * Perform sanity checking against what the
6271                  * controller and device can do.
6272                  */
6273                 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6274                 cpi.ccb_h.func_code = XPT_PATH_INQ;
6275                 xpt_action((union ccb *)&cpi);
6276                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6277                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6278                 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6279                 xpt_action((union ccb *)&cur_cts);
6280                 inq_data = &device->inq_data;
6281
6282                 /* Fill in any gaps in what the user gave us */
6283                 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6284                         cts->sync_period = cur_cts.sync_period;
6285                 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6286                         cts->sync_offset = cur_cts.sync_offset;
6287                 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6288                         cts->bus_width = cur_cts.bus_width;
6289                 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6290                         cts->flags &= ~CCB_TRANS_DISC_ENB;
6291                         cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6292                 }
6293                 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6294                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6295                         cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6296                 }
6297
6298                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6299                   && (inq_data->flags & SID_Sync) == 0)
6300                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6301                  || (cts->sync_offset == 0)
6302                  || (cts->sync_period == 0)) {
6303                         /* Force async */
6304                         cts->sync_period = 0;
6305                         cts->sync_offset = 0;
6306                 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
6307
6308                         if ((inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6309                          && cts->sync_period <= 0x9) {
6310                                 /*
6311                                  * Don't allow DT transmission rates if the
6312                                  * device does not support it.
6313                                  */
6314                                 cts->sync_period = 0xa;
6315                         }
6316                         if ((inq_data->spi3data & SID_SPI_IUS) == 0
6317                          && cts->sync_period <= 0x8) {
6318                                 /*
6319                                  * Don't allow PACE transmission rates
6320                                  * if the device does not support packetized
6321                                  * transfers.
6322                                  */
6323                                 cts->sync_period = 0x9;
6324                         }
6325                 }
6326
6327                 switch (cts->bus_width) {
6328                 case MSG_EXT_WDTR_BUS_32_BIT:
6329                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6330                           || (inq_data->flags & SID_WBus32) != 0)
6331                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6332                                 break;
6333                         /* Fall Through to 16-bit */
6334                 case MSG_EXT_WDTR_BUS_16_BIT:
6335                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6336                           || (inq_data->flags & SID_WBus16) != 0)
6337                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6338                                 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6339                                 break;
6340                         }
6341                         /* Fall Through to 8-bit */
6342                 default: /* New bus width?? */
6343                 case MSG_EXT_WDTR_BUS_8_BIT:
6344                         /* All targets can do this */
6345                         cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6346                         break;
6347                 }
6348
6349                 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6350                         /*
6351                          * Can't tag queue without disconnection.
6352                          */
6353                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6354                         cts->valid |= CCB_TRANS_TQ_VALID;
6355                 }
6356
6357                 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6358                  || (inq_data->flags & SID_CmdQue) == 0
6359                  || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6360                  || (device->quirk->mintags == 0)) {
6361                         /*
6362                          * Can't tag on hardware that doesn't support tags,
6363                          * doesn't have it enabled, or has broken tag support.
6364                          */
6365                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6366                 }
6367         }
6368
6369         qfrozen = FALSE;
6370         if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6371                 int device_tagenb;
6372
6373                 /*
6374                  * If we are transitioning from tags to no-tags or
6375                  * vice-versa, we need to carefully freeze and restart
6376                  * the queue so that we don't overlap tagged and non-tagged
6377                  * commands.  We also temporarily stop tags if there is
6378                  * a change in transfer negotiation settings to allow
6379                  * "tag-less" negotiation.
6380                  */
6381                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6382                  || (device->inq_flags & SID_CmdQue) != 0)
6383                         device_tagenb = TRUE;
6384                 else
6385                         device_tagenb = FALSE;
6386
6387                 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6388                   && device_tagenb == FALSE)
6389                  || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6390                   && device_tagenb == TRUE)) {
6391
6392                         if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6393                                 /*
6394                                  * Delay change to use tags until after a
6395                                  * few commands have gone to this device so
6396                                  * the controller has time to perform transfer
6397                                  * negotiations without tagged messages getting
6398                                  * in the way.
6399                                  */
6400                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6401                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6402                         } else {
6403                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6404                                 qfrozen = TRUE;
6405                                 device->inq_flags &= ~SID_CmdQue;
6406                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
6407                                                     sim->max_dev_openings);
6408                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6409                                 device->tag_delay_count = 0;
6410                         }
6411                 }
6412         }
6413
6414         if (async_update == FALSE) {
6415                 /*
6416                  * If we are currently performing tagged transactions to
6417                  * this device and want to change its negotiation parameters,
6418                  * go non-tagged for a bit to give the controller a chance to
6419                  * negotiate unhampered by tag messages.
6420                  */
6421                 if ((device->inq_flags & SID_CmdQue) != 0
6422                  && (cts->valid & (CCB_TRANS_SYNC_RATE_VALID|
6423                                    CCB_TRANS_SYNC_OFFSET_VALID|
6424                                    CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6425                         xpt_toggle_tags(cts->ccb_h.path);
6426
6427                 (*(sim->sim_action))(sim, (union ccb *)cts);
6428         }
6429
6430         if (qfrozen) {
6431                 struct ccb_relsim crs;
6432
6433                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6434                               /*priority*/1);
6435                 crs.ccb_h.func_code = XPT_REL_SIMQ;
6436                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6437                 crs.openings
6438                     = crs.release_timeout 
6439                     = crs.qfrozen_cnt
6440                     = 0;
6441                 xpt_action((union ccb *)&crs);
6442         }
6443 }
6444
6445
6446 #endif /* CAM_NEW_TRAN_CODE */
6447
6448 static void
6449 xpt_toggle_tags(struct cam_path *path)
6450 {
6451         struct cam_ed *dev;
6452
6453         /*
6454          * Give controllers a chance to renegotiate
6455          * before starting tag operations.  We
6456          * "toggle" tagged queuing off then on
6457          * which causes the tag enable command delay
6458          * counter to come into effect.
6459          */
6460         dev = path->device;
6461         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6462          || ((dev->inq_flags & SID_CmdQue) != 0
6463           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6464                 struct ccb_trans_settings cts;
6465
6466                 xpt_setup_ccb(&cts.ccb_h, path, 1);
6467 #ifdef CAM_NEW_TRAN_CODE
6468                 cts.protocol = PROTO_SCSI;
6469                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6470                 cts.transport = XPORT_UNSPECIFIED;
6471                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6472                 cts.proto_specific.scsi.flags = 0;
6473                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6474 #else /* CAM_NEW_TRAN_CODE */
6475                 cts.flags = 0;
6476                 cts.valid = CCB_TRANS_TQ_VALID;
6477 #endif /* CAM_NEW_TRAN_CODE */
6478                 xpt_set_transfer_settings(&cts, path->device,
6479                                           /*async_update*/TRUE);
6480 #ifdef CAM_NEW_TRAN_CODE
6481                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6482 #else /* CAM_NEW_TRAN_CODE */
6483                 cts.flags = CCB_TRANS_TAG_ENB;
6484 #endif /* CAM_NEW_TRAN_CODE */
6485                 xpt_set_transfer_settings(&cts, path->device,
6486                                           /*async_update*/TRUE);
6487         }
6488 }
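
     /*
      * The off-then-on toggle above deliberately trips the
      * CAM_DEV_TAG_AFTER_COUNT machinery in xpt_set_transfer_settings():
      * the "off" pass drains and resizes the tagged queue, and the "on"
      * pass re-enables tags but defers them for CAM_TAG_DELAY_COUNT
      * commands, leaving a window in which the controller can
      * renegotiate without tag messages on the wire.
      */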
6489
6490 static void
6491 xpt_start_tags(struct cam_path *path)
6492 {
6493         struct ccb_relsim crs;
6494         struct cam_ed *device;
6495         struct cam_sim *sim;
6496         int    newopenings;
6497
6498         device = path->device;
6499         sim = path->bus->sim;
6500         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6501         xpt_freeze_devq(path, /*count*/1);
6502         device->inq_flags |= SID_CmdQue;
6503         newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
6504         xpt_dev_ccbq_resize(path, newopenings);
6505         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6506         crs.ccb_h.func_code = XPT_REL_SIMQ;
6507         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6508         crs.openings
6509             = crs.release_timeout 
6510             = crs.qfrozen_cnt
6511             = 0;
6512         xpt_action((union ccb *)&crs);
6513 }
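
     /*
      * Ordering note for xpt_start_tags(): the device queue is frozen
      * before SID_CmdQue is set and the CCB queue resized (to the lesser
      * of the quirk's maxtags and the SIM's tagged openings), and the
      * XPT_REL_SIMQ CCB with RELSIM_RELEASE_AFTER_QEMPTY then thaws it
      * only once the outstanding untagged commands have drained, so
      * tagged and untagged commands are never mixed.
      */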
6514
6515 static int busses_to_config;
6516 static int busses_to_reset;
6517
6518 static int
6519 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6520 {
6521         if (bus->path_id != CAM_XPT_PATH_ID) {
6522                 struct cam_path path;
6523                 struct ccb_pathinq cpi;
6524                 int can_negotiate;
6525
6526                 busses_to_config++;
6527                 xpt_compile_path(&path, NULL, bus->path_id,
6528                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6529                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6530                 cpi.ccb_h.func_code = XPT_PATH_INQ;
6531                 xpt_action((union ccb *)&cpi);
6532                 can_negotiate = cpi.hba_inquiry;
6533                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6534                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6535                  && can_negotiate)
6536                         busses_to_reset++;
6537                 xpt_release_path(&path);
6538         }
6539
6540         return(1);
6541 }
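
     /*
      * A bus counts toward busses_to_reset only when its SIM both allows
      * bus resets (PIM_NOBUSRESET clear) and can negotiate wide or sync
      * transfers; that count is what gates the "Waiting ... seconds for
      * SCSI devices to settle" message in xpt_config() below.
      */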
6542
6543 static int
6544 xptconfigfunc(struct cam_eb *bus, void *arg)
6545 {
6546         struct  cam_path *path;
6547         union   ccb *work_ccb;
6548
6549         if (bus->path_id != CAM_XPT_PATH_ID) {
6550                 cam_status status;
6551                 int can_negotiate;
6552
6553                 work_ccb = xpt_alloc_ccb();
6554                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6555                                               CAM_TARGET_WILDCARD,
6556                                               CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6557                         kprintf("xptconfigfunc: xpt_create_path failed with "
6558                                "status %#x for bus %d\n", status, bus->path_id);
6559                         kprintf("xptconfigfunc: halting bus configuration\n");
6560                         xpt_free_ccb(work_ccb);
6561                         busses_to_config--;
6562                         xpt_finishconfig(xpt_periph, NULL);
6563                         return(0);
6564                 }
6565                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6566                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6567                 xpt_action(work_ccb);
6568                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6569                         kprintf("xptconfigfunc: CPI failed on bus %d "
6570                                "with status %d\n", bus->path_id,
6571                                work_ccb->ccb_h.status);
6572                         xpt_finishconfig(xpt_periph, work_ccb);
6573                         return(1);
6574                 }
6575
6576                 can_negotiate = work_ccb->cpi.hba_inquiry;
6577                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6578                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6579                  && (can_negotiate != 0)) {
6580                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6581                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6582                         work_ccb->ccb_h.cbfcnp = NULL;
6583                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6584                                   ("Resetting Bus\n"));
6585                         xpt_action(work_ccb);
6586                         xpt_finishconfig(xpt_periph, work_ccb);
6587                 } else {
6588                         /* Act as though we performed a successful BUS RESET */
6589                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6590                         xpt_finishconfig(xpt_periph, work_ccb);
6591                 }
6592         }
6593
6594         return(1);
6595 }
6596
6597 static void
6598 xpt_config(void *arg)
6599 {
6600         /*
6601          * Now that interrupts are enabled, go find our devices
6602          */
6603
6604 #ifdef CAMDEBUG
6605         /* Setup debugging flags and path */
6606 #ifdef CAM_DEBUG_FLAGS
6607         cam_dflags = CAM_DEBUG_FLAGS;
6608 #else /* !CAM_DEBUG_FLAGS */
6609         cam_dflags = CAM_DEBUG_NONE;
6610 #endif /* CAM_DEBUG_FLAGS */
6611 #ifdef CAM_DEBUG_BUS
6612         if (cam_dflags != CAM_DEBUG_NONE) {
6613                 if (xpt_create_path(&cam_dpath, xpt_periph,
6614                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6615                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6616                         kprintf("xpt_config: xpt_create_path() failed for debug"
6617                                " target %d:%d:%d, debugging disabled\n",
6618                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6619                         cam_dflags = CAM_DEBUG_NONE;
6620                 }
6621         } else
6622                 cam_dpath = NULL;
6623 #else /* !CAM_DEBUG_BUS */
6624         cam_dpath = NULL;
6625 #endif /* CAM_DEBUG_BUS */
6626 #endif /* CAMDEBUG */
6627
6628         /*
6629          * Scan all installed busses.
6630          */
6631         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6632
6633         if (busses_to_config == 0) {
6634                 /* Call manually because we don't have any busses */
6635                 xpt_finishconfig(xpt_periph, NULL);
6636         } else  {
6637                 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6638                         kprintf("Waiting %d seconds for SCSI "
6639                                "devices to settle\n", scsi_delay/1000);
6640                 }
6641                 xpt_for_all_busses(xptconfigfunc, NULL);
6642         }
6643 }
6644
6645 /*
6646  * If the given device only has one peripheral attached to it, and if that
6647  * peripheral is the passthrough driver, announce it.  This ensures that the
6648  * user sees some sort of announcement for every peripheral in their system.
6649  */
6650 static int
6651 xptpassannouncefunc(struct cam_ed *device, void *arg)
6652 {
6653         struct cam_periph *periph;
6654         int i;
6655
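             /*
              * Count the peripherals attached to this device.  Note that
              * the loop body is intentionally empty.
              */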
6656         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6657              periph = SLIST_NEXT(periph, periph_links), i++);
6658
6659         periph = SLIST_FIRST(&device->periphs);
6660         if ((i == 1)
6661          && (strncmp(periph->periph_name, "pass", 4) == 0))
6662                 xpt_announce_periph(periph, NULL);
6663
6664         return(1);
6665 }
6666
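     /*
      * Completion callback for initial bus configuration.  A successful
      * bus reset is converted into a bus scan with this function again
      * as the callback; once every bus has finished, the peripheral
      * drivers are initialized and the boot-time config hook released.
      */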
6667 static void
6668 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6669 {
6670         struct  periph_driver **p_drv;
6671         int     i;
6672
6673         if (done_ccb != NULL) {
6674                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6675                           ("xpt_finishconfig\n"));
6676                 switch(done_ccb->ccb_h.func_code) {
6677                 case XPT_RESET_BUS:
6678                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6679                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6680                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6681                                 xpt_action(done_ccb);
6682                                 return;
6683                         }
6684                         /* FALLTHROUGH */
6685                 case XPT_SCAN_BUS:
6686                 default:
6687                         xpt_free_path(done_ccb->ccb_h.path);
6688                         busses_to_config--;
6689                         break;
6690                 }
6691         }
6692
6693         if (busses_to_config == 0) {
6694                 /* Register all the peripheral drivers */
6695                 /* XXX This will have to change when we have loadable modules */
6696                 p_drv = periph_drivers;
6697                 for (i = 0; p_drv[i] != NULL; i++) {
6698                         (*p_drv[i]->init)();
6699                 }
6700
6701                 /*
6702                  * Check for devices with no "standard" peripheral driver
6703                  * attached.  For any devices like that, announce the
6704                  * passthrough driver so the user will see something.
6705                  */
6706                 xpt_for_all_devices(xptpassannouncefunc, NULL);
6707
6708                 /* Release our hook so that the boot can continue. */
6709                 config_intrhook_disestablish(xpt_config_hook);
6710                 kfree(xpt_config_hook, M_TEMP);
6711                 xpt_config_hook = NULL;
6712         }
6713         if (done_ccb != NULL)
6714                 xpt_free_ccb(done_ccb);
6715 }
6716
6717 static void
6718 xptaction(struct cam_sim *sim, union ccb *work_ccb)
6719 {
6720         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6721
6722         switch (work_ccb->ccb_h.func_code) {
6723         /* Common cases first */
6724         case XPT_PATH_INQ:              /* Path routing inquiry */
6725         {
6726                 struct ccb_pathinq *cpi;
6727
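                     /*
                      * The xpt bus is a purely software bus, so report a
                      * minimal set of capabilities.
                      */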
6728                 cpi = &work_ccb->cpi;
6729                 cpi->version_num = 1; /* XXX??? */
6730                 cpi->hba_inquiry = 0;
6731                 cpi->target_sprt = 0;
6732                 cpi->hba_misc = 0;
6733                 cpi->hba_eng_cnt = 0;
6734                 cpi->max_target = 0;
6735                 cpi->max_lun = 0;
6736                 cpi->initiator_id = 0;
6737                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6738                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
6739                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6740                 cpi->unit_number = sim->unit_number;
6741                 cpi->bus_id = sim->bus_id;
6742                 cpi->base_transfer_speed = 0;
6743 #ifdef CAM_NEW_TRAN_CODE
6744                 cpi->protocol = PROTO_UNSPECIFIED;
6745                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
6746                 cpi->transport = XPORT_UNSPECIFIED;
6747                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
6748 #endif /* CAM_NEW_TRAN_CODE */
6749                 cpi->ccb_h.status = CAM_REQ_CMP;
6750                 xpt_done(work_ccb);
6751                 break;
6752         }
6753         default:
6754                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
6755                 xpt_done(work_ccb);
6756                 break;
6757         }
6758 }
6759
6760 /*
6761  * The xpt as a "controller" has no interrupt sources, so polling
6762  * is a no-op.
6763  */
6764 static void
6765 xptpoll(struct cam_sim *sim)
6766 {
6767 }
6768
6769 /*
6770  * Should only be called by the machine interrupt dispatch routines,
6771  * so put these prototypes here instead of in the header.
6772  */
6773
6774 static void
6775 swi_camnet(void *arg, void *frame)
6776 {
6777         camisr(&cam_netq);
6778 }
6779
6780 static void
6781 swi_cambio(void *arg, void *frame)
6782 {
6783         camisr(&cam_bioq);
6784 }
6785
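     /*
      * Process completed CCBs from the given interrupt queue: return
      * each CCB to its device's queue, update SIM and device queue
      * accounting, honor queue release requests, and finally invoke
      * the peripheral driver's completion callback.
      */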
6786 static void
6787 camisr(cam_isrq_t *queue)
6788 {
6789         struct  ccb_hdr *ccb_h;
6790
6791         crit_enter();
6792         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
6793                 int     runq;
6794
6795                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
6796                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
6797                 splz();
6798
6799                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
6800                           ("camisr\n"));
6801
6802                 runq = FALSE;
6803
6804                 if (ccb_h->flags & CAM_HIGH_POWER) {
6805                         struct highpowerlist    *hphead;
6807                         union ccb               *send_ccb;
6808
6809                         hphead = &highpowerq;
6810
6811                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
6812
6813                         /*
6814                          * This command is done; free up its high powered slot.
6815                          */
6816                         num_highpower++;
6817
6818                         /*
6819                          * Any high powered commands queued up?
6820                          */
6821                         if (send_ccb != NULL) {
6824                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
6825
6826                                 xpt_release_devq(send_ccb->ccb_h.path,
6827                                                  /*count*/1, /*runqueue*/TRUE);
6828                         }
6829                 }
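                     /*
                      * User-issued CCBs never occupied a device queue
                      * slot, so only update queue accounting for the rest.
                      */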
6830                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
6831                         struct cam_ed *dev;
6832
6833                         dev = ccb_h->path->device;
6834
6835                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
6836
6837                         if (ccb_h->path->bus->sim->devq) {
6838                                 ccb_h->path->bus->sim->devq->send_active--;
6839                                 ccb_h->path->bus->sim->devq->send_openings++;
6840                         }
6841
6842                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
6843                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
6844                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
6845                           && (dev->ccbq.dev_active == 0))) {
6846
6847                                 xpt_release_devq(ccb_h->path, /*count*/1,
6848                                                  /*run_queue*/TRUE);
6849                         }
6850
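                             /*
                              * Once enough untagged commands have completed,
                              * turn tagged queueing back on.
                              */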
6851                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6852                          && (--dev->tag_delay_count == 0))
6853                                 xpt_start_tags(ccb_h->path);
6854
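                             /*
                              * If the device has more work and is neither
                              * frozen nor already queued, schedule it.
                              */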
6855                         if ((dev->ccbq.queue.entries > 0)
6856                          && (dev->qfrozen_cnt == 0)
6857                          && (device_is_send_queued(dev) == 0)) {
6858                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6859                                                               dev);
6860                         }
6861                 }
6862
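                     /*
                      * Releasing the SIM queue runs the queues for us,
                      * so cancel any separate device queue run.
                      */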
6863                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
6864                         xpt_release_simq(ccb_h->path->bus->sim,
6865                                          /*run_queue*/TRUE);
6866                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
6867                         runq = FALSE;
6868                 }
6869
6870                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6871                  && (ccb_h->status & CAM_DEV_QFRZN)) {
6872                         xpt_release_devq(ccb_h->path, /*count*/1,
6873                                          /*run_queue*/TRUE);
6874                         ccb_h->status &= ~CAM_DEV_QFRZN;
6875                 } else if (runq) {
6876                         xpt_run_dev_sendq(ccb_h->path->bus);
6877                 }
6878
6879                 /* Call the peripheral driver's callback */
6880                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
6881         }
6882         crit_exit();
6883 }