/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.57 2007/12/02 03:01:55 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t       event_enable;   /* Async Event enables */
        void            (*callback)(void *arg, u_int32_t code,
                                    struct cam_path *path, void *args);
        void            *callback_arg;
};

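/*
 * Illustrative sketch (not part of this file's logic): a SIM or
 * peripheral typically lands on one of these async lists by issuing an
 * XPT_SASYNC_CB CCB.  The handler and softc names below are
 * hypothetical.
 */
#if 0
        struct ccb_setasync csa;

        xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
        csa.callback = my_async_callback;       /* hypothetical handler */
        csa.callback_arg = my_softc;            /* hypothetical argument */
        xpt_action((union ccb *)&csa);
#endif
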
SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

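/*
 * The #ifndef guard above means a kernel configuration can raise this
 * ceiling at build time, e.g. (a sketch, not a tested configuration):
 *
 *      options CAM_MAX_HIGHPOWER=8
 */
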
/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct    cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices on all busses in the system.  The table contains a
 * cam_ed structure for each device on each bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct  cam_ed_qinfo alloc_ccb_entry;
        struct  cam_ed_qinfo send_ccb_entry;
        struct  cam_et   *target;
        lun_id_t         lun_id;
        struct  camq drvq;              /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
        struct  async_list asyncs;      /* Async callback info for this B/T/L */
        struct  periph_list periphs;    /* All attached devices */
        u_int   generation;             /* Generation number */
        struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
        struct  xpt_quirk_entry *quirk; /* Oddities about this device */
                                        /* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
        cam_proto        protocol;
        u_int            protocol_version;
        cam_xport        transport;
        u_int            transport_version;
#endif /* CAM_NEW_TRAN_CODE */
        struct           scsi_inquiry_data inq_data;
        u_int8_t         inq_flags;     /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t         queue_flags;   /* Queue flags from the control page */
        u_int8_t         serial_num_len;
        u_int8_t        *serial_num;
        u_int32_t        qfrozen_cnt;
        u_int32_t        flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
        u_int32_t        tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t        tag_saved_openings;
        u_int32_t        refcount;
        struct           callout c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct  cam_eb  *bus;
        target_id_t     target_id;
        u_int32_t       refcount;
        u_int           generation;
        struct          timeval last_reset;     /* uptime of last reset */
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb)  links;
        path_id_t            path_id;
        struct cam_sim       *sim;
        struct timeval       last_reset;        /* uptime of last reset */
        u_int32_t            flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t            refcount;
        u_int                generation;
};

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb     *bus;
        struct cam_et     *target;
        struct cam_ed     *device;
};

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
#define CAM_QUIRK_NOHILUNS      0x08
        u_int mintags;
        u_int maxtags;
};

static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");

#define CAM_SCSI2_MAXLUN        8
/*
 * If we're not quirked to search only the first 8 luns,
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define CAN_SRCH_HI_SPARSE(dv)                          \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define CAN_SRCH_HI_DENSE(dv)                           \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))

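/*
 * Worked example of the macros above (a sketch; "dv", "lun" and
 * "last_lun_ok" are hypothetical probe-loop locals): a quirk-free
 * SCSI-3 disk with kern.cam.cam_srch_hi=0 satisfies CAN_SRCH_HI_DENSE()
 * but not CAN_SRCH_HI_SPARSE(), so the scan keeps going above lun 7
 * only while each preceding lun responds.
 */
#if 0
        if (last_lun_ok ? CAN_SRCH_HI_DENSE(dv) : CAN_SRCH_HI_SPARSE(dv))
                lun++;          /* keep probing above lun 7 */
#endif
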
typedef enum {
        XPT_FLAG_OPEN           = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags       flags;
        u_int32_t       generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 *
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
                /*
                 * The Seagate Medalist Pro drives have very poor write
                 * performance with anything more than 2 tags.
                 *
                 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
                 * Drive:  <SEAGATE ST36530N 1444>
                 *
                 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
                 * Drive:  <SEAGATE ST34520W 1281>
                 *
                 * No one has actually reported that the 9G version
                 * (ST39140*) of the Medalist Pro has the same problem, but
                 * we're assuming that it does because the 4G and 6.5G
                 * versions of the drive are broken.
                 */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR:  kern/10398
                 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive:  DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by:  Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR:            misc/18793
                 * Submitted by:  Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                  /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

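/*
 * A sketch of how a device is matched against the table above (the
 * real lookup is in xpt_find_quirk() below): cam_quirkmatch() walks
 * the entries in order and returns the first whose inquiry pattern
 * matches, so the catch-all last entry guarantees a hit.
 */
#if 0
        device->quirk = (struct xpt_quirk_entry *)
            cam_quirkmatch((caddr_t)&device->inq_data,
                           (caddr_t)xpt_quirk_table, xpt_quirk_table_size,
                           sizeof(*xpt_quirk_table), scsi_inquiry_match);
#endif
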
typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
        { "xpt", XPT_CDEV_MAJOR, 0 },
        .d_open = xptopen,
        .d_close = xptclose,
        .d_ioctl = xptioctl
};

static struct intr_config_hook *xpt_config_hook;

static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
        .sim_action =   dead_sim_action,
        .sim_poll =     dead_sim_poll,
        .sim_name =     "dead_sim",
};

#define SIM_DEAD(sim)   ((sim) == &cam_dead_sim)

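/*
 * Code paths that can outlive their controller test for the dummy SIM
 * before touching SIM resources, roughly like this (sketch):
 */
#if 0
        if (SIM_DEAD(bus->sim))
                return (CAM_DEV_NOT_THERE);
#endif
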
/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef  CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static void     xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status       xpt_compile_path(struct cam_path *new_path,
                                         struct cam_periph *perph,
                                         path_id_t path_id,
                                         target_id_t target_id,
                                         lun_id_t lun_id);

static void             xpt_release_path(struct cam_path *path);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static void             xpt_dev_async(u_int32_t async_code,
                                      struct cam_eb *bus,
                                      struct cam_et *target,
                                      struct cam_ed *device,
                                      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                  u_int32_t new_priority);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                         int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
                 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                                  lun_id_t lun_id);
static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                                    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void      xpt_scan_lun(struct cam_periph *periph,
                              struct cam_path *path, cam_flags flags,
                              union ccb *ccb);
static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t    xptconfigbuscountfunc;
static xpt_busfunc_t    xptconfigfunc;
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void      camisr(cam_isrq_t *queue);
#if 0
static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void      xptasync(struct cam_periph *periph,
                          u_int32_t code, cam_path *path);
#endif
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                                            void *arg);
#endif
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
#ifdef notusedyet
static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                                            void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static cam_status       proberegister(struct cam_periph *periph,
                                      void *arg);
static void      probeschedule(struct cam_periph *probe_periph);
static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
static void      proberequestdefaultnegotiation(struct cam_periph *periph);
static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
static void      probecleanup(struct cam_periph *periph);
static void      xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void      xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                           struct cam_ed *device,
                                           int async_update);
static void      xpt_toggle_tags(struct cam_path *path);
static void      xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int     retval;

        if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

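/*
 * These helpers only enqueue the device; callers that change queue
 * state follow up by running the affected queue, as in this sketch of
 * the pattern used throughout the file:
 */
#if 0
        if (xpt_schedule_dev_sendq(bus, dev))
                xpt_run_dev_sendq(bus);
#endif
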
static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}

static void
xpt_periph_init(void)
{
        dev_ops_add(&xpt_ops, 0, 0);
        make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct dev_open_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((ap->a_oflags & O_NONBLOCK) != 0) {
                kprintf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}

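/*
 * From userland, the checks above mean /dev/xpt0 must be opened
 * read-write and without O_NONBLOCK.  A minimal (hypothetical)
 * consumer:
 */
#if 0
        int fd = open("/dev/xpt0", O_RDWR);     /* O_RDONLY gets EPERM */

        if (fd == -1)
                err(1, "open(/dev/xpt0)");
#endif
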
static int
xptclose(struct dev_close_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(struct dev_ioctl_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(ap->a_cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)ap->a_data;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;

                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, so we can send it on
                         * directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
         * input, with the peripheral driver name and unit number filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass") and unit number are passed back in the ccb.
         * The current device generation number, the index into the device
         * peripheral driver list, and the status are also passed back.  Note
         * that since we do everything in one pass, unlike the XPT_GDEVLIST
         * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it within a critical section.
         */
1182         case CAMGETPASSTHRU: {
1183                 union ccb *ccb;
1184                 struct cam_periph *periph;
1185                 struct periph_driver **p_drv;
1186                 char   *name;
1187                 u_int unit;
1188                 u_int cur_generation;
1189                 int base_periph_found;
1190                 int splbreaknum;
1191
1192                 ccb = (union ccb *)ap->a_data;
1193                 unit = ccb->cgdl.unit_number;
1194                 name = ccb->cgdl.periph_name;
1195                 /*
1196                  * Every 100 devices, we want to call splz() to check for
1197                  * and allow the software interrupt handler a chance to run.
1198                  *
1199                  * Most systems won't run into this check, but this should
1200                  * avoid starvation in the software interrupt handler in
1201                  * large systems.
1202                  */
1203                 splbreaknum = 100;
1204
1205                 ccb = (union ccb *)ap->a_data;
1206
1207                 base_periph_found = 0;
1208
1209                 /*
1210                  * Sanity check -- make sure we don't get a null peripheral
1211                  * driver name.
1212                  */
1213                 if (*ccb->cgdl.periph_name == '\0') {
1214                         error = EINVAL;
1215                         break;
1216                 }
1217
1218                 /* Keep the list from changing while we traverse it */
1219                 crit_enter();
1220 ptstartover:
1221                 cur_generation = xsoftc.generation;
1222
1223                 /* first find our driver in the list of drivers */
1224                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
1225                         if (strcmp((*p_drv)->driver_name, name) == 0)
1226                                 break;
1227                 }
1228
1229                 if (*p_drv == NULL) {
1230                         crit_exit();
1231                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1232                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1233                         *ccb->cgdl.periph_name = '\0';
1234                         ccb->cgdl.unit_number = 0;
1235                         error = ENOENT;
1236                         break;
1237                 }       
1238
1239                 /*
1240                  * Run through every peripheral instance of this driver
1241                  * and check to see whether it matches the unit passed
1242                  * in by the user.  If it does, get out of the loops and
1243                  * find the passthrough driver associated with that
1244                  * peripheral driver.
1245                  */
1246                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
1247
1248                         if (periph->unit_number == unit) {
1249                                 break;
1250                         } else if (--splbreaknum == 0) {
1251                                 splz();
1252                                 splbreaknum = 100;
1253                                 if (cur_generation != xsoftc.generation)
1254                                        goto ptstartover;
1255                         }
1256                 }
1257                 /*
1258                  * If we found the peripheral driver that the user passed
1259                  * in, go through all of the peripheral drivers for that
1260                  * particular device and look for a passthrough driver.
1261                  */
1262                 if (periph != NULL) {
1263                         struct cam_ed *device;
1264                         int i;
1265
1266                         base_periph_found = 1;
1267                         device = periph->path->device;
1268                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
1269                              periph != NULL;
1270                              periph = SLIST_NEXT(periph, periph_links), i++) {
1271                                 /*
1272                                  * Check to see whether we have a
1273                                  * passthrough device or not. 
1274                                  */
1275                                 if (strcmp(periph->periph_name, "pass") == 0) {
1276                                         /*
1277                                          * Fill in the getdevlist fields.
1278                                          */
1279                                         strcpy(ccb->cgdl.periph_name,
1280                                                periph->periph_name);
1281                                         ccb->cgdl.unit_number =
1282                                                 periph->unit_number;
1283                                         if (SLIST_NEXT(periph, periph_links))
1284                                                 ccb->cgdl.status =
1285                                                         CAM_GDEVLIST_MORE_DEVS;
1286                                         else
1287                                                 ccb->cgdl.status =
1288                                                        CAM_GDEVLIST_LAST_DEVICE;
1289                                         ccb->cgdl.generation =
1290                                                 device->generation;
1291                                         ccb->cgdl.index = i;
1292                                         /*
1293                                          * Fill in some CCB header fields
1294                                          * that the user may want.
1295                                          */
1296                                         ccb->ccb_h.path_id =
1297                                                 periph->path->bus->path_id;
1298                                         ccb->ccb_h.target_id =
1299                                                 periph->path->target->target_id;
1300                                         ccb->ccb_h.target_lun =
1301                                                 periph->path->device->lun_id;
1302                                         ccb->ccb_h.status = CAM_REQ_CMP;
1303                                         break;
1304                                 }
1305                         }
1306                 }
1307
1308                  * If the periph is NULL here, one of two things has
1309                  * If the periph is null here, one of two things has
1310                  * happened.  The first possibility is that we couldn't
1311                  * find the unit number of the particular peripheral driver
1312                  * that the user is asking about.  e.g. the user asks for
1313                  * the passthrough driver for "da11".  We find the list of
1314                  * "da" peripherals all right, but there is no unit 11.
1315                  * The other possibility is that we went through the list
1316                  * of peripheral drivers attached to the device structure,
1317                  * but didn't find one with the name "pass".  Either way,
1318                  * we return ENOENT, since we couldn't find something.
1319                  */
1320                 if (periph == NULL) {
1321                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1322                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1323                         *ccb->cgdl.periph_name = '\0';
1324                         ccb->cgdl.unit_number = 0;
1325                         error = ENOENT;
1326                         /*
1327                          * It is unfortunate that this is even necessary,
1328                          * but there are many, many clueless users out there.
1329                          * If base_periph_found is true, the user is looking
1330                          * for the passthrough driver, but doesn't have one
1331                          * in the kernel.
1332                          */
1333                         if (base_periph_found == 1) {
1334                                 kprintf("xptioctl: pass driver is not in the "
1335                                        "kernel\n");
1336                                 kprintf("xptioctl: put \"device pass0\" in "
1337                                        "your kernel config file\n");
1338                         }
1339                 }
1340                 crit_exit();
1341                 break;
1342                 }
1343         default:
1344                 error = ENOTTY;
1345                 break;
1346         }
1347
1348         return(error);
1349 }
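/*
 * A sketch of how the CAMGETPASSTHRU case above is typically driven from
 * userland; libcam's pass-device lookup works along these lines (device
 * node name and error handling here are illustrative):
 *
 *	union ccb ccb;
 *	int fd;
 *
 *	fd = open("/dev/xpt0", O_RDWR);
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strncpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *	ccb.cgdl.unit_number = 1;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1)
 *		err(1, "CAMGETPASSTHRU");
 *	if (ccb.ccb_h.status == CAM_REQ_CMP)
 *		printf("da1 is handled by %s%d\n",
 *		       ccb.cgdl.periph_name, ccb.cgdl.unit_number);
 */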
1350
1351 static int
1352 cam_module_event_handler(module_t mod, int what, void *arg)
1353 {
1354         if (what == MOD_LOAD) {
1355                 xpt_init(NULL);
1356         } else if (what == MOD_UNLOAD) {
1357                 return EBUSY;
1358         } else {
1359                 return EOPNOTSUPP;
1360         }
1361
1362         return 0;
1363 }
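/*
 * The module event handler above is hooked into the kernel with the usual
 * module declaration, roughly as follows (a sketch; the actual declaration
 * and SYSINIT ordering live elsewhere in the tree):
 *
 *	static moduledata_t cam_moduledata = {
 *		"cam",
 *		cam_module_event_handler,
 *		NULL
 *	};
 *	DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
 */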
1364
1365 /* Functions accessed by the peripheral drivers */
1366 static void
1367 xpt_init(void *dummy)
1368 {
1369         struct cam_sim *xpt_sim;
1370         struct cam_path *path;
1371         struct cam_devq *devq;
1372         cam_status status;
1373
1374         TAILQ_INIT(&xpt_busses);
1375         TAILQ_INIT(&cam_bioq);
1376         SLIST_INIT(&ccb_freeq);
1377         STAILQ_INIT(&highpowerq);
1378
1379         /*
1380          * The xpt layer is, itself, the equivalent of a SIM.
1381          * Allow 16 ccbs in the ccb pool for it.  This should
1382          * give decent parallelism when we probe busses and
1383          * perform other XPT functions.
1384          */
1385         devq = cam_simq_alloc(16);
1386         xpt_sim = cam_sim_alloc(xptaction,
1387                                 xptpoll,
1388                                 "xpt",
1389                                 /*softc*/NULL,
1390                                 /*unit*/0,
1391                                 /*max_dev_transactions*/0,
1392                                 /*max_tagged_dev_transactions*/0,
1393                                 devq);
1394         cam_simq_release(devq);
1395         xpt_max_ccbs = 16;
1396                                 
1397         xpt_bus_register(xpt_sim, /*bus #*/0);
1398
1399         /*
1400          * Looking at the XPT from the SIM layer, the XPT is
1401          * the equivalent of a peripheral driver.  Allocate
1402          * a peripheral driver entry for us.
1403          */
1404         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1405                                       CAM_TARGET_WILDCARD,
1406                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1407                 kprintf("xpt_init: xpt_create_path failed with status %#x,"
1408                        " failing attach\n", status);
1409                 return;
1410         }
1411
1412         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1413                          path, NULL, 0, NULL);
1414         xpt_free_path(path);
1415
1416         xpt_sim->softc = xpt_periph;
1417
1418         /*
1419          * Register a callback for when interrupts are enabled.
1420          */
1421         xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
1422                                   M_TEMP, M_INTWAIT | M_ZERO);
1423         xpt_config_hook->ich_func = xpt_config;
1424         xpt_config_hook->ich_desc = "xpt";
1425         xpt_config_hook->ich_order = 1000;
1426         if (config_intrhook_establish(xpt_config_hook) != 0) {
1427                 kfree(xpt_config_hook, M_TEMP);
1428                 kprintf("xpt_init: config_intrhook_establish failed "
1429                        "- failing attach\n");
1430         }
1431
1432         /* Install our software interrupt handlers */
1433         register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
1434 }
1435
1436 static cam_status
1437 xptregister(struct cam_periph *periph, void *arg)
1438 {
1439         if (periph == NULL) {
1440                 kprintf("xptregister: periph was NULL!!\n");
1441                 return(CAM_REQ_CMP_ERR);
1442         }
1443
1444         periph->softc = NULL;
1445
1446         xpt_periph = periph;
1447
1448         return(CAM_REQ_CMP);
1449 }
1450
1451 int32_t
1452 xpt_add_periph(struct cam_periph *periph)
1453 {
1454         struct cam_ed *device;
1455         int32_t  status;
1456         struct periph_list *periph_head;
1457
1458         device = periph->path->device;
1459
1460         status = CAM_REQ_CMP;
1461
1462         if (device != NULL) {
1463                 periph_head = &device->periphs;
1464
1465                 /*
1466                  * Make room for this peripheral
1467                  * so it will fit in the queue
1468                  * when it's scheduled to run
1469                  */
1470                 crit_enter();
1471                 status = camq_resize(&device->drvq,
1472                                      device->drvq.array_size + 1);
1473
1474                 device->generation++;
1475
1476                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1477                 crit_exit();
1478         }
1479
1480         xsoftc.generation++;
1481
1482         return (status);
1483 }
1484
1485 void
1486 xpt_remove_periph(struct cam_periph *periph)
1487 {
1488         struct cam_ed *device;
1489
1490         device = periph->path->device;
1491
1492         if (device != NULL) {
1493                 struct periph_list *periph_head;
1494
1495                 periph_head = &device->periphs;
1496                 
1497                 /* Release the slot for this peripheral */
1498                 crit_enter();
1499                 camq_resize(&device->drvq, device->drvq.array_size - 1);
1500
1501                 device->generation++;
1502
1503                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1504                 crit_exit();
1505         }
1506
1507         xsoftc.generation++;
1508
1509 }
1510
1511 #ifdef CAM_NEW_TRAN_CODE
1512
1513 void
1514 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1515 {
1516         struct  ccb_pathinq cpi;
1517         struct  ccb_trans_settings cts;
1518         struct  cam_path *path;
1519         u_int   speed;
1520         u_int   freq;
1521         u_int   mb;
1522
1523         path = periph->path;
1524         /*
1525          * To ensure that this is printed in one piece,
1526          * mask out CAM interrupts.
1527          */
1528         crit_enter();
1529         kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1530                periph->periph_name, periph->unit_number,
1531                path->bus->sim->sim_name,
1532                path->bus->sim->unit_number,
1533                path->bus->sim->bus_id,
1534                path->target->target_id,
1535                path->device->lun_id);
1536         kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1537         scsi_print_inquiry(&path->device->inq_data);
1538         if (bootverbose && path->device->serial_num_len > 0) {
1539                 /* Don't wrap the screen  - print only the first 60 chars */
1540                 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1541                        periph->unit_number, path->device->serial_num);
1542         }
1543         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1544         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1545         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1546         xpt_action((union ccb*)&cts);
1547
1548         /* Ask the SIM for its base transfer speed */
1549         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1550         cpi.ccb_h.func_code = XPT_PATH_INQ;
1551         xpt_action((union ccb *)&cpi);
1552
1553         speed = cpi.base_transfer_speed;
1554         freq = 0;
1555         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1556                 struct  ccb_trans_settings_spi *spi;
1557
1558                 spi = &cts.xport_specific.spi;
1559                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1560                   && spi->sync_offset != 0) {
1561                         freq = scsi_calc_syncsrate(spi->sync_period);
1562                         speed = freq;
1563                 }
1564
1565                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1566                         speed *= (0x01 << spi->bus_width);
1567         }
1568         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1569                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1570                 if (fc->valid & CTS_FC_VALID_SPEED) {
1571                         speed = fc->bitrate;
1572                 }
1573         }
1574
1575         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1576                 struct  ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1577                 if (sas->valid & CTS_SAS_VALID_SPEED) {
1578                         speed = sas->bitrate;
1579                 }
1580         }
1581
1582         mb = speed / 1000;
1583         if (mb > 0)
1584                 kprintf("%s%d: %d.%03dMB/s transfers",
1585                        periph->periph_name, periph->unit_number,
1586                        mb, speed % 1000);
1587         else
1588                 kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1589                        periph->unit_number, speed);
1590         /* Report additional information about SPI connections */
1591         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1592                 struct  ccb_trans_settings_spi *spi;
1593
1594                 spi = &cts.xport_specific.spi;
1595                 if (freq != 0) {
1596                         kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1597                                freq % 1000,
1598                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1599                              ? " DT" : "",
1600                                spi->sync_offset);
1601                 }
1602                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1603                  && spi->bus_width > 0) {
1604                         if (freq != 0) {
1605                                 kprintf(", ");
1606                         } else {
1607                                 kprintf(" (");
1608                         }
1609                         kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
1610                 } else if (freq != 0) {
1611                         kprintf(")");
1612                 }
1613         }
1614         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1615                 struct  ccb_trans_settings_fc *fc;
1616
1617                 fc = &cts.xport_specific.fc;
1618                 if (fc->valid & CTS_FC_VALID_WWNN)
1619                         kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
1620                 if (fc->valid & CTS_FC_VALID_WWPN)
1621                         kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
1622                 if (fc->valid & CTS_FC_VALID_PORT)
1623                         kprintf(" PortID 0x%x", fc->port);
1624         }
1625
1626         if (path->device->inq_flags & SID_CmdQue
1627          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1628                 kprintf("\n%s%d: Tagged Queueing Enabled",
1629                        periph->periph_name, periph->unit_number);
1630         }
1631         kprintf("\n");
1632
1633         /*
1634          * We only want to print the caller's announce string if they've
1635          * passed one in.
1636          */
1637         if (announce_string != NULL)
1638                 kprintf("%s%d: %s\n", periph->periph_name,
1639                        periph->unit_number, announce_string);
1640         crit_exit();
1641 }
1642 #else /* CAM_NEW_TRAN_CODE */
1643 void
1644 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1645 {
1646         u_int mb;
1647         struct cam_path *path;
1648         struct ccb_trans_settings cts;
1649
1650         path = periph->path;
1651         /*
1652          * To ensure that this is printed in one piece,
1653          * mask out CAM interrupts.
1654          */
1655         crit_enter();
1656         kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1657                periph->periph_name, periph->unit_number,
1658                path->bus->sim->sim_name,
1659                path->bus->sim->unit_number,
1660                path->bus->sim->bus_id,
1661                path->target->target_id,
1662                path->device->lun_id);
1663         kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1664         scsi_print_inquiry(&path->device->inq_data);
1665         if ((bootverbose)
1666          && (path->device->serial_num_len > 0)) {
1667                 /* Don't wrap the screen  - print only the first 60 chars */
1668                 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1669                        periph->unit_number, path->device->serial_num);
1670         }
1671         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1672         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1673         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1674         xpt_action((union ccb*)&cts);
1675         if (cts.ccb_h.status == CAM_REQ_CMP) {
1676                 u_int speed;
1677                 u_int freq;
1678
1679                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1680                   && cts.sync_offset != 0) {
1681                         freq = scsi_calc_syncsrate(cts.sync_period);
1682                         speed = freq;
1683                 } else {
1684                         struct ccb_pathinq cpi;
1685
1686                         /* Ask the SIM for its base transfer speed */
1687                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1688                         cpi.ccb_h.func_code = XPT_PATH_INQ;
1689                         xpt_action((union ccb *)&cpi);
1690
1691                         speed = cpi.base_transfer_speed;
1692                         freq = 0;
1693                 }
1694                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1695                         speed *= (0x01 << cts.bus_width);
1696                 mb = speed / 1000;
1697                 if (mb > 0)
1698                         kprintf("%s%d: %d.%03dMB/s transfers",
1699                                periph->periph_name, periph->unit_number,
1700                                mb, speed % 1000);
1701                 else
1702                         kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1703                                periph->unit_number, speed);
1704                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1705                  && cts.sync_offset != 0) {
1706                         kprintf(" (%d.%03dMHz, offset %d", freq / 1000,
1707                                freq % 1000, cts.sync_offset);
1708                 }
1709                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1710                  && cts.bus_width > 0) {
1711                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1712                          && cts.sync_offset != 0) {
1713                                 kprintf(", ");
1714                         } else {
1715                                 kprintf(" (");
1716                         }
1717                         kprintf("%dbit)", 8 * (0x01 << cts.bus_width));
1718                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1719                         && cts.sync_offset != 0) {
1720                         kprintf(")");
1721                 }
1722
1723                 if (path->device->inq_flags & SID_CmdQue
1724                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1725                         kprintf(", Tagged Queueing Enabled");
1726                 }
1727
1728                 kprintf("\n");
1729         } else if (path->device->inq_flags & SID_CmdQue
1730                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1731                 kprintf("%s%d: Tagged Queueing Enabled\n",
1732                        periph->periph_name, periph->unit_number);
1733         }
1734
1735         /*
1736          * We only want to print the caller's announce string if they've
1737          * passed one in.
1738          */
1739         if (announce_string != NULL)
1740                 kprintf("%s%d: %s\n", periph->periph_name,
1741                        periph->unit_number, announce_string);
1742         crit_exit();
1743 }
1744
1745 #endif /* CAM_NEW_TRAN_CODE */
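/*
 * Worked example of the transfer-speed arithmetic in both variants of
 * xpt_announce_periph() above, with illustrative numbers: an Ultra-wide
 * SPI target reporting sync period factor 0x0c (50ns, for which
 * scsi_calc_syncsrate() returns 20000kHz), a non-zero sync offset of 8,
 * and bus_width 1 (16 bits wide) gives
 *
 *	speed = 20000 * (0x01 << 1) = 40000	(KB/s)
 *	mb    = 40000 / 1000        = 40
 *
 * so the announcement reads:
 *
 *	"40.000MB/s transfers (20.000MHz, offset 8, 16bit)"
 */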
1746
1747 static dev_match_ret
1748 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1749             struct cam_eb *bus)
1750 {
1751         dev_match_ret retval;
1752         int i;
1753
1754         retval = DM_RET_NONE;
1755
1756         /*
1757          * If we aren't given something to match against, that's an error.
1758          */
1759         if (bus == NULL)
1760                 return(DM_RET_ERROR);
1761
1762         /*
1763          * If there are no match entries, then this bus matches no
1764          * matter what.
1765          */
1766         if ((patterns == NULL) || (num_patterns == 0))
1767                 return(DM_RET_DESCEND | DM_RET_COPY);
1768
1769         for (i = 0; i < num_patterns; i++) {
1770                 struct bus_match_pattern *cur_pattern;
1771
1772                 /*
1773                  * If the pattern in question isn't for a bus node, we
1774                  * aren't interested.  However, we do indicate to the
1775                  * calling routine that we should continue descending the
1776                  * tree, since the user wants to match against lower-level
1777                  * EDT elements.
1778                  */
1779                 if (patterns[i].type != DEV_MATCH_BUS) {
1780                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1781                                 retval |= DM_RET_DESCEND;
1782                         continue;
1783                 }
1784
1785                 cur_pattern = &patterns[i].pattern.bus_pattern;
1786
1787                 /*
1788                  * If they want to match any bus node, we give them any
1789                  * device node.
1790                  */
1791                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1792                         /* set the copy flag */
1793                         retval |= DM_RET_COPY;
1794
1795                         /*
1796                          * If we've already decided on an action, go ahead
1797                          * and return.
1798                          */
1799                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1800                                 return(retval);
1801                 }
1802
1803                 /*
1804                  * Not sure why someone would do this...
1805                  */
1806                 if (cur_pattern->flags == BUS_MATCH_NONE)
1807                         continue;
1808
1809                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1810                  && (cur_pattern->path_id != bus->path_id))
1811                         continue;
1812
1813                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1814                  && (cur_pattern->bus_id != bus->sim->bus_id))
1815                         continue;
1816
1817                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1818                  && (cur_pattern->unit_number != bus->sim->unit_number))
1819                         continue;
1820
1821                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1822                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1823                              DEV_IDLEN) != 0))
1824                         continue;
1825
1826                 /*
1827                  * If we get to this point, the user definitely wants 
1828                  * information on this bus.  So tell the caller to copy the
1829                  * data out.
1830                  */
1831                 retval |= DM_RET_COPY;
1832
1833                 /*
1834                  * If the return action has been set to descend, then we
1835                  * know that we've already seen a non-bus matching
1836                  * expression, therefore we need to further descend the tree.
1837                  * This won't change by continuing around the loop, so we
1838                  * go ahead and return.  If we haven't seen a non-bus
1839                  * matching expression, we keep going around the loop until
1840                  * we exhaust the matching expressions.  We'll set the stop
1841                  * flag once we fall out of the loop.
1842                  */
1843                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1844                         return(retval);
1845         }
1846
1847         /*
1848          * If the return action hasn't been set to descend yet, that means
1849          * we haven't seen anything other than bus matching patterns.  So
1850          * tell the caller to stop descending the tree -- the user doesn't
1851          * want to match against lower level tree elements.
1852          */
1853         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1854                 retval |= DM_RET_STOP;
1855
1856         return(retval);
1857 }
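/*
 * For illustration, the kind of pattern this routine consumes; a userland
 * caller hands an array of these in through an XPT_DEV_MATCH ccb (the
 * values here are made up):
 *
 *	struct dev_match_pattern pat;
 *
 *	pat.type = DEV_MATCH_BUS;
 *	pat.pattern.bus_pattern.flags = BUS_MATCH_NAME;
 *	strncpy(pat.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
 *
 * BUS_MATCH_ANY instead copies out every bus in the EDT.
 */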
1858
1859 static dev_match_ret
1860 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1861                struct cam_ed *device)
1862 {
1863         dev_match_ret retval;
1864         int i;
1865
1866         retval = DM_RET_NONE;
1867
1868         /*
1869          * If we aren't given something to match against, that's an error.
1870          */
1871         if (device == NULL)
1872                 return(DM_RET_ERROR);
1873
1874         /*
1875          * If there are no match entries, then this device matches no
1876          * matter what.
1877          */
1878         if ((patterns == NULL) || (num_patterns == 0))
1879                 return(DM_RET_DESCEND | DM_RET_COPY);
1880
1881         for (i = 0; i < num_patterns; i++) {
1882                 struct device_match_pattern *cur_pattern;
1883
1884                 /*
1885                  * If the pattern in question isn't for a device node, we
1886                  * aren't interested.
1887                  */
1888                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1889                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1890                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1891                                 retval |= DM_RET_DESCEND;
1892                         continue;
1893                 }
1894
1895                 cur_pattern = &patterns[i].pattern.device_pattern;
1896
1897                 /*
1898                  * If they want to match any device node, we give them any
1899                  * device node.
1900                  */
1901                 if (cur_pattern->flags == DEV_MATCH_ANY) {
1902                         /* set the copy flag */
1903                         retval |= DM_RET_COPY;
1904
1905                         
1906                         /*
1907                          * If we've already decided on an action, go ahead
1908                          * and return.
1909                          */
1910                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1911                                 return(retval);
1912                 }
1913
1914                 /*
1915                  * Not sure why someone would do this...
1916                  */
1917                 if (cur_pattern->flags == DEV_MATCH_NONE)
1918                         continue;
1919
1920                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1921                  && (cur_pattern->path_id != device->target->bus->path_id))
1922                         continue;
1923
1924                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1925                  && (cur_pattern->target_id != device->target->target_id))
1926                         continue;
1927
1928                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1929                  && (cur_pattern->target_lun != device->lun_id))
1930                         continue;
1931
1932                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1933                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1934                                     (caddr_t)&cur_pattern->inq_pat,
1935                                     1, sizeof(cur_pattern->inq_pat),
1936                                     scsi_static_inquiry_match) == NULL))
1937                         continue;
1938
1939                 /*
1940                  * If we get to this point, the user definitely wants 
1941                  * information on this device.  So tell the caller to copy
1942                  * the data out.
1943                  */
1944                 retval |= DM_RET_COPY;
1945
1946                 /*
1947                  * If the return action has been set to descend, then we
1948                  * know that we've already seen a peripheral matching
1949                  * expression, therefore we need to further descend the tree.
1950                  * This won't change by continuing around the loop, so we
1951                  * go ahead and return.  If we haven't seen a peripheral
1952                  * matching expression, we keep going around the loop until
1953                  * we exhaust the matching expressions.  We'll set the stop
1954                  * flag once we fall out of the loop.
1955                  */
1956                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1957                         return(retval);
1958         }
1959
1960         /*
1961          * If the return action hasn't been set to descend yet, that means
1962          * we haven't seen any peripheral matching patterns.  So tell the
1963          * caller to stop descending the tree -- the user doesn't want to
1964          * match against lower level tree elements.
1965          */
1966         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1967                 retval |= DM_RET_STOP;
1968
1969         return(retval);
1970 }
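/*
 * The DEV_MATCH_INQUIRY case above reuses the quirk-matching machinery;
 * an inquiry pattern might look like this ("pat" as in the bus sketch
 * above, illustrative values, "*" globs handled by
 * scsi_static_inquiry_match()):
 *
 *	struct device_match_pattern *dp = &pat.pattern.device_pattern;
 *
 *	pat.type = DEV_MATCH_DEVICE;
 *	dp->flags = DEV_MATCH_INQUIRY;
 *	dp->inq_pat.type = T_DIRECT;
 *	dp->inq_pat.media_type = SIP_MEDIA_FIXED;
 *	strncpy(dp->inq_pat.vendor, "SEAGATE", sizeof(dp->inq_pat.vendor));
 *	strncpy(dp->inq_pat.product, "*", sizeof(dp->inq_pat.product));
 *	strncpy(dp->inq_pat.revision, "*", sizeof(dp->inq_pat.revision));
 */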
1971
1972 /*
1973  * Match a single peripheral against any number of match patterns.
1974  */
1975 static dev_match_ret
1976 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1977                struct cam_periph *periph)
1978 {
1979         dev_match_ret retval;
1980         int i;
1981
1982         /*
1983          * If we aren't given something to match against, that's an error.
1984          */
1985         if (periph == NULL)
1986                 return(DM_RET_ERROR);
1987
1988         /*
1989          * If there are no match entries, then this peripheral matches no
1990          * matter what.
1991          */
1992         if ((patterns == NULL) || (num_patterns == 0))
1993                 return(DM_RET_STOP | DM_RET_COPY);
1994
1995         /*
1996          * There aren't any nodes below a peripheral node, so there's no
1997          * reason to descend the tree any further.
1998          */
1999         retval = DM_RET_STOP;
2000
2001         for (i = 0; i < num_patterns; i++) {
2002                 struct periph_match_pattern *cur_pattern;
2003
2004                 /*
2005                  * If the pattern in question isn't for a peripheral, we
2006                  * aren't interested.
2007                  */
2008                 if (patterns[i].type != DEV_MATCH_PERIPH)
2009                         continue;
2010
2011                 cur_pattern = &patterns[i].pattern.periph_pattern;
2012
2013                 /*
2014                  * If they want to match on anything, then we will do so.
2015                  */
2016                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2017                         /* set the copy flag */
2018                         retval |= DM_RET_COPY;
2019
2020                         /*
2021                          * We've already set the return action to stop,
2022                          * since there are no nodes below peripherals in
2023                          * the tree.
2024                          */
2025                         return(retval);
2026                 }
2027
2028                 /*
2029                  * Not sure why someone would do this...
2030                  */
2031                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2032                         continue;
2033
2034                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2035                  && (cur_pattern->path_id != periph->path->bus->path_id))
2036                         continue;
2037
2038                 /*
2039                  * For the target and lun IDs, we have to make sure the
2040                  * target and lun pointers aren't NULL.  The xpt peripheral
2041                  * has a wildcard target and device.
2042                  */
2043                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2044                  && ((periph->path->target == NULL)
2045                  || (cur_pattern->target_id != periph->path->target->target_id)))
2046                         continue;
2047
2048                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2049                  && ((periph->path->device == NULL)
2050                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
2051                         continue;
2052
2053                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2054                  && (cur_pattern->unit_number != periph->unit_number))
2055                         continue;
2056
2057                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2058                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
2059                              DEV_IDLEN) != 0))
2060                         continue;
2061
2062                 /*
2063                  * If we get to this point, the user definitely wants 
2064                  * information on this peripheral.  So tell the caller to
2065                  * copy the data out.
2066                  */
2067                 retval |= DM_RET_COPY;
2068
2069                 /*
2070                  * The return action has already been set to stop, since
2071                  * peripherals don't have any nodes below them in the EDT.
2072                  */
2073                 return(retval);
2074         }
2075
2076         /*
2077          * If we get to this point, the peripheral that was passed in
2078          * doesn't match any of the patterns.
2079          */
2080         return(retval);
2081 }
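/*
 * Matching, say, every passthrough peripheral takes a pattern along these
 * lines (sketch, "pat" as in the earlier examples):
 *
 *	pat.type = DEV_MATCH_PERIPH;
 *	pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strncpy(pat.pattern.periph_pattern.periph_name, "pass", DEV_IDLEN);
 */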
2082
2083 static int
2084 xptedtbusfunc(struct cam_eb *bus, void *arg)
2085 {
2086         struct ccb_dev_match *cdm;
2087         dev_match_ret retval;
2088
2089         cdm = (struct ccb_dev_match *)arg;
2090
2091         /*
2092          * If our position is for something deeper in the tree, that means
2093          * that we've already seen this node.  So, we keep going down.
2094          */
2095         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2096          && (cdm->pos.cookie.bus == bus)
2097          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2098          && (cdm->pos.cookie.target != NULL))
2099                 retval = DM_RET_DESCEND;
2100         else
2101                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2102
2103         /*
2104          * If we got an error, bail out of the search.
2105          */
2106         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2107                 cdm->status = CAM_DEV_MATCH_ERROR;
2108                 return(0);
2109         }
2110
2111         /*
2112          * If the copy flag is set, copy this bus out.
2113          */
2114         if (retval & DM_RET_COPY) {
2115                 int spaceleft, j;
2116
2117                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2118                         sizeof(struct dev_match_result));
2119
2120                 /*
2121                  * If we don't have enough space to put in another
2122                  * match result, save our position and tell the
2123                  * user there are more devices to check.
2124                  */
2125                 if (spaceleft < sizeof(struct dev_match_result)) {
2126                         bzero(&cdm->pos, sizeof(cdm->pos));
2127                         cdm->pos.position_type = 
2128                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2129
2130                         cdm->pos.cookie.bus = bus;
2131                         cdm->pos.generations[CAM_BUS_GENERATION] =
2132                                 bus_generation;
2133                         cdm->status = CAM_DEV_MATCH_MORE;
2134                         return(0);
2135                 }
2136                 j = cdm->num_matches;
2137                 cdm->num_matches++;
2138                 cdm->matches[j].type = DEV_MATCH_BUS;
2139                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2140                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2141                 cdm->matches[j].result.bus_result.unit_number =
2142                         bus->sim->unit_number;
2143                 strncpy(cdm->matches[j].result.bus_result.dev_name,
2144                         bus->sim->sim_name, DEV_IDLEN);
2145         }
2146
2147         /*
2148          * If the user is only interested in busses, there's no
2149          * reason to descend to the next level in the tree.
2150          */
2151         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2152                 return(1);
2153
2154         /*
2155          * If there is a target generation recorded, check it to
2156          * make sure the target list hasn't changed.
2157          */
2158         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2159          && (bus == cdm->pos.cookie.bus)
2160          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2161          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2162          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2163              bus->generation)) {
2164                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2165                 return(0);
2166         }
2167
2168         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2169          && (cdm->pos.cookie.bus == bus)
2170          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2171          && (cdm->pos.cookie.target != NULL))
2172                 return(xpttargettraverse(bus,
2173                                         (struct cam_et *)cdm->pos.cookie.target,
2174                                          xptedttargetfunc, arg));
2175         else
2176                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2177 }
2178
2179 static int
2180 xptedttargetfunc(struct cam_et *target, void *arg)
2181 {
2182         struct ccb_dev_match *cdm;
2183
2184         cdm = (struct ccb_dev_match *)arg;
2185
2186         /*
2187          * If there is a device list generation recorded, check it to
2188          * make sure the device list hasn't changed.
2189          */
2190         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2191          && (cdm->pos.cookie.bus == target->bus)
2192          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2193          && (cdm->pos.cookie.target == target)
2194          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2195          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2196          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2197              target->generation)) {
2198                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2199                 return(0);
2200         }
2201
2202         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2203          && (cdm->pos.cookie.bus == target->bus)
2204          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2205          && (cdm->pos.cookie.target == target)
2206          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2207          && (cdm->pos.cookie.device != NULL))
2208                 return(xptdevicetraverse(target,
2209                                         (struct cam_ed *)cdm->pos.cookie.device,
2210                                          xptedtdevicefunc, arg));
2211         else
2212                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2213 }
2214
2215 static int
2216 xptedtdevicefunc(struct cam_ed *device, void *arg)
2217 {
2218
2219         struct ccb_dev_match *cdm;
2220         dev_match_ret retval;
2221
2222         cdm = (struct ccb_dev_match *)arg;
2223
2224         /*
2225          * If our position is for something deeper in the tree, that means
2226          * that we've already seen this node.  So, we keep going down.
2227          */
2228         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2229          && (cdm->pos.cookie.device == device)
2230          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2231          && (cdm->pos.cookie.periph != NULL))
2232                 retval = DM_RET_DESCEND;
2233         else
2234                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2235                                         device);
2236
2237         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2238                 cdm->status = CAM_DEV_MATCH_ERROR;
2239                 return(0);
2240         }
2241
2242         /*
2243          * If the copy flag is set, copy this device out.
2244          */
2245         if (retval & DM_RET_COPY) {
2246                 int spaceleft, j;
2247
2248                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2249                         sizeof(struct dev_match_result));
2250
2251                 /*
2252                  * If we don't have enough space to put in another
2253                  * match result, save our position and tell the
2254                  * user there are more devices to check.
2255                  */
2256                 if (spaceleft < sizeof(struct dev_match_result)) {
2257                         bzero(&cdm->pos, sizeof(cdm->pos));
2258                         cdm->pos.position_type = 
2259                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2260                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2261
2262                         cdm->pos.cookie.bus = device->target->bus;
2263                         cdm->pos.generations[CAM_BUS_GENERATION] =
2264                                 bus_generation;
2265                         cdm->pos.cookie.target = device->target;
2266                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2267                                 device->target->bus->generation;
2268                         cdm->pos.cookie.device = device;
2269                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2270                                 device->target->generation;
2271                         cdm->status = CAM_DEV_MATCH_MORE;
2272                         return(0);
2273                 }
2274                 j = cdm->num_matches;
2275                 cdm->num_matches++;
2276                 cdm->matches[j].type = DEV_MATCH_DEVICE;
2277                 cdm->matches[j].result.device_result.path_id =
2278                         device->target->bus->path_id;
2279                 cdm->matches[j].result.device_result.target_id =
2280                         device->target->target_id;
2281                 cdm->matches[j].result.device_result.target_lun =
2282                         device->lun_id;
2283                 bcopy(&device->inq_data,
2284                       &cdm->matches[j].result.device_result.inq_data,
2285                       sizeof(struct scsi_inquiry_data));
2286
2287                 /* Let the user know whether this device is unconfigured */
2288                 if (device->flags & CAM_DEV_UNCONFIGURED)
2289                         cdm->matches[j].result.device_result.flags =
2290                                 DEV_RESULT_UNCONFIGURED;
2291                 else
2292                         cdm->matches[j].result.device_result.flags =
2293                                 DEV_RESULT_NOFLAG;
2294         }
2295
2296         /*
2297          * If the user isn't interested in peripherals, don't descend
2298          * the tree any further.
2299          */
2300         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2301                 return(1);
2302
2303         /*
2304          * If there is a peripheral list generation recorded, make sure
2305          * it hasn't changed.
2306          */
2307         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2308          && (device->target->bus == cdm->pos.cookie.bus)
2309          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2310          && (device->target == cdm->pos.cookie.target)
2311          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2312          && (device == cdm->pos.cookie.device)
2313          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2314          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2315          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2316              device->generation)) {
2317                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2318                 return(0);
2319         }
2320
2321         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2322          && (cdm->pos.cookie.bus == device->target->bus)
2323          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2324          && (cdm->pos.cookie.target == device->target)
2325          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2326          && (cdm->pos.cookie.device == device)
2327          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2328          && (cdm->pos.cookie.periph != NULL))
2329                 return(xptperiphtraverse(device,
2330                                 (struct cam_periph *)cdm->pos.cookie.periph,
2331                                 xptedtperiphfunc, arg));
2332         else
2333                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2334 }
2335
2336 static int
2337 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2338 {
2339         struct ccb_dev_match *cdm;
2340         dev_match_ret retval;
2341
2342         cdm = (struct ccb_dev_match *)arg;
2343
2344         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2345
2346         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2347                 cdm->status = CAM_DEV_MATCH_ERROR;
2348                 return(0);
2349         }
2350
2351         /*
2352          * If the copy flag is set, copy this peripheral out.
2353          */
2354         if (retval & DM_RET_COPY) {
2355                 int spaceleft, j;
2356
2357                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2358                         sizeof(struct dev_match_result));
2359
2360                 /*
2361                  * If we don't have enough space to put in another
2362                  * match result, save our position and tell the
2363                  * user there are more devices to check.
2364                  */
2365                 if (spaceleft < sizeof(struct dev_match_result)) {
2366                         bzero(&cdm->pos, sizeof(cdm->pos));
2367                         cdm->pos.position_type = 
2368                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2369                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2370                                 CAM_DEV_POS_PERIPH;
2371
2372                         cdm->pos.cookie.bus = periph->path->bus;
2373                         cdm->pos.generations[CAM_BUS_GENERATION] =
2374                                 bus_generation;
2375                         cdm->pos.cookie.target = periph->path->target;
2376                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2377                                 periph->path->bus->generation;
2378                         cdm->pos.cookie.device = periph->path->device;
2379                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2380                                 periph->path->target->generation;
2381                         cdm->pos.cookie.periph = periph;
2382                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2383                                 periph->path->device->generation;
2384                         cdm->status = CAM_DEV_MATCH_MORE;
2385                         return(0);
2386                 }
2387
2388                 j = cdm->num_matches;
2389                 cdm->num_matches++;
2390                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2391                 cdm->matches[j].result.periph_result.path_id =
2392                         periph->path->bus->path_id;
2393                 cdm->matches[j].result.periph_result.target_id =
2394                         periph->path->target->target_id;
2395                 cdm->matches[j].result.periph_result.target_lun =
2396                         periph->path->device->lun_id;
2397                 cdm->matches[j].result.periph_result.unit_number =
2398                         periph->unit_number;
2399                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2400                         periph->periph_name, DEV_IDLEN);
2401         }
2402
2403         return(1);
2404 }
2405
2406 static int
2407 xptedtmatch(struct ccb_dev_match *cdm)
2408 {
2409         int ret;
2410
2411         cdm->num_matches = 0;
2412
2413         /*
2414          * Check the bus list generation.  If it has changed, the user
2415          * needs to reset everything and start over.
2416          */
2417         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2418          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2419          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2420                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2421                 return(0);
2422         }
2423
2424         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2425          && (cdm->pos.cookie.bus != NULL))
2426                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2427                                      xptedtbusfunc, cdm);
2428         else
2429                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2430
2431         /*
2432          * If we get back 0, that means that we had to stop before fully
2433          * traversing the EDT.  It also means that one of the subroutines
2434          * has set the status field to the proper value.  If we get back 1,
2435          * we've fully traversed the EDT and copied out any matching entries.
2436          */
2437         if (ret == 1)
2438                 cdm->status = CAM_DEV_MATCH_LAST;
2439
2440         return(ret);
2441 }
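/*
 * A userland consumer (camcontrol's devlist works along these lines)
 * drives this by resubmitting the same ccb for as long as
 * CAM_DEV_MATCH_MORE comes back; the position saved in cdm->pos lets the
 * traversal resume where it left off.  A sketch, with fd an open xpt
 * device, consume() a stand-in for per-entry processing, and error
 * handling elided:
 *
 *	union ccb ccb;
 *	struct dev_match_result matches[100];
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = sizeof(matches);
 *	ccb.cdm.matches = matches;
 *	ccb.cdm.num_matches = 0;
 *	ccb.cdm.num_patterns = 0;	(zero patterns: match everything)
 *	ccb.cdm.pattern_buf_len = 0;
 *
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			err(1, "CAMIOCOMMAND");
 *		consume(ccb.cdm.matches, ccb.cdm.num_matches);
 *	} while (ccb.ccb_h.status == CAM_REQ_CMP
 *	      && ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */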
2442
2443 static int
2444 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2445 {
2446         struct ccb_dev_match *cdm;
2447
2448         cdm = (struct ccb_dev_match *)arg;
2449
2450         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2451          && (cdm->pos.cookie.pdrv == pdrv)
2452          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2453          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2454          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2455              (*pdrv)->generation)) {
2456                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2457                 return(0);
2458         }
2459
2460         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2461          && (cdm->pos.cookie.pdrv == pdrv)
2462          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2463          && (cdm->pos.cookie.periph != NULL))
2464                 return(xptpdperiphtraverse(pdrv,
2465                                 (struct cam_periph *)cdm->pos.cookie.periph,
2466                                 xptplistperiphfunc, arg));
2467         else
2468                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2469 }
2470
2471 static int
2472 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2473 {
2474         struct ccb_dev_match *cdm;
2475         dev_match_ret retval;
2476
2477         cdm = (struct ccb_dev_match *)arg;
2478
2479         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2480
2481         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2482                 cdm->status = CAM_DEV_MATCH_ERROR;
2483                 return(0);
2484         }
2485
2486         /*
2487          * If the copy flag is set, copy this peripheral out.
2488          */
2489         if (retval & DM_RET_COPY) {
2490                 int spaceleft, j;
2491
2492                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2493                         sizeof(struct dev_match_result));
2494
2495                 /*
2496                  * If we don't have enough space to put in another
2497                  * match result, save our position and tell the
2498                  * user there are more devices to check.
2499                  */
2500                 if (spaceleft < sizeof(struct dev_match_result)) {
2501                         struct periph_driver **pdrv;
2502
2503                         pdrv = NULL;
2504                         bzero(&cdm->pos, sizeof(cdm->pos));
2505                         cdm->pos.position_type = 
2506                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2507                                 CAM_DEV_POS_PERIPH;
2508
2509                         /*
2510                          * This may look a bit nonsensical, but it is
2511                          * actually quite logical.  There are very few
2512                          * peripheral drivers, and bloating every peripheral
2513                          * structure with a pointer back to its parent
2514                          * peripheral driver linker set entry would cost
2515                          * more in the long run than doing this quick lookup.
2516                          */
2517                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2518                                 if (strcmp((*pdrv)->driver_name,
2519                                     periph->periph_name) == 0)
2520                                         break;
2521                         }
2522
2523                         if (*pdrv == NULL) {
2524                                 cdm->status = CAM_DEV_MATCH_ERROR;
2525                                 return(0);
2526                         }
2527
2528                         cdm->pos.cookie.pdrv = pdrv;
2529                         /*
2530                          * The periph generation slot does double duty, as
2531                          * does the periph pointer slot.  They are used for
2532                          * both edt and pdrv lookups and positioning.
2533                          */
2534                         cdm->pos.cookie.periph = periph;
2535                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2536                                 (*pdrv)->generation;
2537                         cdm->status = CAM_DEV_MATCH_MORE;
2538                         return(0);
2539                 }
2540
2541                 j = cdm->num_matches;
2542                 cdm->num_matches++;
2543                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2544                 cdm->matches[j].result.periph_result.path_id =
2545                         periph->path->bus->path_id;
2546
2547                 /*
2548                  * The transport layer peripheral doesn't have a target or
2549                  * lun.
2550                  */
2551                 if (periph->path->target)
2552                         cdm->matches[j].result.periph_result.target_id =
2553                                 periph->path->target->target_id;
2554                 else
2555                         cdm->matches[j].result.periph_result.target_id = -1;
2556
2557                 if (periph->path->device)
2558                         cdm->matches[j].result.periph_result.target_lun =
2559                                 periph->path->device->lun_id;
2560                 else
2561                         cdm->matches[j].result.periph_result.target_lun = -1;
2562
2563                 cdm->matches[j].result.periph_result.unit_number =
2564                         periph->unit_number;
2565                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2566                         periph->periph_name, DEV_IDLEN);
2567         }
2568
2569         return(1);
2570 }
2571
2572 static int
2573 xptperiphlistmatch(struct ccb_dev_match *cdm)
2574 {
2575         int ret;
2576
2577         cdm->num_matches = 0;
2578
2579         /*
2580          * At this point in the edt traversal function, we check the bus
2581          * list generation to make sure that no busses have been added or
2582          * removed since the user last sent an XPT_DEV_MATCH ccb through.
2583          * For the peripheral driver list traversal function, however, we
2584          * don't have to worry about new peripheral driver types coming or
2585          * going; they're in a linker set, and therefore can't change
2586          * without a recompile.
2587          */
2588
2589         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2590          && (cdm->pos.cookie.pdrv != NULL))
2591                 ret = xptpdrvtraverse(
2592                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2593                                 xptplistpdrvfunc, cdm);
2594         else
2595                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2596
2597         /*
2598          * If we get back 0, that means that we had to stop before fully
2599          * traversing the peripheral driver tree.  It also means that one of
2600          * the subroutines has set the status field to the proper value.  If
2601          * we get back 1, we've fully traversed the peripheral driver list
2602          * and copied out any matching entries.
2603          */
2604         if (ret == 1)
2605                 cdm->status = CAM_DEV_MATCH_LAST;
2606
2607         return(ret);
2608 }
2609
2610 static int
2611 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2612 {
2613         struct cam_eb *bus, *next_bus;
2614         int retval;
2615
2616         retval = 1;
2617
2618         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2619              bus != NULL;
2620              bus = next_bus) {
2621                 next_bus = TAILQ_NEXT(bus, links);
2622
2623                 retval = tr_func(bus, arg);
2624                 if (retval == 0)
2625                         return(retval);
2626         }
2627
2628         return(retval);
2629 }
2630
2631 static int
2632 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2633                   xpt_targetfunc_t *tr_func, void *arg)
2634 {
2635         struct cam_et *target, *next_target;
2636         int retval;
2637
2638         retval = 1;
2639         for (target = (start_target ? start_target :
2640                        TAILQ_FIRST(&bus->et_entries));
2641              target != NULL; target = next_target) {
2642
2643                 next_target = TAILQ_NEXT(target, links);
2644
2645                 retval = tr_func(target, arg);
2646
2647                 if (retval == 0)
2648                         return(retval);
2649         }
2650
2651         return(retval);
2652 }
2653
2654 static int
2655 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2656                   xpt_devicefunc_t *tr_func, void *arg)
2657 {
2658         struct cam_ed *device, *next_device;
2659         int retval;
2660
2661         retval = 1;
2662         for (device = (start_device ? start_device :
2663                        TAILQ_FIRST(&target->ed_entries));
2664              device != NULL;
2665              device = next_device) {
2666
2667                 next_device = TAILQ_NEXT(device, links);
2668
2669                 retval = tr_func(device, arg);
2670
2671                 if (retval == 0)
2672                         return(retval);
2673         }
2674
2675         return(retval);
2676 }
2677
2678 static int
2679 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2680                   xpt_periphfunc_t *tr_func, void *arg)
2681 {
2682         struct cam_periph *periph, *next_periph;
2683         int retval;
2684
2685         retval = 1;
2686
2687         for (periph = (start_periph ? start_periph :
2688                        SLIST_FIRST(&device->periphs));
2689              periph != NULL;
2690              periph = next_periph) {
2691
2692                 next_periph = SLIST_NEXT(periph, periph_links);
2693
2694                 retval = tr_func(periph, arg);
2695                 if (retval == 0)
2696                         return(retval);
2697         }
2698
2699         return(retval);
2700 }
2701
2702 static int
2703 xptpdrvtraverse(struct periph_driver **start_pdrv,
2704                 xpt_pdrvfunc_t *tr_func, void *arg)
2705 {
2706         struct periph_driver **pdrv;
2707         int retval;
2708
2709         retval = 1;
2710
2711         /*
2712          * We don't traverse the peripheral driver list like we do the
2713          * other lists, because it is a linker set, and therefore cannot be
2714          * changed during runtime.  If the peripheral driver list is ever
2715          * re-done to be something other than a linker set (i.e. it can
2716          * change while the system is running), the list traversal should
2717          * be modified to work like the other traversal functions.
2718          */
2719         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2720              *pdrv != NULL; pdrv++) {
2721                 retval = tr_func(pdrv, arg);
2722
2723                 if (retval == 0)
2724                         return(retval);
2725         }
2726
2727         return(retval);
2728 }
2729
2730 static int
2731 xptpdperiphtraverse(struct periph_driver **pdrv,
2732                     struct cam_periph *start_periph,
2733                     xpt_periphfunc_t *tr_func, void *arg)
2734 {
2735         struct cam_periph *periph, *next_periph;
2736         int retval;
2737
2738         retval = 1;
2739
2740         for (periph = (start_periph ? start_periph :
2741              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2742              periph = next_periph) {
2743
2744                 next_periph = TAILQ_NEXT(periph, unit_links);
2745
2746                 retval = tr_func(periph, arg);
2747                 if (retval == 0)
2748                         return(retval);
2749         }
2750         return(retval);
2751 }
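
/*
 * Note that the list-based traversals above record the next entry before
 * calling tr_func, so a callback may remove the node it is visiting
 * without breaking the walk; xptpdrvtraverse() needs no such care, since
 * the linker set cannot change at runtime.
 */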
2752
2753 static int
2754 xptdefbusfunc(struct cam_eb *bus, void *arg)
2755 {
2756         struct xpt_traverse_config *tr_config;
2757
2758         tr_config = (struct xpt_traverse_config *)arg;
2759
2760         if (tr_config->depth == XPT_DEPTH_BUS) {
2761                 xpt_busfunc_t *tr_func;
2762
2763                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2764
2765                 return(tr_func(bus, tr_config->tr_arg));
2766         } else
2767                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2768 }
2769
2770 static int
2771 xptdeftargetfunc(struct cam_et *target, void *arg)
2772 {
2773         struct xpt_traverse_config *tr_config;
2774
2775         tr_config = (struct xpt_traverse_config *)arg;
2776
2777         if (tr_config->depth == XPT_DEPTH_TARGET) {
2778                 xpt_targetfunc_t *tr_func;
2779
2780                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2781
2782                 return(tr_func(target, tr_config->tr_arg));
2783         } else
2784                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2785 }
2786
2787 static int
2788 xptdefdevicefunc(struct cam_ed *device, void *arg)
2789 {
2790         struct xpt_traverse_config *tr_config;
2791
2792         tr_config = (struct xpt_traverse_config *)arg;
2793
2794         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2795                 xpt_devicefunc_t *tr_func;
2796
2797                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2798
2799                 return(tr_func(device, tr_config->tr_arg));
2800         } else
2801                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2802 }
2803
2804 static int
2805 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2806 {
2807         struct xpt_traverse_config *tr_config;
2808         xpt_periphfunc_t *tr_func;
2809
2810         tr_config = (struct xpt_traverse_config *)arg;
2811
2812         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2813
2814         /*
2815          * Unlike the other default functions, we don't check for depth
2816          * here.  The peripheral driver level is the last level in the EDT,
2817          * so if we're here, we should execute the function in question.
2818          */
2819         return(tr_func(periph, tr_config->tr_arg));
2820 }
2821
2822 /*
2823  * Execute the given function for every bus in the EDT.
2824  */
2825 static int
2826 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2827 {
2828         struct xpt_traverse_config tr_config;
2829
2830         tr_config.depth = XPT_DEPTH_BUS;
2831         tr_config.tr_func = tr_func;
2832         tr_config.tr_arg = arg;
2833
2834         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2835 }
2836
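#if 0
/*
 * Illustrative sketch (not compiled in): a minimal tr_func showing how
 * the wrapper above is meant to be used, here counting the busses
 * currently registered in the EDT.  Returning 1 lets the traversal
 * visit every bus; the names here are examples only.
 */
static int
xptcountbusfunc(struct cam_eb *bus, void *arg)
{
        int *count;

        count = (int *)arg;
        (*count)++;
        return(1);
}

static int
xpt_count_busses(void)
{
        int count;

        count = 0;
        xpt_for_all_busses(xptcountbusfunc, &count);
        return(count);
}
#endif
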
2837 #ifdef notusedyet
2838 /*
2839  * Execute the given function for every target in the EDT.
2840  */
2841 static int
2842 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2843 {
2844         struct xpt_traverse_config tr_config;
2845
2846         tr_config.depth = XPT_DEPTH_TARGET;
2847         tr_config.tr_func = tr_func;
2848         tr_config.tr_arg = arg;
2849
2850         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2851 }
2852 #endif /* notusedyet */
2853
2854 /*
2855  * Execute the given function for every device in the EDT.
2856  */
2857 static int
2858 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2859 {
2860         struct xpt_traverse_config tr_config;
2861
2862         tr_config.depth = XPT_DEPTH_DEVICE;
2863         tr_config.tr_func = tr_func;
2864         tr_config.tr_arg = arg;
2865
2866         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2867 }
2868
2869 #ifdef notusedyet
2870 /*
2871  * Execute the given function for every peripheral in the EDT.
2872  */
2873 static int
2874 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2875 {
2876         struct xpt_traverse_config tr_config;
2877
2878         tr_config.depth = XPT_DEPTH_PERIPH;
2879         tr_config.tr_func = tr_func;
2880         tr_config.tr_arg = arg;
2881
2882         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2883 }
2884 #endif /* notusedyet */
2885
2886 static int
2887 xptsetasyncfunc(struct cam_ed *device, void *arg)
2888 {
2889         struct cam_path path;
2890         struct ccb_getdev cgd;
2891         struct async_node *cur_entry;
2892
2893         cur_entry = (struct async_node *)arg;
2894
2895         /*
2896          * Don't report unconfigured devices (Wildcard devs,
2897          * devices only for target mode, device instances
2898          * that have been invalidated but are waiting for
2899          * their last reference count to be released).
2900          */
2901         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2902                 return (1);
2903
2904         xpt_compile_path(&path,
2905                          NULL,
2906                          device->target->bus->path_id,
2907                          device->target->target_id,
2908                          device->lun_id);
2909         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2910         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2911         xpt_action((union ccb *)&cgd);
2912         cur_entry->callback(cur_entry->callback_arg,
2913                             AC_FOUND_DEVICE,
2914                             &path, &cgd);
2915         xpt_release_path(&path);
2916
2917         return(1);
2918 }
2919
2920 static int
2921 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2922 {
2923         struct cam_path path;
2924         struct ccb_pathinq cpi;
2925         struct async_node *cur_entry;
2926
2927         cur_entry = (struct async_node *)arg;
2928
2929         xpt_compile_path(&path, /*periph*/NULL,
2930                          bus->sim->path_id,
2931                          CAM_TARGET_WILDCARD,
2932                          CAM_LUN_WILDCARD);
2933         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2934         cpi.ccb_h.func_code = XPT_PATH_INQ;
2935         xpt_action((union ccb *)&cpi);
2936         cur_entry->callback(cur_entry->callback_arg,
2937                             AC_PATH_REGISTERED,
2938                             &path, &cpi);
2939         xpt_release_path(&path);
2940
2941         return(1);
2942 }
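
/*
 * Both callbacks above build their cam_path on the stack with
 * xpt_compile_path() and drop it with xpt_release_path(); heap-allocated
 * paths from xpt_create_path() are instead destroyed with
 * xpt_free_path().
 */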
2943
2944 void
2945 xpt_action(union ccb *start_ccb)
2946 {
2947         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2948
2949         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2950
2951         crit_enter();
2952
2953         switch (start_ccb->ccb_h.func_code) {
2954         case XPT_SCSI_IO:
2955         {
2956 #ifdef CAM_NEW_TRAN_CODE
2957                 struct cam_ed *device;
2958 #endif /* CAM_NEW_TRAN_CODE */
2959 #ifdef CAMDEBUG
2960                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2961                 struct cam_path *path;
2962
2963                 path = start_ccb->ccb_h.path;
2964 #endif
2965
2966                 /*
2967                  * For the sake of compatibility with SCSI-1
2968                  * devices that may not understand the identify
2969                  * message, we include lun information in the
2970                  * second byte of all commands.  SCSI-1 specifies
2971                  * that luns are a 3 bit value and reserves only 3
2972                  * bits for lun information in the CDB.  Later
2973                  * revisions of the SCSI spec allow for more than 8
2974                  * luns, but have deprecated lun information in the
2975                  * CDB.  So, if the lun won't fit, we must omit it.
2976                  *
2977                  * Also be aware that during initial probing for devices,
2978                  * the inquiry information is unknown but initialized to 0.
2979                  * This means that this code will be exercised while probing
2980                  * devices with an ANSI revision greater than 2.
2981                  */
2982 #ifdef CAM_NEW_TRAN_CODE
2983                 device = start_ccb->ccb_h.path->device;
2984                 if (device->protocol_version <= SCSI_REV_2
2985 #else /* CAM_NEW_TRAN_CODE */
2986                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2987 #endif /* CAM_NEW_TRAN_CODE */
2988                  && start_ccb->ccb_h.target_lun < 8
2989                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2990
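                        /* SCSI-1 carries the lun in bits 7-5 of CDB byte 1. */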
2991                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2992                             start_ccb->ccb_h.target_lun << 5;
2993                 }
2994                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2995                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2996                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2997                                        &path->device->inq_data),
2998                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2999                                           cdb_str, sizeof(cdb_str))));
3000                 /* FALLTHROUGH */
3001         }
3002         case XPT_TARGET_IO:
3003         case XPT_CONT_TARGET_IO:
3004                 start_ccb->csio.sense_resid = 0;
3005                 start_ccb->csio.resid = 0;
3006                 /* FALLTHROUGH */
3007         case XPT_RESET_DEV:
3008         case XPT_ENG_EXEC:
3009         {
3010                 struct cam_path *path;
3011                 struct cam_sim *sim;
3012                 int runq;
3013
3014                 path = start_ccb->ccb_h.path;
3015
3016                 sim = path->bus->sim;
3017                 if (SIM_DEAD(sim)) {
3018                         /* The SIM has gone; just execute the CCB directly. */
3019                         cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3020                         (*(sim->sim_action))(sim, start_ccb);
3021                         break;
3022                 }
3023
3024                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3025                 if (path->device->qfrozen_cnt == 0)
3026                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
3027                 else
3028                         runq = 0;
3029                 if (runq != 0)
3030                         xpt_run_dev_sendq(path->bus);
3031                 break;
3032         }
3033         case XPT_SET_TRAN_SETTINGS:
3034         {
3035                 xpt_set_transfer_settings(&start_ccb->cts,
3036                                           start_ccb->ccb_h.path->device,
3037                                           /*async_update*/FALSE);
3038                 break;
3039         }
3040         case XPT_CALC_GEOMETRY:
3041         {
3042                 struct cam_sim *sim;
3043
3044                 /* Filter out garbage */
3045                 if (start_ccb->ccg.block_size == 0
3046                  || start_ccb->ccg.volume_size == 0) {
3047                         start_ccb->ccg.cylinders = 0;
3048                         start_ccb->ccg.heads = 0;
3049                         start_ccb->ccg.secs_per_track = 0;
3050                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3051                         break;
3052                 }
3053                 sim = start_ccb->ccb_h.path->bus->sim;
3054                 (*(sim->sim_action))(sim, start_ccb);
3055                 break;
3056         }
3057         case XPT_ABORT:
3058         {
3059                 union ccb* abort_ccb;
3060
3061                 abort_ccb = start_ccb->cab.abort_ccb;
3062                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3063
3064                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
3065                                 struct cam_ccbq *ccbq;
3066
3067                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3068                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3069                                 abort_ccb->ccb_h.status =
3070                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3071                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3072                                 xpt_done(abort_ccb);
3073                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3074                                 break;
3075                         }
3076                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3077                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3078                                 /*
3079                                  * We've caught this ccb en route to
3080                                  * the SIM.  Flag it for abort and the
3081                                  * SIM will do so just before starting
3082                                  * real work on the CCB.
3083                                  */
3084                                 abort_ccb->ccb_h.status =
3085                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3086                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3087                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3088                                 break;
3089                         }
3090                 } 
3091                 if (XPT_FC_IS_QUEUED(abort_ccb)
3092                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3093                         /*
3094                          * It's already completed but waiting
3095                          * for our SWI to get to it.
3096                          */
3097                         start_ccb->ccb_h.status = CAM_UA_ABORT;
3098                         break;
3099                 }
3100                 /*
3101                  * If we weren't able to take care of the abort request
3102                  * in the XPT, pass the request down to the SIM for processing.
3103                  */
3104                 /* FALLTHROUGH */
3105         }
3106         case XPT_ACCEPT_TARGET_IO:
3107         case XPT_EN_LUN:
3108         case XPT_IMMED_NOTIFY:
3109         case XPT_NOTIFY_ACK:
3110         case XPT_GET_TRAN_SETTINGS:
3111         case XPT_RESET_BUS:
3112         {
3113                 struct cam_sim *sim;
3114
3115                 sim = start_ccb->ccb_h.path->bus->sim;
3116                 (*(sim->sim_action))(sim, start_ccb);
3117                 break;
3118         }
3119         case XPT_PATH_INQ:
3120         {
3121                 struct cam_sim *sim;
3122
3123                 sim = start_ccb->ccb_h.path->bus->sim;
3124                 (*(sim->sim_action))(sim, start_ccb);
3125                 break;
3126         }
3127         case XPT_PATH_STATS:
3128                 start_ccb->cpis.last_reset =
3129                         start_ccb->ccb_h.path->bus->last_reset;
3130                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3131                 break;
3132         case XPT_GDEV_TYPE:
3133         {
3134                 struct cam_ed *dev;
3135
3136                 dev = start_ccb->ccb_h.path->device;
3137                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3138                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3139                 } else {
3140                         struct ccb_getdev *cgd;
3141                         struct cam_eb *bus;
3142                         struct cam_et *tar;
3143
3144                         cgd = &start_ccb->cgd;
3145                         bus = cgd->ccb_h.path->bus;
3146                         tar = cgd->ccb_h.path->target;
3147                         cgd->inq_data = dev->inq_data;
3148                         cgd->ccb_h.status = CAM_REQ_CMP;
3149                         cgd->serial_num_len = dev->serial_num_len;
3150                         if ((dev->serial_num_len > 0)
3151                          && (dev->serial_num != NULL))
3152                                 bcopy(dev->serial_num, cgd->serial_num,
3153                                       dev->serial_num_len);
3154                 }
3155                 break; 
3156         }
3157         case XPT_GDEV_STATS:
3158         {
3159                 struct cam_ed *dev;
3160
3161                 dev = start_ccb->ccb_h.path->device;
3162                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3163                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3164                 } else {
3165                         struct ccb_getdevstats *cgds;
3166                         struct cam_eb *bus;
3167                         struct cam_et *tar;
3168
3169                         cgds = &start_ccb->cgds;
3170                         bus = cgds->ccb_h.path->bus;
3171                         tar = cgds->ccb_h.path->target;
3172                         cgds->dev_openings = dev->ccbq.dev_openings;
3173                         cgds->dev_active = dev->ccbq.dev_active;
3174                         cgds->devq_openings = dev->ccbq.devq_openings;
3175                         cgds->devq_queued = dev->ccbq.queue.entries;
3176                         cgds->held = dev->ccbq.held;
3177                         cgds->last_reset = tar->last_reset;
3178                         cgds->maxtags = dev->quirk->maxtags;
3179                         cgds->mintags = dev->quirk->mintags;
3180                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3181                                 cgds->last_reset = bus->last_reset;
3182                         cgds->ccb_h.status = CAM_REQ_CMP;
3183                 }
3184                 break;
3185         }
3186         case XPT_GDEVLIST:
3187         {
3188                 struct cam_periph       *nperiph;
3189                 struct periph_list      *periph_head;
3190                 struct ccb_getdevlist   *cgdl;
3191                 u_int                   i;
3192                 struct cam_ed           *device;
3193                 int                     found;
3194
3195
3196                 found = 0;
3197
3198                 /*
3199                  * Don't want anyone mucking with our data.
3200                  */
3201                 device = start_ccb->ccb_h.path->device;
3202                 periph_head = &device->periphs;
3203                 cgdl = &start_ccb->cgdl;
3204
3205                 /*
3206                  * Check and see if the list has changed since the user
3207                  * last requested a list member.  If so, tell them that the
3208                  * list has changed, and therefore they need to start over 
3209                  * from the beginning.
3210                  */
3211                 if ((cgdl->index != 0) && 
3212                     (cgdl->generation != device->generation)) {
3213                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3214                         break;
3215                 }
3216
3217                 /*
3218                  * Traverse the list of peripherals and attempt to find 
3219                  * the requested peripheral.
3220                  */
3221                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3222                      (nperiph != NULL) && (i <= cgdl->index);
3223                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3224                         if (i == cgdl->index) {
3225                                 strncpy(cgdl->periph_name,
3226                                         nperiph->periph_name,
3227                                         DEV_IDLEN);
3228                                 cgdl->unit_number = nperiph->unit_number;
3229                                 found = 1;
3230                         }
3231                 }
3232                 if (found == 0) {
3233                         cgdl->status = CAM_GDEVLIST_ERROR;
3234                         break;
3235                 }
3236
3237                 if (nperiph == NULL)
3238                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3239                 else
3240                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3241
3242                 cgdl->index++;
3243                 cgdl->generation = device->generation;
3244
3245                 cgdl->ccb_h.status = CAM_REQ_CMP;
3246                 break;
3247         }
3248         case XPT_DEV_MATCH:
3249         {
3250                 dev_pos_type position_type;
3251                 struct ccb_dev_match *cdm;
3252                 int ret;
3253
3254                 cdm = &start_ccb->cdm;
3255
3256                 /*
3257                  * Prevent EDT changes while we traverse it.
3258                  */
3259                 /*
3260                  * There are two ways of getting at information in the EDT.
3261                  * The first way is via the primary EDT tree.  It starts
3262                  * with a list of busses, then a list of targets on a bus,
3263                  * then devices/luns on a target, and then peripherals on a
3264                  * device/lun.  The "other" way is by the peripheral driver
3265                  * lists.  The peripheral driver lists are organized by
3266                  * peripheral driver (obviously), so it makes sense to
3267                  * use the peripheral driver list if the user is looking
3268                  * for something like "da1", or all "da" devices.  If the
3269                  * user is looking for something on a particular bus/target
3270                  * or lun, it's generally better to go through the EDT tree.
3271                  */
3272
3273                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3274                         position_type = cdm->pos.position_type;
3275                 else {
3276                         u_int i;
3277
3278                         position_type = CAM_DEV_POS_NONE;
3279
3280                         for (i = 0; i < cdm->num_patterns; i++) {
3281                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3282                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3283                                         position_type = CAM_DEV_POS_EDT;
3284                                         break;
3285                                 }
3286                         }
3287
3288                         if (cdm->num_patterns == 0)
3289                                 position_type = CAM_DEV_POS_EDT;
3290                         else if (position_type == CAM_DEV_POS_NONE)
3291                                 position_type = CAM_DEV_POS_PDRV;
3292                 }
3293
3294                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
3295                 case CAM_DEV_POS_EDT:
3296                         ret = xptedtmatch(cdm);
3297                         break;
3298                 case CAM_DEV_POS_PDRV:
3299                         ret = xptperiphlistmatch(cdm);
3300                         break;
3301                 default:
3302                         cdm->status = CAM_DEV_MATCH_ERROR;
3303                         break;
3304                 }
3305
3306                 if (cdm->status == CAM_DEV_MATCH_ERROR)
3307                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3308                 else
3309                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3310
3311                 break;
3312         }
3313         case XPT_SASYNC_CB:
3314         {
3315                 struct ccb_setasync *csa;
3316                 struct async_node *cur_entry;
3317                 struct async_list *async_head;
3318                 u_int32_t added;
3319
3320                 csa = &start_ccb->csa;
3321                 added = csa->event_enable;
3322                 async_head = &csa->ccb_h.path->device->asyncs;
3323
3324                 /*
3325                  * If there is already an entry for us, simply
3326                  * update it.
3327                  */
3328                 cur_entry = SLIST_FIRST(async_head);
3329                 while (cur_entry != NULL) {
3330                         if ((cur_entry->callback_arg == csa->callback_arg)
3331                          && (cur_entry->callback == csa->callback))
3332                                 break;
3333                         cur_entry = SLIST_NEXT(cur_entry, links);
3334                 }
3335
3336                 if (cur_entry != NULL) {
3337                         /*
3338                          * If the request has no flags set,
3339                          * remove the entry.
3340                          */
3341                         added &= ~cur_entry->event_enable;
3342                         if (csa->event_enable == 0) {
3343                                 SLIST_REMOVE(async_head, cur_entry,
3344                                              async_node, links);
3345                                 csa->ccb_h.path->device->refcount--;
3346                                 kfree(cur_entry, M_CAMXPT);
3347                         } else {
3348                                 cur_entry->event_enable = csa->event_enable;
3349                         }
3350                 } else {
3351                         cur_entry = kmalloc(sizeof(*cur_entry), 
3352                                             M_CAMXPT, M_INTWAIT);
3353                         cur_entry->event_enable = csa->event_enable;
3354                         cur_entry->callback_arg = csa->callback_arg;
3355                         cur_entry->callback = csa->callback;
3356                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3357                         csa->ccb_h.path->device->refcount++;
3358                 }
3359
3360                 if ((added & AC_FOUND_DEVICE) != 0) {
3361                         /*
3362                          * Get this peripheral up to date with all
3363                          * the currently existing devices.
3364                          */
3365                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3366                 }
3367                 if ((added & AC_PATH_REGISTERED) != 0) {
3368                         /*
3369                          * Get this peripheral up to date with all
3370                          * the currently existing busses.
3371                          */
3372                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3373                 }
3374                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3375                 break;
3376         }
3377         case XPT_REL_SIMQ:
3378         {
3379                 struct ccb_relsim *crs;
3380                 struct cam_ed *dev;
3381
3382                 crs = &start_ccb->crs;
3383                 dev = crs->ccb_h.path->device;
3384                 if (dev == NULL) {
3385
3386                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3387                         break;
3388                 }
3389
3390                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3391
3392                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3393
3394                                 /* Don't ever go below one opening */
3395                                 if (crs->openings > 0) {
3396                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
3397                                                             crs->openings);
3398
3399                                         if (bootverbose) {
3400                                                 xpt_print_path(crs->ccb_h.path);
3401                                                 kprintf("tagged openings "
3402                                                        "now %d\n",
3403                                                        crs->openings);
3404                                         }
3405                                 }
3406                         }
3407                 }
3408
3409                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3410
3411                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3412
3413                                 /*
3414                                  * Just extend the old timeout and decrement
3415                                  * the freeze count so that a single timeout
3416                                  * is sufficient for releasing the queue.
3417                                  */
3418                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3419                                 callout_stop(&dev->c_handle);
3420                         } else {
3421
3422                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3423                         }
3424
3425                         callout_reset(&dev->c_handle,
3426                                       (crs->release_timeout * hz) / 1000, 
3427                                       xpt_release_devq_timeout, dev);
3428
3429                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3430
3431                 }
3432
3433                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3434
3435                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3436                                 /*
3437                                  * Decrement the freeze count so that a single
3438                                  * completion is still sufficient to unfreeze
3439                                  * the queue.
3440                                  */
3441                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3442                         } else {
3443                                 
3444                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3445                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3446                         }
3447                 }
3448
3449                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3450
3451                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3452                          || (dev->ccbq.dev_active == 0)) {
3453
3454                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3455                         } else {
3456                                 
3457                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3458                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3459                         }
3460                 }
3461                 
3462                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3463
3464                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
3465                                          /*run_queue*/TRUE);
3466                 }
3467                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3468                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3469                 break;
3470         }
3471         case XPT_SCAN_BUS:
3472                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3473                 break;
3474         case XPT_SCAN_LUN:
3475                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3476                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
3477                              start_ccb);
3478                 break;
3479         case XPT_DEBUG: {
3480 #ifdef CAMDEBUG
3481 #ifdef CAM_DEBUG_DELAY
3482                 cam_debug_delay = CAM_DEBUG_DELAY;
3483 #endif
3484                 cam_dflags = start_ccb->cdbg.flags;
3485                 if (cam_dpath != NULL) {
3486                         xpt_free_path(cam_dpath);
3487                         cam_dpath = NULL;
3488                 }
3489
3490                 if (cam_dflags != CAM_DEBUG_NONE) {
3491                         if (xpt_create_path(&cam_dpath, xpt_periph,
3492                                             start_ccb->ccb_h.path_id,
3493                                             start_ccb->ccb_h.target_id,
3494                                             start_ccb->ccb_h.target_lun) !=
3495                                             CAM_REQ_CMP) {
3496                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3497                                 cam_dflags = CAM_DEBUG_NONE;
3498                         } else {
3499                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3500                                 xpt_print_path(cam_dpath);
3501                                 kprintf("debugging flags now %x\n", cam_dflags);
3502                         }
3503                 } else {
3504                         cam_dpath = NULL;
3505                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3506                 }
3507 #else /* !CAMDEBUG */
3508                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3509 #endif /* CAMDEBUG */
3510                 break;
3511         }
3512         case XPT_NOOP:
3513                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3514                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3515                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3516                 break;
3517         default:
3518         case XPT_SDEV_TYPE:
3519         case XPT_TERM_IO:
3520         case XPT_ENG_INQ:
3521                 /* XXX Implement */
3522                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3523                 break;
3524         }
3525         crit_exit();
3526 }
3527
3528 void
3529 xpt_polled_action(union ccb *start_ccb)
3530 {
3531         u_int32_t timeout;
3532         struct    cam_sim *sim; 
3533         struct    cam_devq *devq;
3534         struct    cam_ed *dev;
3535
3536         timeout = start_ccb->ccb_h.timeout;
3537         sim = start_ccb->ccb_h.path->bus->sim;
3538         devq = sim->devq;
3539         dev = start_ccb->ccb_h.path->device;
3540
3541         crit_enter();
3542
3543         /*
3544          * Steal an opening so that no other queued requests
3545          * can get it before us while we simulate interrupts.
3546          */
3547         dev->ccbq.devq_openings--;
3548         dev->ccbq.dev_openings--;       
3549         
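        /*
         * ccb_h.timeout is expressed in milliseconds; each pass through
         * the loops below sleeps for DELAY(1000) (one millisecond), so
         * the timeout doubles as the loop counter.
         */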
3550         while (((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3551            && (--timeout > 0)) {
3552                 DELAY(1000);
3553                 (*(sim->sim_poll))(sim);
3554                 swi_cambio(NULL, NULL);         
3555         }
3556         
3557         dev->ccbq.devq_openings++;
3558         dev->ccbq.dev_openings++;
3559         
3560         if (timeout != 0) {
3561                 xpt_action(start_ccb);
3562                 while (--timeout > 0) {
3563                         (*(sim->sim_poll))(sim);
3564                         swi_cambio(NULL, NULL);
3565                         if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3566                             != CAM_REQ_INPROG)
3567                                 break;
3568                         DELAY(1000);
3569                 }
3570                 if (timeout == 0) {
3571                         /*
3572                          * XXX Is it worth adding a sim_timeout entry
3573                          * point so we can attempt recovery?  If
3574                          * this is only used for dumps, I don't think
3575                          * it is.
3576                          */
3577                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3578                 }
3579         } else {
3580                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3581         }
3582         crit_exit();
3583 }
3584         
3585 /*
3586  * Schedule a peripheral driver to receive a ccb when it's
3587  * target device has space for more transactions.
3588  */
3589 void
3590 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3591 {
3592         struct cam_ed *device;
3593         union ccb *work_ccb;
3594         int runq;
3595
3596         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3597         device = perph->path->device;
3598         crit_enter();
3599         if (periph_is_queued(perph)) {
3600                 /* Simply reorder based on new priority */
3601                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3602                           ("   change priority to %d\n", new_priority));
3603                 if (new_priority < perph->pinfo.priority) {
3604                         camq_change_priority(&device->drvq,
3605                                              perph->pinfo.index,
3606                                              new_priority);
3607                 }
3608                 runq = 0;
3609         } else if (SIM_DEAD(perph->path->bus->sim)) {
3610                 /* The SIM is gone so just call periph_start directly. */
3611                 work_ccb = xpt_get_ccb(perph->path->device);
3612                 crit_exit();
3613                 if (work_ccb == NULL)
3614                         return; /* XXX */
3615                 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3616                 perph->pinfo.priority = new_priority;
3617                 perph->periph_start(perph, work_ccb);
3618                 return;
3619         } else {
3620                 /* New entry on the queue */
3621                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3622                           ("   added periph to queue\n"));
3623                 perph->pinfo.priority = new_priority;
3624                 perph->pinfo.generation = ++device->drvq.generation;
3625                 camq_insert(&device->drvq, &perph->pinfo);
3626                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3627         }
3628         crit_exit();
3629         if (runq != 0) {
3630                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3631                           ("   calling xpt_run_devq\n"));
3632                 xpt_run_dev_allocq(perph->path->bus);
3633         }
3634 }
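
/*
 * Note: a peripheral driver calls xpt_schedule() when it has work to do;
 * once the device can accept another transaction, the XPT allocates a
 * CCB and hands it to the driver's periph_start() callback (see
 * xpt_run_dev_allocq() below).
 */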
3635
3636
3637 /*
3638  * Schedule a device to run on a given queue.
3639  * If the device was inserted as a new entry on the queue,
3640  * return 1 meaning the device queue should be run. If we
3641  * were already queued, implying someone else has already
3642  * started the queue, return 0 so the caller doesn't attempt
3643  * to run the queue.  Must be run in a critical section.
3644  */
3645 static int
3646 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3647                  u_int32_t new_priority)
3648 {
3649         int retval;
3650         u_int32_t old_priority;
3651
3652         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3653
3654         old_priority = pinfo->priority;
3655
3656         /*
3657          * Are we already queued?
3658          */
3659         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3660                 /* Simply reorder based on new priority */
3661                 if (new_priority < old_priority) {
3662                         camq_change_priority(queue, pinfo->index,
3663                                              new_priority);
3664                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3665                                         ("changed priority to %d\n",
3666                                          new_priority));
3667                 }
3668                 retval = 0;
3669         } else {
3670                 /* New entry on the queue */
3671                 if (new_priority < old_priority)
3672                         pinfo->priority = new_priority;
3673
3674                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3675                                 ("Inserting onto queue\n"));
3676                 pinfo->generation = ++queue->generation;
3677                 camq_insert(queue, pinfo);
3678                 retval = 1;
3679         }
3680         return (retval);
3681 }
3682
3683 static void
3684 xpt_run_dev_allocq(struct cam_eb *bus)
3685 {
3686         struct  cam_devq *devq;
3687
3688         if ((devq = bus->sim->devq) == NULL) {
3689                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
3690                 return;
3691         }
3692         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3693
3694         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3695                         ("   qfrozen_cnt == 0x%x, entries == %d, "
3696                          "openings == %d, active == %d\n",
3697                          devq->alloc_queue.qfrozen_cnt,
3698                          devq->alloc_queue.entries,
3699                          devq->alloc_openings,
3700                          devq->alloc_active));
3701
3702         crit_enter();
3703         devq->alloc_queue.qfrozen_cnt++;
3704         while ((devq->alloc_queue.entries > 0)
3705             && (devq->alloc_openings > 0)
3706             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3707                 struct  cam_ed_qinfo *qinfo;
3708                 struct  cam_ed *device;
3709                 union   ccb *work_ccb;
3710                 struct  cam_periph *drv;
3711                 struct  camq *drvq;
3712                 
3713                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3714                                                            CAMQ_HEAD);
3715                 device = qinfo->device;
3716
3717                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3718                                 ("running device %p\n", device));
3719
3720                 drvq = &device->drvq;
3721
3722 #ifdef CAMDEBUG
3723                 if (drvq->entries <= 0) {
3724                         panic("xpt_run_dev_allocq: "
3725                               "Device on queue without any work to do");
3726                 }
3727 #endif
3728                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3729                         devq->alloc_openings--;
3730                         devq->alloc_active++;
3731                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3732                         crit_exit();
3733                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3734                                       drv->pinfo.priority);
3735                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3736                                         ("calling periph start\n"));
3737                         drv->periph_start(drv, work_ccb);
3738                 } else {
3739                         /*
3740                          * Malloc failure in alloc_ccb
3741                          */
3742                         /*
3743                          * XXX add us to a list to be run from free_ccb
3744                          * if we don't have any ccbs active on this
3745                          * device queue otherwise we may never get run
3746                          * again.
3747                          */
3748                         break;
3749                 }
3750         
3751                 /* Re-enter critical section for insertion and test at top of loop */
3752                 crit_enter();
3753
3754                 if (drvq->entries > 0) {
3755                         /* We have more work.  Attempt to reschedule */
3756                         xpt_schedule_dev_allocq(bus, device);
3757                 }
3758         }
3759         devq->alloc_queue.qfrozen_cnt--;
3760         crit_exit();
3761 }
3762
3763 static void
3764 xpt_run_dev_sendq(struct cam_eb *bus)
3765 {
3766         struct  cam_devq *devq;
3767
3768         if ((devq = bus->sim->devq) == NULL) {
3769                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3770                 return;
3771         }
3772         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3773
3774         crit_enter();
3775         devq->send_queue.qfrozen_cnt++;
3776         while ((devq->send_queue.entries > 0)
3777             && (devq->send_openings > 0)) {
3778                 struct  cam_ed_qinfo *qinfo;
3779                 struct  cam_ed *device;
3780                 union ccb *work_ccb;
3781                 struct  cam_sim *sim;
3782
3783                 if (devq->send_queue.qfrozen_cnt > 1) {
3784                         break;
3785                 }
3786
3787                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3788                                                            CAMQ_HEAD);
3789                 device = qinfo->device;
3790
3791                 /*
3792                  * If the device has been "frozen", don't attempt
3793                  * to run it.
3794                  */
3795                 if (device->qfrozen_cnt > 0) {
3796                         continue;
3797                 }
3798
3799                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3800                                 ("running device %p\n", device));
3801
3802                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3803                 if (work_ccb == NULL) {
3804                         kprintf("device on run queue with no ccbs???\n");
3805                         continue;
3806                 }
3807
3808                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3809
3810                         if (num_highpower <= 0) {
3811                                 /*
3812                                  * We got a high power command, but we
3813                                  * don't have any available slots.  Freeze
3814                                  * the device queue until we have a slot
3815                                  * available.
3816                                  */
3817                                 device->qfrozen_cnt++;
3818                                 STAILQ_INSERT_TAIL(&highpowerq, 
3819                                                    &work_ccb->ccb_h, 
3820                                                    xpt_links.stqe);
3821
3822                                 continue;
3823                         } else {
3824                                 /*
3825                                  * Consume a high power slot while
3826                                  * this ccb runs.
3827                                  */
3828                                 num_highpower--;
3829                         }
3830                 }
3831                 devq->active_dev = device;
3832                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3833
3834                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3835
3836                 devq->send_openings--;
3837                 devq->send_active++;            
3838                 
3839                 if (device->ccbq.queue.entries > 0)
3840                         xpt_schedule_dev_sendq(bus, device);
3841
3842                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3843                         /*
3844                          * The client wants to freeze the queue
3845                          * after this CCB is sent.
3846                          */
3847                         device->qfrozen_cnt++;
3848                 }
3849
3850                 /* In Target mode, the peripheral driver knows best... */
3851                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3852                         if ((device->inq_flags & SID_CmdQue) != 0
3853                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3854                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3855                         else
3856                                 /*
3857                                  * Clear this in case of a retried CCB that
3858                                  * failed due to a rejected tag.
3859                                  */
3860                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3861                 }
3862
3863                 /*
3864                  * Device queues can be shared among multiple sim instances
3865                  * that reside on different busses.  Use the SIM in the queue
3866                  * CCB's path, rather than the one in the bus that was passed
3867                  * into this function.
3868                  */
3869                 sim = work_ccb->ccb_h.path->bus->sim;
3870                 (*(sim->sim_action))(sim, work_ccb);
3871
3872                 devq->active_dev = NULL;
3873                 /* Critical section still held for insertion and test at top of loop */
3874         }
3875         devq->send_queue.qfrozen_cnt--;
3876         crit_exit();
3877 }
3878
3879 /*
3880  * This function merges fields from the slave ccb into the master ccb, while
3881  * keeping important fields in the master ccb constant.
3882  */
3883 void
3884 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3885 {
3886         /*
3887          * Pull fields that are valid for peripheral drivers to set
3888          * into the master CCB along with the CCB "payload".
3889          */
3890         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3891         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3892         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3893         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
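        /*
         * &(&ccb_h)[1] points just past the CCB header, so this copies
         * the function-specific payload of the union without touching
         * the header fields preserved above.
         */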
3894         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3895               sizeof(union ccb) - sizeof(struct ccb_hdr));
3896 }
3897
3898 void
3899 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3900 {
3901         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3902         callout_init(&ccb_h->timeout_ch);
3903         ccb_h->pinfo.priority = priority;
3904         ccb_h->path = path;
3905         ccb_h->path_id = path->bus->path_id;
3906         if (path->target)
3907                 ccb_h->target_id = path->target->target_id;
3908         else
3909                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3910         if (path->device) {
3911                 ccb_h->target_lun = path->device->lun_id;
3912                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3913         } else {
3914                 ccb_h->target_lun = CAM_LUN_WILDCARD;
3915         }
3916         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3917         ccb_h->flags = 0;
3918 }
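
#if 0
/*
 * Illustrative sketch (not compiled in): the canonical calling sequence
 * used throughout this file is to set up the header, fill in a function
 * code, and dispatch through xpt_action(), mirroring
 * xptsetasyncbusfunc() above.  The function name is an example only.
 */
static void
xpt_example_path_inq(struct cam_path *path)
{
        struct ccb_pathinq cpi;

        xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
        cpi.ccb_h.func_code = XPT_PATH_INQ;
        xpt_action((union ccb *)&cpi);
}
#endif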
3919
3920 /* Path manipulation functions */
3921 cam_status
3922 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3923                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3924 {
3925         struct     cam_path *path;
3926         cam_status status;
3927
3928         path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
3929         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3930         if (status != CAM_REQ_CMP) {
3931                 kfree(path, M_CAMXPT);
3932                 path = NULL;
3933         }
3934         *new_path_ptr = path;
3935         return (status);
3936 }
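
#if 0
/*
 * Illustrative sketch (not compiled in): a call to xpt_create_path()
 * must be balanced by xpt_free_path() once the caller is done with the
 * path.  The function name is an example only.
 */
static void
xpt_example_path_use(path_id_t path_id, target_id_t target_id,
                     lun_id_t lun_id)
{
        struct cam_path *path;

        if (xpt_create_path(&path, /*periph*/NULL, path_id, target_id,
                            lun_id) != CAM_REQ_CMP)
                return;
        /* ... issue CCBs against the path ... */
        xpt_free_path(path);
}
#endif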
3937
3938 static cam_status
3939 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3940                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3941 {
3942         struct       cam_eb *bus;
3943         struct       cam_et *target;
3944         struct       cam_ed *device;
3945         cam_status   status;
3946
3947         status = CAM_REQ_CMP;   /* Completed without error */
3948         target = NULL;          /* Wildcarded */
3949         device = NULL;          /* Wildcarded */
3950
3951         /*
3952          * We will potentially modify the EDT, so block interrupts
3953          * that may attempt to create cam paths.
3954          */
3955         crit_enter();
3956         bus = xpt_find_bus(path_id);
3957         if (bus == NULL) {
3958                 status = CAM_PATH_INVALID;
3959         } else {
3960                 target = xpt_find_target(bus, target_id);
3961                 if (target == NULL) {
3962                         /* Create one */
3963                         struct cam_et *new_target;
3964
3965                         new_target = xpt_alloc_target(bus, target_id);
3966                         if (new_target == NULL) {
3967                                 status = CAM_RESRC_UNAVAIL;
3968                         } else {
3969                                 target = new_target;
3970                         }
3971                 }
3972                 if (target != NULL) {
3973                         device = xpt_find_device(target, lun_id);
3974                         if (device == NULL) {
3975                                 /* Create one */
3976                                 struct cam_ed *new_device;
3977
3978                                 new_device = xpt_alloc_device(bus,
3979                                                               target,
3980                                                               lun_id);
3981                                 if (new_device == NULL) {
3982                                         status = CAM_RESRC_UNAVAIL;
3983                                 } else {
3984                                         device = new_device;
3985                                 }
3986                         }
3987                 }
3988         }
3989         crit_exit();
3990
3991         /*
3992          * Only touch the user's data if we are successful.
3993          */
3994         if (status == CAM_REQ_CMP) {
3995                 new_path->periph = perph;
3996                 new_path->bus = bus;
3997                 new_path->target = target;
3998                 new_path->device = device;
3999                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4000         } else {
4001                 if (device != NULL)
4002                         xpt_release_device(bus, target, device);
4003                 if (target != NULL)
4004                         xpt_release_target(bus, target);
4005                 if (bus != NULL)
4006                         xpt_release_bus(bus);
4007         }
4008         return (status);
4009 }
4010
4011 static void
4012 xpt_release_path(struct cam_path *path)
4013 {
4014         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4015         if (path->device != NULL) {
4016                 xpt_release_device(path->bus, path->target, path->device);
4017                 path->device = NULL;
4018         }
4019         if (path->target != NULL) {
4020                 xpt_release_target(path->bus, path->target);
4021                 path->target = NULL;
4022         }
4023         if (path->bus != NULL) {
4024                 xpt_release_bus(path->bus);
4025                 path->bus = NULL;
4026         }
4027 }
4028
4029 void
4030 xpt_free_path(struct cam_path *path)
4031 {
4032         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4033         xpt_release_path(path);
4034         kfree(path, M_CAMXPT);
4035 }
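
/*
 * Illustrative lifecycle (sketch): callers pair xpt_create_path() with
 * xpt_free_path(); the id values below are placeholders:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id,
 *			    lun_id) != CAM_REQ_CMP)
 *		return;
 *	...use the path...
 *	xpt_free_path(path);
 */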
4036
4037
4038 /*
4039  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4040  * in path1, 2 for match with wildcards in path2.
4041  */
4042 int
4043 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4044 {
4045         int retval = 0;
4046
4047         if (path1->bus != path2->bus) {
4048                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4049                         retval = 1;
4050                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4051                         retval = 2;
4052                 else
4053                         return (-1);
4054         }
4055         if (path1->target != path2->target) {
4056                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4057                         if (retval == 0)
4058                                 retval = 1;
4059                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4060                         retval = 2;
4061                 else
4062                         return (-1);
4063         }
4064         if (path1->device != path2->device) {
4065                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4066                         if (retval == 0)
4067                                 retval = 1;
4068                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4069                         retval = 2;
4070                 else
4071                         return (-1);
4072         }
4073         return (retval);
4074 }
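
/*
 * Example (illustrative): comparing a path built with
 * CAM_TARGET_WILDCARD/CAM_LUN_WILDCARD against a fully specified path
 * on the same bus returns 1 or 2, depending on which argument holds
 * the wildcards, so any return value >= 0 means the paths overlap:
 *
 *	if (xpt_path_comp(async_path, dev_path) >= 0)
 *		...deliver the event...
 */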
4075
4076 void
4077 xpt_print_path(struct cam_path *path)
4078 {
4079         if (path == NULL)
4080                 kprintf("(nopath): ");
4081         else {
4082                 if (path->periph != NULL)
4083                         kprintf("(%s%d:", path->periph->periph_name,
4084                                path->periph->unit_number);
4085                 else
4086                         kprintf("(noperiph:");
4087
4088                 if (path->bus != NULL)
4089                         kprintf("%s%d:%d:", path->bus->sim->sim_name,
4090                                path->bus->sim->unit_number,
4091                                path->bus->sim->bus_id);
4092                 else
4093                         kprintf("nobus:");
4094
4095                 if (path->target != NULL)
4096                         kprintf("%d:", path->target->target_id);
4097                 else
4098                         kprintf("X:");
4099
4100                 if (path->device != NULL)
4101                         kprintf("%d): ", path->device->lun_id);
4102                 else
4103                         kprintf("X): ");
4104         }
4105 }
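
/*
 * Example output (illustrative): for peripheral da0 attached via sim
 * ahc0 on bus 0 at target 3, lun 0, xpt_print_path() emits the usual
 * prefix
 *
 *	(da0:ahc0:0:3:0):
 *
 * with "noperiph", "nobus", or "X" standing in for absent components.
 */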
4106
4107 int
4108 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4109 {
4110         struct sbuf sb;
4111
4112         sbuf_new(&sb, str, str_len, 0);
4113
4114         if (path == NULL)
4115                 sbuf_printf(&sb, "(nopath): ");
4116         else {
4117                 if (path->periph != NULL)
4118                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4119                                     path->periph->unit_number);
4120                 else
4121                         sbuf_printf(&sb, "(noperiph:");
4122
4123                 if (path->bus != NULL)
4124                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4125                                     path->bus->sim->unit_number,
4126                                     path->bus->sim->bus_id);
4127                 else
4128                         sbuf_printf(&sb, "nobus:");
4129
4130                 if (path->target != NULL)
4131                         sbuf_printf(&sb, "%d:", path->target->target_id);
4132                 else
4133                         sbuf_printf(&sb, "X:");
4134
4135                 if (path->device != NULL)
4136                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
4137                 else
4138                         sbuf_printf(&sb, "X): ");
4139         }
4140         sbuf_finish(&sb);
4141
4142         return(sbuf_len(&sb));
4143 }
4144
4145 path_id_t
4146 xpt_path_path_id(struct cam_path *path)
4147 {
4148         return(path->bus->path_id);
4149 }
4150
4151 target_id_t
4152 xpt_path_target_id(struct cam_path *path)
4153 {
4154         if (path->target != NULL)
4155                 return (path->target->target_id);
4156         else
4157                 return (CAM_TARGET_WILDCARD);
4158 }
4159
4160 lun_id_t
4161 xpt_path_lun_id(struct cam_path *path)
4162 {
4163         if (path->device != NULL)
4164                 return (path->device->lun_id);
4165         else
4166                 return (CAM_LUN_WILDCARD);
4167 }
4168
4169 struct cam_sim *
4170 xpt_path_sim(struct cam_path *path)
4171 {
4172         return (path->bus->sim);
4173 }
4174
4175 struct cam_periph*
4176 xpt_path_periph(struct cam_path *path)
4177 {
4178         return (path->periph);
4179 }
4180
4181 /*
4182  * Release a CAM control block for the caller.  Remit the cost of the structure
4183  * to the device referenced by the path.  If this device had no 'credits'
4184  * and peripheral drivers have registered async callbacks for this
4185  * notification, call them now.
4186  */
4187 void
4188 xpt_release_ccb(union ccb *free_ccb)
4189 {
4190         struct   cam_path *path;
4191         struct   cam_ed *device;
4192         struct   cam_eb *bus;
4193
4194         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4195         path = free_ccb->ccb_h.path;
4196         device = path->device;
4197         bus = path->bus;
4198         crit_enter();
4199         cam_ccbq_release_opening(&device->ccbq);
4200         if (xpt_ccb_count > xpt_max_ccbs) {
4201                 xpt_free_ccb(free_ccb);
4202                 xpt_ccb_count--;
4203         } else {
4204                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4205         }
4206         if (bus->sim->devq == NULL) {
4207                 crit_exit();
4208                 return;
4209         }
4210         bus->sim->devq->alloc_openings++;
4211         bus->sim->devq->alloc_active--;
4212         /* XXX Turn this into an inline function - xpt_run_device?? */
4213         if ((device_is_alloc_queued(device) == 0)
4214          && (device->drvq.entries > 0)) {
4215                 xpt_schedule_dev_allocq(bus, device);
4216         }
4217         crit_exit();
4218         if (bus->sim->devq && dev_allocq_is_runnable(bus->sim->devq))
4219                 xpt_run_dev_allocq(bus);
4220 }
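
/*
 * Illustrative pairing (sketch): a peripheral's completion routine
 * typically returns its CCB here once the results have been consumed,
 * as the probe code later in this file does:
 *
 *	...examine done_ccb status/results...
 *	xpt_release_ccb(done_ccb);
 */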
4221
4222 /* Functions accessed by SIM drivers */
4223
4224 /*
4225  * A sim structure, listing the SIM entry points and instance
4226  * identification info, is passed to xpt_bus_register to hook the SIM
4227  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4228  * for this new bus, places it in the list of busses, and assigns
4229  * it a path_id.  The path_id may be influenced by "hard wiring"
4230  * information specified by the user.  Once interrupt services are
4231  * available, the bus will be probed.
4232  */
4233 int32_t
4234 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4235 {
4236         struct cam_eb *new_bus;
4237         struct cam_eb *old_bus;
4238         struct ccb_pathinq cpi;
4239
4240         sim->bus_id = bus;
4241         new_bus = kmalloc(sizeof(*new_bus), M_CAMXPT, M_INTWAIT);
4242
4243         if (strcmp(sim->sim_name, "xpt") != 0) {
4244                 sim->path_id =
4245                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4246         }
4247
4248         TAILQ_INIT(&new_bus->et_entries);
4249         new_bus->path_id = sim->path_id;
4250         new_bus->sim = sim;
4251         ++sim->refcount;
4252         timevalclear(&new_bus->last_reset);
4253         new_bus->flags = 0;
4254         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4255         new_bus->generation = 0;
4256         crit_enter();
4257         old_bus = TAILQ_FIRST(&xpt_busses);
4258         while (old_bus != NULL
4259             && old_bus->path_id < new_bus->path_id)
4260                 old_bus = TAILQ_NEXT(old_bus, links);
4261         if (old_bus != NULL)
4262                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4263         else
4264                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4265         bus_generation++;
4266         crit_exit();
4267
4268         /* Notify interested parties */
4269         if (sim->path_id != CAM_XPT_PATH_ID) {
4270                 struct cam_path path;
4271
4272                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4273                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4274                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4275                 cpi.ccb_h.func_code = XPT_PATH_INQ;
4276                 xpt_action((union ccb *)&cpi);
4277                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4278                 xpt_release_path(&path);
4279         }
4280         return (CAM_SUCCESS);
4281 }
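
/*
 * Illustrative attach-side sketch: a SIM driver builds its sim with
 * cam_sim_alloc() and registers it here.  The "mydriver" names are
 * hypothetical and the cam_sim_alloc() argument list is abbreviated:
 *
 *	sim = cam_sim_alloc(mydriver_action, mydriver_poll, "mydriver",
 *			    softc, unit, ...);
 *	if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		...fail...
 */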
4282
4283 /*
4284  * Deregister a bus.  We must clean out all transactions pending on the bus.
4285  * This routine is typically called prior to cam_sim_free() (e.g. see
4286  * dev/usbmisc/umass/umass.c)
4287  */
4288 int32_t
4289 xpt_bus_deregister(path_id_t pathid)
4290 {
4291         struct cam_path bus_path;
4292         struct cam_ed *device;
4293         struct cam_ed_qinfo *qinfo;
4294         struct cam_devq *devq;
4295         struct cam_periph *periph;
4296         struct cam_sim *ccbsim;
4297         union ccb *work_ccb;
4298         cam_status status;
4299
4300         status = xpt_compile_path(&bus_path, NULL, pathid,
4301                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4302         if (status != CAM_REQ_CMP)
4303                 return (status);
4304
4305         /*
4306          * This should clear out all pending requests and timeouts, but
4307          * the ccb's may be queued to a software interrupt.
4308          * the ccbs may be queued to a software interrupt.
4309          * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4310          * and it really ought to.
4311          */
4312         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4313         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4314
4315         /* The SIM may be gone, so use a dummy SIM for any stray operations. */
4316         devq = bus_path.bus->sim->devq;
4317         bus_path.bus->sim = &cam_dead_sim;
4318
4319         /* Execute any pending operations now. */
4320         while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4321             CAMQ_HEAD)) != NULL ||
4322             (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4323             CAMQ_HEAD)) != NULL) {
4324                 do {
4325                         device = qinfo->device;
4326                         work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4327                         if (work_ccb != NULL) {
4328                                 devq->active_dev = device;
4329                                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4330                                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4331                                 ccbsim = work_ccb->ccb_h.path->bus->sim;
4332                                 (*(ccbsim->sim_action))(ccbsim, work_ccb);
4333                         }
4334
4335                         periph = (struct cam_periph *)camq_remove(&device->drvq,
4336                             CAMQ_HEAD);
4337                         if (periph != NULL)
4338                                 xpt_schedule(periph, periph->pinfo.priority);
4339                 } while (work_ccb != NULL || periph != NULL);
4340         }
4341
4342         /* Make sure all completed CCBs are processed. */
4343         while (!TAILQ_EMPTY(&cam_bioq)) {
4344                 camisr(&cam_bioq);
4345
4346                 /* Repeat the async's for the benefit of any new devices. */
4347                 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4348                 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4349         }
4350
4351         /* Release the reference count held while registered. */
4352         xpt_release_bus(bus_path.bus);
4353         xpt_release_path(&bus_path);
4354
4355         /* Recheck for more completed CCBs. */
4356         while (!TAILQ_EMPTY(&cam_bioq))
4357                 camisr(&cam_bioq);
4358
4359         return (CAM_REQ_CMP);
4360 }
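
/*
 * Illustrative detach-side sketch ("mydriver_sim" is hypothetical):
 * deregister the bus first, then free the sim; cam_sim_free()'s exact
 * arguments vary between CAM versions:
 *
 *	xpt_bus_deregister(cam_sim_path(mydriver_sim));
 *	cam_sim_free(mydriver_sim);
 */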
4361
4362 static path_id_t
4363 xptnextfreepathid(void)
4364 {
4365         struct cam_eb *bus;
4366         path_id_t pathid;
4367         char *strval;
4368
4369         pathid = 0;
4370         bus = TAILQ_FIRST(&xpt_busses);
4371 retry:
4372         /* Find an unoccupied pathid */
4373         while (bus != NULL
4374             && bus->path_id <= pathid) {
4375                 if (bus->path_id == pathid)
4376                         pathid++;
4377                 bus = TAILQ_NEXT(bus, links);
4378         }
4379
4380         /*
4381          * Ensure that this pathid is not reserved for
4382          * a bus that may be registered in the future.
4383          */
4384         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4385                 ++pathid;
4386                 /* Start the search over */
4387                 goto retry;
4388         }
4389         return (pathid);
4390 }
4391
4392 static path_id_t
4393 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4394 {
4395         path_id_t pathid;
4396         int i, dunit, val;
4397         char buf[32];
4398
4399         pathid = CAM_XPT_PATH_ID;
4400         ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4401         i = -1;
4402         while ((i = resource_query_string(i, "at", buf)) != -1) {
4403                 if (strcmp(resource_query_name(i), "scbus")) {
4404                         /* Avoid a bit of foot shooting. */
4405                         continue;
4406                 }
4407                 dunit = resource_query_unit(i);
4408                 if (dunit < 0)          /* unwired?! */
4409                         continue;
4410                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4411                         if (sim_bus == val) {
4412                                 pathid = dunit;
4413                                 break;
4414                         }
4415                 } else if (sim_bus == 0) {
4416                         /* Unspecified matches bus 0 */
4417                         pathid = dunit;
4418                         break;
4419                 } else {
4420                         kprintf("Ambiguous scbus configuration for %s%d "
4421                                "bus %d, cannot wire down.  The kernel "
4422                                "config entry for scbus%d should "
4423                                "specify a controller bus.\n"
4424                                "Scbus will be assigned dynamically.\n",
4425                                sim_name, sim_unit, sim_bus, dunit);
4426                         break;
4427                 }
4428         }
4429
4430         if (pathid == CAM_XPT_PATH_ID)
4431                 pathid = xptnextfreepathid();
4432         return (pathid);
4433 }
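
/*
 * Illustrative wiring (a sketch of the "scbus" hints consulted above):
 * a kernel config line such as
 *
 *	device scbus0 at ahc0 bus 0
 *
 * reserves path id 0 for ahc0's first bus; xptnextfreepathid() skips
 * over any pathid wired this way, and xptpathid() hands it out when
 * the matching sim registers.
 */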
4434
4435 void
4436 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4437 {
4438         struct cam_eb *bus;
4439         struct cam_et *target, *next_target;
4440         struct cam_ed *device, *next_device;
4441
4442         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4443
4444         /*
4445          * Most async events come from a CAM interrupt context.  In
4446          * a few cases, the error recovery code at the peripheral layer,
4447          * which may run from our SWI or a process context, may signal
4448          * deferred events with a call to xpt_async. Ensure async
4449          * notifications are serialized by blocking cam interrupts.
4450          */
4451         crit_enter();
4452
4453         bus = path->bus;
4454
4455         if (async_code == AC_BUS_RESET) { 
4456                 /* Update our notion of when the last reset occurred */
4457                 microuptime(&bus->last_reset);
4458         }
4459
4460         for (target = TAILQ_FIRST(&bus->et_entries);
4461              target != NULL;
4462              target = next_target) {
4463
4464                 next_target = TAILQ_NEXT(target, links);
4465
4466                 if (path->target != target
4467                  && path->target->target_id != CAM_TARGET_WILDCARD
4468                  && target->target_id != CAM_TARGET_WILDCARD)
4469                         continue;
4470
4471                 if (async_code == AC_SENT_BDR) {
4472                         /* Update our notion of when the last reset occurred */
4473                         microuptime(&path->target->last_reset);
4474                 }
4475
4476                 for (device = TAILQ_FIRST(&target->ed_entries);
4477                      device != NULL;
4478                      device = next_device) {
4479
4480                         next_device = TAILQ_NEXT(device, links);
4481
4482                         if (path->device != device 
4483                          && path->device->lun_id != CAM_LUN_WILDCARD
4484                          && device->lun_id != CAM_LUN_WILDCARD)
4485                                 continue;
4486
4487                         xpt_dev_async(async_code, bus, target,
4488                                       device, async_arg);
4489
4490                         xpt_async_bcast(&device->asyncs, async_code,
4491                                         path, async_arg);
4492                 }
4493         }
4494         
4495         /*
4496          * If this wasn't a fully wildcarded async, tell all
4497          * clients that want all async events.
4498          */
4499         if (bus != xpt_periph->path->bus)
4500                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4501                                 path, async_arg);
4502         crit_exit();
4503 }
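
/*
 * Illustrative SIM-side use (sketch): a controller driver that has
 * just observed a bus reset reports it against the wildcard path it
 * compiled for its bus ("sim_path" is hypothetical):
 *
 *	xpt_async(AC_BUS_RESET, sim_path, NULL);
 */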
4504
4505 static void
4506 xpt_async_bcast(struct async_list *async_head,
4507                 u_int32_t async_code,
4508                 struct cam_path *path, void *async_arg)
4509 {
4510         struct async_node *cur_entry;
4511
4512         cur_entry = SLIST_FIRST(async_head);
4513         while (cur_entry != NULL) {
4514                 struct async_node *next_entry;
4515                 /*
4516                  * Grab the next list entry before we call the current
4517                  * entry's callback.  This is because the callback function
4518                  * can delete its async callback entry.
4519                  */
4520                 next_entry = SLIST_NEXT(cur_entry, links);
4521                 if ((cur_entry->event_enable & async_code) != 0)
4522                         cur_entry->callback(cur_entry->callback_arg,
4523                                             async_code, path,
4524                                             async_arg);
4525                 cur_entry = next_entry;
4526         }
4527 }
4528
4529 /*
4530  * Handle any per-device event notifications that require action by the XPT.
4531  */
4532 static void
4533 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4534               struct cam_ed *device, void *async_arg)
4535 {
4536         cam_status status;
4537         struct cam_path newpath;
4538
4539         /*
4540          * We only need to handle events for real devices.
4541          */
4542         if (target->target_id == CAM_TARGET_WILDCARD
4543          || device->lun_id == CAM_LUN_WILDCARD)
4544                 return;
4545
4546         /*
4547          * We need our own path with wildcards expanded to
4548          * handle certain types of events.
4549          */
4550         if ((async_code == AC_SENT_BDR)
4551          || (async_code == AC_BUS_RESET)
4552          || (async_code == AC_INQ_CHANGED))
4553                 status = xpt_compile_path(&newpath, NULL,
4554                                           bus->path_id,
4555                                           target->target_id,
4556                                           device->lun_id);
4557         else
4558                 status = CAM_REQ_CMP_ERR;
4559
4560         if (status == CAM_REQ_CMP) {
4561
4562                 /*
4563                  * Allow transfer negotiation to occur in a
4564                  * tag free environment.
4565                  */
4566                 if (async_code == AC_SENT_BDR
4567                  || async_code == AC_BUS_RESET)
4568                         xpt_toggle_tags(&newpath);
4569
4570                 if (async_code == AC_INQ_CHANGED) {
4571                         /*
4572                          * We've sent a start unit command, or
4573                          * something similar to a device that
4574                          * may have caused its inquiry data to
4575                          * change. So we re-scan the device to
4576                          * refresh the inquiry data for it.
4577                          */
4578                         xpt_scan_lun(newpath.periph, &newpath,
4579                                      CAM_EXPECT_INQ_CHANGE, NULL);
4580                 }
4581                 xpt_release_path(&newpath);
4582         } else if (async_code == AC_LOST_DEVICE) {
4583                  * When we lose a device, the device may be about to detach
4584                  * the sim, so we have to clear out all pending timeouts and
4585                  * the sim, we have to clear out all pending timeouts and
4586                  * requests before that happens.  XXX it would be nice if
4587                  * we could abort the requests pertaining to the device.
4588                  */
4589                 xpt_release_devq_timeout(device);
4590                 if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4591                         device->flags |= CAM_DEV_UNCONFIGURED;
4592                         xpt_release_device(bus, target, device);
4593                 }
4594         } else if (async_code == AC_TRANSFER_NEG) {
4595                 struct ccb_trans_settings *settings;
4596
4597                 settings = (struct ccb_trans_settings *)async_arg;
4598                 xpt_set_transfer_settings(settings, device,
4599                                           /*async_update*/TRUE);
4600         }
4601 }
4602
4603 u_int32_t
4604 xpt_freeze_devq(struct cam_path *path, u_int count)
4605 {
4606         struct ccb_hdr *ccbh;
4607
4608         crit_enter();
4609         path->device->qfrozen_cnt += count;
4610
4611         /*
4612          * Mark the last CCB in the queue as needing
4613          * changed its state yet.  This fixes a race
4614          * where a ccb is just about to be queued to
4615          * a controller driver when its interrupt routine
4616          * freezes the queue.  To completely close the
4617          * hole, controller drivers must check to see
4618          * hole, controller drives must check to see
4619          * if a ccb's status is still CAM_REQ_INPROG
4620          * under critical section protection just before they queue
4621          * the CCB.  See ahc_action/ahc_freeze_devq for
4622          * an example.
4623          */
4624         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4625         if (ccbh && ccbh->status == CAM_REQ_INPROG)
4626                 ccbh->status = CAM_REQUEUE_REQ;
4627         crit_exit();
4628         return (path->device->qfrozen_cnt);
4629 }
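
/*
 * Illustrative controller-side check (a sketch of the ahc-style test
 * cited above; "ccb" is the CCB about to be handed to the hardware):
 *
 *	crit_enter();
 *	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
 *		crit_exit();
 *		xpt_done(ccb);	(already marked CAM_REQUEUE_REQ above)
 *		return;
 *	}
 *	...queue the CCB to the controller...
 *	crit_exit();
 */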
4630
4631 u_int32_t
4632 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4633 {
4634         if (sim->devq == NULL)
4635                 return(count);
4636         sim->devq->send_queue.qfrozen_cnt += count;
4637         if (sim->devq->active_dev != NULL) {
4638                 struct ccb_hdr *ccbh;
4639                 
4640                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4641                                   ccb_hdr_tailq);
4642                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4643                         ccbh->status = CAM_REQUEUE_REQ;
4644         }
4645         return (sim->devq->send_queue.qfrozen_cnt);
4646 }
4647
4648 /*
4649  * WARNING: most devices, especially USB/UMASS, may detach their sim early.
4650  * We ref-count the sim (and the bus only NULLs it out when the bus has been
4651  * freed, which is not the case here), but the device queue is also freed
4652  * (XXX) and we have to check for that here.
4653  *
4654  * XXX fixme: could we simply not null-out the device queue via 
4655  * cam_sim_free()?
4656  */
4657 static void
4658 xpt_release_devq_timeout(void *arg)
4659 {
4660         struct cam_ed *device;
4661
4662         device = (struct cam_ed *)arg;
4663
4664         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4665 }
4666
4667 void
4668 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4669 {
4670         xpt_release_devq_device(path->device, count, run_queue);
4671 }
4672
4673 static void
4674 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4675 {
4676         int     rundevq;
4677
4678         rundevq = 0;
4679         crit_enter();
4680
4681         if (dev->qfrozen_cnt > 0) {
4682
4683                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4684                 dev->qfrozen_cnt -= count;
4685                 if (dev->qfrozen_cnt == 0) {
4686
4687                         /*
4688                          * No longer need to wait for a successful
4689                          * command completion.
4690                          */
4691                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4692
4693                         /*
4694                          * Remove any timeouts that might be scheduled
4695                          * to release this queue.
4696                          */
4697                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4698                                 callout_stop(&dev->c_handle);
4699                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4700                         }
4701
4702                         /*
4703                          * Now that we are unfrozen, schedule the
4704                          * device so any pending transactions are
4705                          * run.
4706                          */
4707                         if ((dev->ccbq.queue.entries > 0)
4708                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4709                          && (run_queue != 0)) {
4710                                 rundevq = 1;
4711                         }
4712                 }
4713         }
4714         if (rundevq != 0)
4715                 xpt_run_dev_sendq(dev->target->bus);
4716         crit_exit();
4717 }
4718
4719 void
4720 xpt_release_simq(struct cam_sim *sim, int run_queue)
4721 {
4722         struct  camq *sendq;
4723
4724         if (sim->devq == NULL)
4725                 return;
4726
4727         sendq = &(sim->devq->send_queue);
4728         crit_enter();
4729
4730         if (sendq->qfrozen_cnt > 0) {
4731                 sendq->qfrozen_cnt--;
4732                 if (sendq->qfrozen_cnt == 0) {
4733                         struct cam_eb *bus;
4734
4735                         /*
4736                          * If there is a timeout scheduled to release this
4737                          * sim queue, remove it.  The queue frozen count is
4738                          * already at 0.
4739                          */
4740                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4741                                 callout_stop(&sim->c_handle);
4742                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4743                         }
4744                         bus = xpt_find_bus(sim->path_id);
4745                         crit_exit();
4746
4747                         if (run_queue) {
4748                                 /*
4749                                  * Now that we are unfrozen, run the send queue.
4750                                  */
4751                                 xpt_run_dev_sendq(bus);
4752                         }
4753                         xpt_release_bus(bus);
4754                 } else {
4755                         crit_exit();
4756                 }
4757         } else {
4758                 crit_exit();
4759         }
4760 }
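
/*
 * Illustrative pairing (sketch): a SIM that runs out of internal
 * resources freezes its queue, requeues the CCB, and releases the
 * queue once resources return:
 *
 *	xpt_freeze_simq(sim, 1);
 *	ccb->ccb_h.status = CAM_REQUEUE_REQ;
 *	xpt_done(ccb);
 *	...later, when resources are available...
 *	xpt_release_simq(sim, TRUE);
 */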
4761
4762 void
4763 xpt_done(union ccb *done_ccb)
4764 {
4765         crit_enter();
4766
4767         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4768         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4769                 /*
4770                  * Queue up any of the "non-immediate" types of ccbs
4771                  * for handling by our SWI handler.
4772                  */
4773                 switch (done_ccb->ccb_h.path->periph->type) {
4774                 case CAM_PERIPH_BIO:
4775                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4776                                           sim_links.tqe);
4777                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4778                         setsoftcambio();
4779                         break;
4780                 default:
4781                         panic("unknown periph type %d",
4782                                 done_ccb->ccb_h.path->periph->type);
4783                 }
4784         }
4785         crit_exit();
4786 }
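
/*
 * Illustrative completion (sketch): SIM action or interrupt code
 * finishes a CCB by setting its status and handing it back here:
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 */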
4787
4788 union ccb *
4789 xpt_alloc_ccb(void)
4790 {
4791         union ccb *new_ccb;
4792
4793         new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
4794         return (new_ccb);
4795 }
4796
4797 void
4798 xpt_free_ccb(union ccb *free_ccb)
4799 {
4800         kfree(free_ccb, M_CAMXPT);
4801 }
4802
4803
4804
4805 /* Private XPT functions */
4806
4807 /*
4808  * Get a CAM control block for the caller. Charge the structure to the device
4809  * referenced by the path.  If this device has no 'credits' then the
4810  * device already has the maximum number of outstanding operations under way
4811  * and we return NULL. If we don't have sufficient resources to allocate more
4812  * ccbs, we also return NULL.
4813  */
4814 static union ccb *
4815 xpt_get_ccb(struct cam_ed *device)
4816 {
4817         union ccb *new_ccb;
4818
4819         crit_enter();
4820         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4821                 new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
4822                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4823                                   xpt_links.sle);
4824                 xpt_ccb_count++;
4825         }
4826         cam_ccbq_take_opening(&device->ccbq);
4827         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4828         crit_exit();
4829         return (new_ccb);
4830 }
4831
4832 static void
4833 xpt_release_bus(struct cam_eb *bus)
4834 {
4835
4836         crit_enter();
4837         if (bus->refcount == 1) {
4838                 KKASSERT(TAILQ_FIRST(&bus->et_entries) == NULL);
4839                 TAILQ_REMOVE(&xpt_busses, bus, links);
4840                 if (bus->sim) {
4841                         cam_sim_release(bus->sim, 0);
4842                         bus->sim = NULL;
4843                 }
4844                 bus_generation++;
4845                 KKASSERT(bus->refcount == 1);
4846                 kfree(bus, M_CAMXPT);
4847         } else {
4848                 --bus->refcount;
4849         }
4850         crit_exit();
4851 }
4852
4853 static struct cam_et *
4854 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4855 {
4856         struct cam_et *target;
4857         struct cam_et *cur_target;
4858
4859         target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);
4860
4861         TAILQ_INIT(&target->ed_entries);
4862         target->bus = bus;
4863         target->target_id = target_id;
4864         target->refcount = 1;
4865         target->generation = 0;
4866         timevalclear(&target->last_reset);
4867         /*
4868          * Hold a reference to our parent bus so it
4869          * will not go away before we do.
4870          */
4871         bus->refcount++;
4872
4873         /* Insertion sort into our bus's target list */
4874         cur_target = TAILQ_FIRST(&bus->et_entries);
4875         while (cur_target != NULL && cur_target->target_id < target_id)
4876                 cur_target = TAILQ_NEXT(cur_target, links);
4877
4878         if (cur_target != NULL) {
4879                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4880         } else {
4881                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4882         }
4883         bus->generation++;
4884         return (target);
4885 }
4886
4887 static void
4888 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4889 {
4890         crit_enter();
4891         if (target->refcount == 1) {
4892                 KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
4893                 TAILQ_REMOVE(&bus->et_entries, target, links);
4894                 bus->generation++;
4895                 xpt_release_bus(bus);
4896                 KKASSERT(target->refcount == 1);
4897                 kfree(target, M_CAMXPT);
4898         } else {
4899                 --target->refcount;
4900         }
4901         crit_exit();
4902 }
4903
4904 static struct cam_ed *
4905 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4906 {
4907 #ifdef CAM_NEW_TRAN_CODE
4908         struct     cam_path path;
4909 #endif /* CAM_NEW_TRAN_CODE */
4910         struct     cam_ed *device;
4911         struct     cam_devq *devq;
4912         cam_status status;
4913
4914         if (SIM_DEAD(bus->sim))
4915                 return (NULL);
4916
4917         /* Make space for us in the device queue on our bus */
4918         if (bus->sim->devq == NULL)
4919                 return(NULL);
4920         devq = bus->sim->devq;
4921         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4922
4923         if (status != CAM_REQ_CMP) {
4924                 device = NULL;
4925         } else {
4926                 device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
4927         }
4928
4929         if (device != NULL) {
4930                 struct cam_ed *cur_device;
4931
4932                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4933                 device->alloc_ccb_entry.device = device;
4934                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4935                 device->send_ccb_entry.device = device;
4936                 device->target = target;
4937                 device->lun_id = lun_id;
4938                 /* Initialize our queues */
4939                 if (camq_init(&device->drvq, 0) != 0) {
4940                         kfree(device, M_CAMXPT);
4941                         return (NULL);
4942                 }
4943                 if (cam_ccbq_init(&device->ccbq,
4944                                   bus->sim->max_dev_openings) != 0) {
4945                         camq_fini(&device->drvq);
4946                         kfree(device, M_CAMXPT);
4947                         return (NULL);
4948                 }
4949                 SLIST_INIT(&device->asyncs);
4950                 SLIST_INIT(&device->periphs);
4951                 device->generation = 0;
4952                 device->owner = NULL;
4953                 /*
4954                  * Take the default quirk entry until we have inquiry
4955                  * data and can determine a better quirk to use.
4956                  */
4957                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4958                 bzero(&device->inq_data, sizeof(device->inq_data));
4959                 device->inq_flags = 0;
4960                 device->queue_flags = 0;
4961                 device->serial_num = NULL;
4962                 device->serial_num_len = 0;
4963                 device->qfrozen_cnt = 0;
4964                 device->flags = CAM_DEV_UNCONFIGURED;
4965                 device->tag_delay_count = 0;
4966                 device->tag_saved_openings = 0;
4967                 device->refcount = 1;
4968                 callout_init(&device->c_handle);
4969
4970                 /*
4971                  * Hold a reference to our parent target so it
4972                  * will not go away before we do.
4973                  */
4974                 target->refcount++;
4975
4976                 /*
4977                  * XXX should be limited by number of CCBs this bus can
4978                  * do.
4979                  */
4980                 xpt_max_ccbs += device->ccbq.devq_openings;
4981                 /* Insertion sort into our target's device list */
4982                 cur_device = TAILQ_FIRST(&target->ed_entries);
4983                 while (cur_device != NULL && cur_device->lun_id < lun_id)
4984                         cur_device = TAILQ_NEXT(cur_device, links);
4985                 if (cur_device != NULL) {
4986                         TAILQ_INSERT_BEFORE(cur_device, device, links);
4987                 } else {
4988                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4989                 }
4990                 target->generation++;
4991 #ifdef CAM_NEW_TRAN_CODE
4992                 if (lun_id != CAM_LUN_WILDCARD) {
4993                         xpt_compile_path(&path,
4994                                          NULL,
4995                                          bus->path_id,
4996                                          target->target_id,
4997                                          lun_id);
4998                         xpt_devise_transport(&path);
4999                         xpt_release_path(&path);
5000                 }
5001 #endif /* CAM_NEW_TRAN_CODE */
5002         }
5003         return (device);
5004 }
5005
5006 static void
5007 xpt_reference_device(struct cam_ed *device)
5008 {
5009         ++device->refcount;
5010 }
5011
5012 static void
5013 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5014                    struct cam_ed *device)
5015 {
5016         struct cam_devq *devq;
5017
5018         crit_enter();
5019         if (device->refcount == 1) {
5020                 KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);
5021
5022                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5023                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5024                         panic("Removing device while still queued for ccbs");
5025
5026                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
5027                         device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
5028                         callout_stop(&device->c_handle);
5029                 }
5030
5031                 TAILQ_REMOVE(&target->ed_entries, device,links);
5032                 target->generation++;
5033                 xpt_max_ccbs -= device->ccbq.devq_openings;
5034                 if (!SIM_DEAD(bus->sim)) {
5035                         /* Release our slot in the devq */
5036                         devq = bus->sim->devq;
5037                         cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5038                 }
5039                 camq_fini(&device->drvq);
5040                 camq_fini(&device->ccbq.queue);
5041                 xpt_release_target(bus, target);
5042                 KKASSERT(device->refcount == 1);
5043                 kfree(device, M_CAMXPT);
5044         } else {
5045                 --device->refcount;
5046         }
5047         crit_exit();
5048 }
5049
5050 static u_int32_t
5051 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5052 {
5053         int     diff;
5054         int     result;
5055         struct  cam_ed *dev;
5056
5057         dev = path->device;
5058
5059         crit_enter();
5060
5061         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5062         result = cam_ccbq_resize(&dev->ccbq, newopenings);
5063         if (result == CAM_REQ_CMP && (diff < 0)) {
5064                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5065         }
5066         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5067          || (dev->inq_flags & SID_CmdQue) != 0)
5068                 dev->tag_saved_openings = newopenings;
5069         /* Adjust the global limit */
5070         xpt_max_ccbs += diff;
5071         crit_exit();
5072         return (result);
5073 }
5074
5075 static struct cam_eb *
5076 xpt_find_bus(path_id_t path_id)
5077 {
5078         struct cam_eb *bus;
5079
5080         TAILQ_FOREACH(bus, &xpt_busses, links) {
5081                 if (bus->path_id == path_id) {
5082                         bus->refcount++;
5083                         break;
5084                 }
5085         }
5086         return (bus);
5087 }
5088
5089 static struct cam_et *
5090 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5091 {
5092         struct cam_et *target;
5093
5094         TAILQ_FOREACH(target, &bus->et_entries, links) {
5095                 if (target->target_id == target_id) {
5096                         target->refcount++;
5097                         break;
5098                 }
5099         }
5100         return (target);
5101 }
5102
5103 static struct cam_ed *
5104 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5105 {
5106         struct cam_ed *device;
5107
5108         TAILQ_FOREACH(device, &target->ed_entries, links) {
5109                 if (device->lun_id == lun_id) {
5110                         device->refcount++;
5111                         break;
5112                 }
5113         }
5114         return (device);
5115 }
5116
5117 typedef struct {
5118         union   ccb *request_ccb;
5119         struct  ccb_pathinq *cpi;
5120         int     pending_count;
5121 } xpt_scan_bus_info;
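
/*
 * Illustrative initiation (sketch): the scan machinery below is kicked
 * off by dispatching an XPT_SCAN_BUS ccb against the bus's wildcard
 * path, much as xpt_config() does at boot ("my_scan_done" is a
 * hypothetical completion callback):
 *
 *	xpt_setup_ccb(&ccb->ccb_h, path, 1);
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = my_scan_done;
 *	ccb->crcn.flags = CAM_FLAG_NONE;
 *	xpt_action(ccb);
 */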
5122
5123 /*
5124  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5125  * As the scan progresses, xpt_scan_bus is used as the
5126  * callback on completion function.
5127  */
5128 static void
5129 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5130 {
5131         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5132                   ("xpt_scan_bus\n"));
5133         switch (request_ccb->ccb_h.func_code) {
5134         case XPT_SCAN_BUS:
5135         {
5136                 xpt_scan_bus_info *scan_info;
5137                 union   ccb *work_ccb;
5138                 struct  cam_path *path;
5139                 u_int   i;
5140                 u_int   max_target;
5141                 u_int   initiator_id;
5142
5143                 /* Find out the characteristics of the bus */
5144                 work_ccb = xpt_alloc_ccb();
5145                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5146                               request_ccb->ccb_h.pinfo.priority);
5147                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5148                 xpt_action(work_ccb);
5149                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5150                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5151                         xpt_free_ccb(work_ccb);
5152                         xpt_done(request_ccb);
5153                         return;
5154                 }
5155
5156                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5157                         /*
5158                          * Can't scan the bus on an adapter that
5159                          * cannot perform the initiator role.
5160                          */
5161                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5162                         xpt_free_ccb(work_ccb);
5163                         xpt_done(request_ccb);
5164                         return;
5165                 }
5166
5167                 /* Save some state for use while we probe for devices */
5168                 scan_info = (xpt_scan_bus_info *)
5169                     kmalloc(sizeof(xpt_scan_bus_info), M_TEMP, M_INTWAIT);
5170                 scan_info->request_ccb = request_ccb;
5171                 scan_info->cpi = &work_ccb->cpi;
5172
5173                 /* Cache on our stack so we can work asynchronously */
5174                 max_target = scan_info->cpi->max_target;
5175                 initiator_id = scan_info->cpi->initiator_id;
5176
5177                 /*
5178                  * Don't count the initiator if the
5179                  * initiator is addressable.
5180                  */
5181                 scan_info->pending_count = max_target + 1;
5182                 if (initiator_id <= max_target)
5183                         scan_info->pending_count--;
5184
5185                 for (i = 0; i <= max_target; i++) {
5186                         cam_status status;
5187                         if (i == initiator_id)
5188                                 continue;
5189
5190                         status = xpt_create_path(&path, xpt_periph,
5191                                                  request_ccb->ccb_h.path_id,
5192                                                  i, 0);
5193                         if (status != CAM_REQ_CMP) {
5194                                 kprintf("xpt_scan_bus: xpt_create_path failed"
5195                                        " with status %#x, bus scan halted\n",
5196                                        status);
5197                                 break;
5198                         }
5199                         work_ccb = xpt_alloc_ccb();
5200                         xpt_setup_ccb(&work_ccb->ccb_h, path,
5201                                       request_ccb->ccb_h.pinfo.priority);
5202                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5203                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5204                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5205                         work_ccb->crcn.flags = request_ccb->crcn.flags;
5206                         xpt_action(work_ccb);
5207                 }
5208                 break;
5209         }
5210         case XPT_SCAN_LUN:
5211         {
5212                 xpt_scan_bus_info *scan_info;
5213                 path_id_t path_id;
5214                 target_id_t target_id;
5215                 lun_id_t lun_id;
5216
5217                 /* Reuse the same CCB to query if a device was really found */
5218                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5219                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5220                               request_ccb->ccb_h.pinfo.priority);
5221                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5222
5223                 path_id = request_ccb->ccb_h.path_id;
5224                 target_id = request_ccb->ccb_h.target_id;
5225                 lun_id = request_ccb->ccb_h.target_lun;
5226                 xpt_action(request_ccb);
5227
5228                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5229                         struct cam_ed *device;
5230                         struct cam_et *target;
5231                         int phl;
5232
5233                         /*
5234                          * If we already probed lun 0 successfully, or
5235                          * we have additional configured luns on this
5236                          * target that might have "gone away", go onto
5237                          * target that might have "gone away", go on to
5238                          */
5239                         target = request_ccb->ccb_h.path->target;
5240                         /*
5241                          * hold references to, so ensure they
5242                          * hold references too, so ensure they
5243                          * don't disappear out from under us.
5244                          * The target above is referenced by the
5245                          * path in the request ccb.
5246                          */
5247                         phl = 0;
5248                         crit_enter();
5249                         device = TAILQ_FIRST(&target->ed_entries);
5250                         if (device != NULL) {
5251                                 phl = CAN_SRCH_HI_SPARSE(device);
5252                                 if (device->lun_id == 0)
5253                                         device = TAILQ_NEXT(device, links);
5254                         }
5255                         crit_exit();
5256                         if ((lun_id != 0) || (device != NULL)) {
5257                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5258                                         lun_id++;
5259                         }
5260                 } else {
5261                         struct cam_ed *device;
5262                         
5263                         device = request_ccb->ccb_h.path->device;
5264
5265                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5266                                 /* Try the next lun */
5267                                 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5268                                   || CAN_SRCH_HI_DENSE(device))
5269                                         lun_id++;
5270                         }
5271                 }
5272
5273                 xpt_free_path(request_ccb->ccb_h.path);
5274
5275                 /* Check Bounds */
5276                 if ((lun_id == request_ccb->ccb_h.target_lun)
5277                  || lun_id > scan_info->cpi->max_lun) {
5278                         /* We're done */
5279
5280                         xpt_free_ccb(request_ccb);
5281                         scan_info->pending_count--;
5282                         if (scan_info->pending_count == 0) {
5283                                 xpt_free_ccb((union ccb *)scan_info->cpi);
5284                                 request_ccb = scan_info->request_ccb;
5285                                 kfree(scan_info, M_TEMP);
5286                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
5287                                 xpt_done(request_ccb);
5288                         }
5289                 } else {
5290                         /* Try the next device */
5291                         struct cam_path *path;
5292                         cam_status status;
5293
5294                         status = xpt_create_path(&path, xpt_periph,
5295                                                  path_id, target_id, lun_id);
5296                         if (status != CAM_REQ_CMP) {
5297                                 kprintf("xpt_scan_bus: xpt_create_path failed "
5298                                        "with status %#x, halting LUN scan\n",
5299                                        status);
5300                                 xpt_free_ccb(request_ccb);
5301                                 scan_info->pending_count--;
5302                                 if (scan_info->pending_count == 0) {
5303                                         xpt_free_ccb(
5304                                                 (union ccb *)scan_info->cpi);
5305                                         request_ccb = scan_info->request_ccb;
5306                                         kfree(scan_info, M_TEMP);
5307                                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5308                                         xpt_done(request_ccb);
5309                                 }
5310                                 break;
5311                         }
5312                         xpt_setup_ccb(&request_ccb->ccb_h, path,
5313                                       request_ccb->ccb_h.pinfo.priority);
5314                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5315                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5316                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5317                         request_ccb->crcn.flags =
5318                                 scan_info->request_ccb->crcn.flags;
5319                         xpt_action(request_ccb);
5320                 }
5321                 break;
5322         }
5323         default:
5324                 break;
5325         }
5326 }
5327
5328 typedef enum {
5329         PROBE_TUR,
5330         PROBE_INQUIRY,
5331         PROBE_FULL_INQUIRY,
5332         PROBE_MODE_SENSE,
5333         PROBE_SERIAL_NUM,
5334         PROBE_TUR_FOR_NEGOTIATION
5335 } probe_action;
5336
5337 typedef enum {
5338         PROBE_INQUIRY_CKSUM     = 0x01,
5339         PROBE_SERIAL_CKSUM      = 0x02,
5340         PROBE_NO_ANNOUNCE       = 0x04
5341 } probe_flags;
5342
5343 typedef struct {
5344         TAILQ_HEAD(, ccb_hdr) request_ccbs;
5345         probe_action    action;
5346         union ccb       saved_ccb;
5347         probe_flags     flags;
5348         MD5_CTX         context;
5349         u_int8_t        digest[16];
5350 } probe_softc;
5351
5352 static void
5353 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5354              cam_flags flags, union ccb *request_ccb)
5355 {
5356         struct ccb_pathinq cpi;
5357         cam_status status;
5358         struct cam_path *new_path;
5359         struct cam_periph *old_periph;
5360         
5361         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5362                   ("xpt_scan_lun\n"));
5363         
5364         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5365         cpi.ccb_h.func_code = XPT_PATH_INQ;
5366         xpt_action((union ccb *)&cpi);
5367
5368         if (cpi.ccb_h.status != CAM_REQ_CMP) {
5369                 if (request_ccb != NULL) {
5370                         request_ccb->ccb_h.status = cpi.ccb_h.status;
5371                         xpt_done(request_ccb);
5372                 }
5373                 return;
5374         }
5375
5376         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5377                 /*
5378                  * Can't scan the bus on an adapter that
5379                  * cannot perform the initiator role.
5380                  */
5381                 if (request_ccb != NULL) {
5382                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5383                         xpt_done(request_ccb);
5384                 }
5385                 return;
5386         }
5387
5388         if (request_ccb == NULL) {
5389                 request_ccb = kmalloc(sizeof(union ccb), M_TEMP, M_INTWAIT);
5390                 new_path = kmalloc(sizeof(*new_path), M_TEMP, M_INTWAIT);
5391                 status = xpt_compile_path(new_path, xpt_periph,
5392                                           path->bus->path_id,
5393                                           path->target->target_id,
5394                                           path->device->lun_id);
5395
5396                 if (status != CAM_REQ_CMP) {
5397                         xpt_print_path(path);
5398                         kprintf("xpt_scan_lun: can't compile path, can't "
5399                                "continue\n");
5400                         kfree(request_ccb, M_TEMP);
5401                         kfree(new_path, M_TEMP);
5402                         return;
5403                 }
5404                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5405                 request_ccb->ccb_h.cbfcnp = xptscandone;
5406                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5407                 request_ccb->crcn.flags = flags;
5408         }
5409
5410         crit_enter();
5411         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5412                 probe_softc *softc;
5413
5414                 softc = (probe_softc *)old_periph->softc;
5415                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5416                                   periph_links.tqe);
5417         } else {
5418                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5419                                           probestart, "probe",
5420                                           CAM_PERIPH_BIO,
5421                                           request_ccb->ccb_h.path, NULL, 0,
5422                                           request_ccb);
5423
5424                 if (status != CAM_REQ_CMP) {
5425                         xpt_print_path(path);
5426                         kprintf("xpt_scan_lun: cam_periph_alloc returned an "
5427                                "error, can't continue probe\n");
5428                         request_ccb->ccb_h.status = status;
5429                         xpt_done(request_ccb);
5430                 }
5431         }
5432         crit_exit();
5433 }
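
/*
 * A hypothetical in-kernel caller that already holds a path could request
 * a lun scan the same way xpt_scan_bus() does above, by queueing an
 * XPT_SCAN_LUN CCB ("ccb", "path" and "myscandone" are illustrative
 * names, not part of this driver):
 *
 *	xpt_setup_ccb(&ccb->ccb_h, path, 1);
 *	ccb->ccb_h.func_code = XPT_SCAN_LUN;
 *	ccb->ccb_h.cbfcnp = myscandone;
 *	ccb->crcn.flags = CAM_FLAG_NONE;
 *	xpt_action(ccb);
 *
 * Passing request_ccb == NULL instead makes xpt_scan_lun() allocate its
 * own CCB and path, which xptscandone() below frees when the scan
 * completes.
 */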
5434
5435 static void
5436 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5437 {
5438         xpt_release_path(done_ccb->ccb_h.path);
5439         kfree(done_ccb->ccb_h.path, M_TEMP);
5440         kfree(done_ccb, M_TEMP);
5441 }
5442
5443 static cam_status
5444 proberegister(struct cam_periph *periph, void *arg)
5445 {
5446         union ccb *request_ccb; /* CCB representing the probe request */
5447         probe_softc *softc;
5448
5449         request_ccb = (union ccb *)arg;
5450         if (periph == NULL) {
5451                 kprintf("proberegister: periph was NULL!!\n");
5452                 return(CAM_REQ_CMP_ERR);
5453         }
5454
5455         if (request_ccb == NULL) {
5456                 kprintf("proberegister: no probe CCB, "
5457                        "can't register device\n");
5458                 return(CAM_REQ_CMP_ERR);
5459         }
5460
5461         softc = kmalloc(sizeof(*softc), M_TEMP, M_INTWAIT | M_ZERO);
5462         TAILQ_INIT(&softc->request_ccbs);
5463         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5464                           periph_links.tqe);
5465         softc->flags = 0;
5466         periph->softc = softc;
5467         cam_periph_acquire(periph);
5468         /*
5469          * Ensure we've waited at least a bus settle
5470          * delay before attempting to probe the device.
5471          * For HBAs that don't do bus resets, this won't make a difference.
5472          */
5473         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5474                                       scsi_delay);
5475         probeschedule(periph);
5476         return(CAM_REQ_CMP);
5477 }
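
/*
 * The cam_periph_acquire() above pairs with the cam_periph_release() in
 * probedone(): the reference is dropped once the last queued request CCB
 * completes, at which point the probe periph is invalidated and goes away.
 */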
5478
5479 static void
5480 probeschedule(struct cam_periph *periph)
5481 {
5482         struct ccb_pathinq cpi;
5483         union ccb *ccb;
5484         probe_softc *softc;
5485
5486         softc = (probe_softc *)periph->softc;
5487         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5488
5489         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5490         cpi.ccb_h.func_code = XPT_PATH_INQ;
5491         xpt_action((union ccb *)&cpi);
5492
5493         /*
5494          * If a device has gone away and another device, or the same one,
5495          * is back in the same place, it should have a unit attention
5496          * condition pending.  It will not report the unit attention in
5497          * response to an inquiry, which may leave invalid transfer
5498          * negotiations in effect.  The TUR will reveal the unit attention
5499          * condition.  Only send the TUR for lun 0, since some devices 
5500          * will get confused by commands other than inquiry to non-existent
5501                  * luns.  If you think a device has gone away, start your scan from
5502                  * lun 0.  This will ensure that any bogus transfer settings are
5503          * invalidated.
5504          *
5505          * If we haven't seen the device before and the controller supports
5506          * some kind of transfer negotiation, negotiate with the first
5507          * sent command if no bus reset was performed at startup.  This
5508          * ensures that the device is not confused by transfer negotiation
5509          * settings left over by loader or BIOS action.
5510          */
5511         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5512          && (ccb->ccb_h.target_lun == 0)) {
5513                 softc->action = PROBE_TUR;
5514         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5515               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5516                 proberequestdefaultnegotiation(periph);
5517                 softc->action = PROBE_INQUIRY;
5518         } else {
5519                 softc->action = PROBE_INQUIRY;
5520         }
5521
5522         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5523                 softc->flags |= PROBE_NO_ANNOUNCE;
5524         else
5525                 softc->flags &= ~PROBE_NO_ANNOUNCE;
5526
5527         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5528 }
5529
5530 static void
5531 probestart(struct cam_periph *periph, union ccb *start_ccb)
5532 {
5533         /* Probe the device that our peripheral driver points to */
5534         struct ccb_scsiio *csio;
5535         probe_softc *softc;
5536
5537         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5538
5539         softc = (probe_softc *)periph->softc;
5540         csio = &start_ccb->csio;
5541
5542         switch (softc->action) {
5543         case PROBE_TUR:
5544         case PROBE_TUR_FOR_NEGOTIATION:
5545         {
5546                 scsi_test_unit_ready(csio,
5547                                      /*retries*/4,
5548                                      probedone,
5549                                      MSG_SIMPLE_Q_TAG,
5550                                      SSD_FULL_SIZE,
5551                                      /*timeout*/60000);
5552                 break;
5553         }
5554         case PROBE_INQUIRY:
5555         case PROBE_FULL_INQUIRY:
5556         {
5557                 u_int inquiry_len;
5558                 struct scsi_inquiry_data *inq_buf;
5559
5560                 inq_buf = &periph->path->device->inq_data;
5561                 /*
5562                  * If the device is currently configured, we calculate an
5563                  * MD5 checksum of the inquiry data, and if the serial number
5564                  * length is greater than 0, add the serial number data
5565                  * into the checksum as well.  Once the inquiry and the
5566                  * serial number check finish, we attempt to figure out
5567                  * whether we still have the same device.
5568                  */
5569                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5570                         
5571                         MD5Init(&softc->context);
5572                         MD5Update(&softc->context, (unsigned char *)inq_buf,
5573                                   sizeof(struct scsi_inquiry_data));
5574                         softc->flags |= PROBE_INQUIRY_CKSUM;
5575                         if (periph->path->device->serial_num_len > 0) {
5576                                 MD5Update(&softc->context,
5577                                           periph->path->device->serial_num,
5578                                           periph->path->device->serial_num_len);
5579                                 softc->flags |= PROBE_SERIAL_CKSUM;
5580                         }
5581                         MD5Final(softc->digest, &softc->context);
5582                 } 
5583
5584                 if (softc->action == PROBE_INQUIRY)
5585                         inquiry_len = SHORT_INQUIRY_LENGTH;
5586                 else
5587                         inquiry_len = inq_buf->additional_length
5588                                     + offsetof(struct scsi_inquiry_data,
5589                                                additional_length) + 1;
5590
5591                 /*
5592                  * Some parallel SCSI devices fail to send an
5593                  * ignore wide residue message when dealing with
5594                  * odd length inquiry requests.  Round up to be
5595                  * safe.
5596                  */
5597                 inquiry_len = roundup2(inquiry_len, 2);
5598         
5599                 scsi_inquiry(csio,
5600                              /*retries*/4,
5601                              probedone,
5602                              MSG_SIMPLE_Q_TAG,
5603                              (u_int8_t *)inq_buf,
5604                              inquiry_len,
5605                              /*evpd*/FALSE,
5606                              /*page_code*/0,
5607                              SSD_MIN_SIZE,
5608                              /*timeout*/60 * 1000);
5609                 break;
5610         }
5611         case PROBE_MODE_SENSE:
5612         {
5613                 void  *mode_buf;
5614                 int    mode_buf_len;
5615
5616                 mode_buf_len = sizeof(struct scsi_mode_header_6)
5617                              + sizeof(struct scsi_mode_blk_desc)
5618                              + sizeof(struct scsi_control_page);
5619                 mode_buf = kmalloc(mode_buf_len, M_TEMP, M_INTWAIT);
5620                 scsi_mode_sense(csio,
5621                                 /*retries*/4,
5622                                 probedone,
5623                                 MSG_SIMPLE_Q_TAG,
5624                                 /*dbd*/FALSE,
5625                                 SMS_PAGE_CTRL_CURRENT,
5626                                 SMS_CONTROL_MODE_PAGE,
5627                                 mode_buf,
5628                                 mode_buf_len,
5629                                 SSD_FULL_SIZE,
5630                                 /*timeout*/60000);
5631                 break;
5632         }
5633         case PROBE_SERIAL_NUM:
5634         {
5635                 struct scsi_vpd_unit_serial_number *serial_buf;
5636                 struct cam_ed* device;
5637
5638                 serial_buf = NULL;
5639                 device = periph->path->device;
5640                 device->serial_num = NULL;
5641                 device->serial_num_len = 0;
5642
5643                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5644                         serial_buf = kmalloc(sizeof(*serial_buf), M_TEMP,
5645                                             M_INTWAIT | M_ZERO);
5646                         scsi_inquiry(csio,
5647                                      /*retries*/4,
5648                                      probedone,
5649                                      MSG_SIMPLE_Q_TAG,
5650                                      (u_int8_t *)serial_buf,
5651                                      sizeof(*serial_buf),
5652                                      /*evpd*/TRUE,
5653                                      SVPD_UNIT_SERIAL_NUMBER,
5654                                      SSD_MIN_SIZE,
5655                                      /*timeout*/60 * 1000);
5656                         break;
5657                 }
5658                 /*
5659                  * We'll have to do without; let our probedone
5660                  * routine finish up for us.
5661                  */
5662                 start_ccb->csio.data_ptr = NULL;
5663                 probedone(periph, start_ccb);
5664                 return;
5665         }
5666         }
5667         xpt_action(start_ccb);
5668 }
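
/*
 * Buffer ownership note: the mode_buf and serial_buf allocations made in
 * probestart() travel to the controller inside the CCB and are released
 * by probedone() (see the kfree() calls in its PROBE_MODE_SENSE and
 * PROBE_SERIAL_NUM cases) once the command completes.
 */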
5669
5670 static void
5671 proberequestdefaultnegotiation(struct cam_periph *periph)
5672 {
5673         struct ccb_trans_settings cts;
5674
5675         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5676         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5677 #ifdef CAM_NEW_TRAN_CODE
5678         cts.type = CTS_TYPE_USER_SETTINGS;
5679 #else /* CAM_NEW_TRAN_CODE */
5680         cts.flags = CCB_TRANS_USER_SETTINGS;
5681 #endif /* CAM_NEW_TRAN_CODE */
5682         xpt_action((union ccb *)&cts);
5683         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5684 #ifdef CAM_NEW_TRAN_CODE
5685         cts.type = CTS_TYPE_CURRENT_SETTINGS;
5686 #else /* CAM_NEW_TRAN_CODE */
5687         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5688         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5689 #endif /* CAM_NEW_TRAN_CODE */
5690         xpt_action((union ccb *)&cts);
5691 }
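
/*
 * Seeding the current settings from the user defaults matters because
 * controllers defer all transfer negotiation until the first
 * XPT_SET_TRAN_SETTINGS CCB arrives; without this, a freshly found device
 * could keep whatever (possibly bogus) settings the BIOS or loader left
 * behind.
 */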
5692
5693 static void
5694 probedone(struct cam_periph *periph, union ccb *done_ccb)
5695 {
5696         probe_softc *softc;
5697         struct cam_path *path;
5698         u_int32_t  priority;
5699
5700         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5701
5702         softc = (probe_softc *)periph->softc;
5703         path = done_ccb->ccb_h.path;
5704         priority = done_ccb->ccb_h.pinfo.priority;
5705
5706         switch (softc->action) {
5707         case PROBE_TUR:
5708         {
5709                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5710
5711                         if (cam_periph_error(done_ccb, 0,
5712                                              SF_NO_PRINT, NULL) == ERESTART)
5713                                 return;
5714                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5715                                 /* Don't wedge the queue */
5716                                 xpt_release_devq(done_ccb->ccb_h.path,
5717                                                  /*count*/1,
5718                                                  /*run_queue*/TRUE);
5719                 }
5720                 softc->action = PROBE_INQUIRY;
5721                 xpt_release_ccb(done_ccb);
5722                 xpt_schedule(periph, priority);
5723                 return;
5724         }
5725         case PROBE_INQUIRY:
5726         case PROBE_FULL_INQUIRY:
5727         {
5728                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5729                         struct scsi_inquiry_data *inq_buf;
5730                         u_int8_t periph_qual;
5731
5732                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5733                         inq_buf = &path->device->inq_data;
5734
5735                         periph_qual = SID_QUAL(inq_buf);
5736                         
5737                         switch(periph_qual) {
5738                         case SID_QUAL_LU_CONNECTED:
5739                         {
5740                                 u_int8_t len;
5741
5742                                 /*
5743                                  * We conservatively request only
5744                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
5745                                  * information during our first try
5746                                  * at sending an INQUIRY. If the device
5747                                  * has more information to give,
5748                                  * perform a second request specifying
5749                                  * the amount of information the device
5750                                  * is willing to give.
5751                                  */
5752                                 len = inq_buf->additional_length
5753                                     + offsetof(struct scsi_inquiry_data,
5754                                                 additional_length) + 1;
5755                                 if (softc->action == PROBE_INQUIRY
5756                                  && len > SHORT_INQUIRY_LENGTH) {
5757                                         softc->action = PROBE_FULL_INQUIRY;
5758                                         xpt_release_ccb(done_ccb);
5759                                         xpt_schedule(periph, priority);
5760                                         return;
5761                                 }
5762
5763                                 xpt_find_quirk(path->device);
5764
5765 #ifdef CAM_NEW_TRAN_CODE
5766                                 xpt_devise_transport(path);
5767 #endif /* CAM_NEW_TRAN_CODE */
5768                                 if ((inq_buf->flags & SID_CmdQue) != 0)
5769                                         softc->action = PROBE_MODE_SENSE;
5770                                 else
5771                                         softc->action = PROBE_SERIAL_NUM;
5772
5773                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5774                                 xpt_reference_device(path->device);
5775
5776                                 xpt_release_ccb(done_ccb);
5777                                 xpt_schedule(periph, priority);
5778                                 return;
5779                         }
5780                         default:
5781                                 break;
5782                         }
5783                 } else if (cam_periph_error(done_ccb, 0,
5784                                             done_ccb->ccb_h.target_lun > 0
5785                                             ? SF_RETRY_UA|SF_QUIET_IR
5786                                             : SF_RETRY_UA,
5787                                             &softc->saved_ccb) == ERESTART) {
5788                         return;
5789                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5790                         /* Don't wedge the queue */
5791                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5792                                          /*run_queue*/TRUE);
5793                 }
5794                 /*
5795                  * If we get to this point, we got an error status back
5796                  * from the inquiry and the error status doesn't require
5797                  * automatically retrying the command.  Therefore, the
5798                  * inquiry failed.  If we had inquiry information before
5799                  * for this device, but this latest inquiry command failed,
5800                  * the device has probably gone away.  If this device isn't
5801                  * already marked unconfigured, notify the peripheral
5802                  * drivers that this device is no more.
5803                  */
5804                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5805                         /* Send the async notification. */
5806                         xpt_async(AC_LOST_DEVICE, path, NULL);
5807                 }
5808
5809                 xpt_release_ccb(done_ccb);
5810                 break;
5811         }
5812         case PROBE_MODE_SENSE:
5813         {
5814                 struct ccb_scsiio *csio;
5815                 struct scsi_mode_header_6 *mode_hdr;
5816
5817                 csio = &done_ccb->csio;
5818                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5819                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5820                         struct scsi_control_page *page;
5821                         u_int8_t *offset;
5822
5823                         offset = ((u_int8_t *)&mode_hdr[1])
5824                             + mode_hdr->blk_desc_len;
5825                         page = (struct scsi_control_page *)offset;
5826                         path->device->queue_flags = page->queue_flags;
5827                 } else if (cam_periph_error(done_ccb, 0,
5828                                             SF_RETRY_UA|SF_NO_PRINT,
5829                                             &softc->saved_ccb) == ERESTART) {
5830                         return;
5831                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5832                         /* Don't wedge the queue */
5833                         xpt_release_devq(done_ccb->ccb_h.path,
5834                                          /*count*/1, /*run_queue*/TRUE);
5835                 }
5836                 xpt_release_ccb(done_ccb);
5837                 kfree(mode_hdr, M_TEMP);
5838                 softc->action = PROBE_SERIAL_NUM;
5839                 xpt_schedule(periph, priority);
5840                 return;
5841         }
5842         case PROBE_SERIAL_NUM:
5843         {
5844                 struct ccb_scsiio *csio;
5845                 struct scsi_vpd_unit_serial_number *serial_buf;
5846                 u_int32_t  priority;
5847                 int changed;
5848                 int have_serialnum;
5849
5850                 changed = 1;
5851                 have_serialnum = 0;
5852                 csio = &done_ccb->csio;
5853                 priority = done_ccb->ccb_h.pinfo.priority;
5854                 serial_buf =
5855                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5856
5857                 /* Clean up from previous instance of this device */
5858                 if (path->device->serial_num != NULL) {
5859                         kfree(path->device->serial_num, M_CAMXPT);
5860                         path->device->serial_num = NULL;
5861                         path->device->serial_num_len = 0;
5862                 }
5863
5864                 if (serial_buf == NULL) {
5865                         /*
5866                          * Don't process the command, since it was never sent.
5867                          */
5868                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5869                         && (serial_buf->length > 0)) {
5870
5871                         have_serialnum = 1;
5872                         path->device->serial_num =
5873                                 kmalloc((serial_buf->length + 1),
5874                                        M_CAMXPT, M_INTWAIT);
5875                         bcopy(serial_buf->serial_num,
5876                               path->device->serial_num,
5877                               serial_buf->length);
5878                         path->device->serial_num_len = serial_buf->length;
5879                         path->device->serial_num[serial_buf->length] = '\0';
5880                 } else if (cam_periph_error(done_ccb, 0,
5881                                             SF_RETRY_UA|SF_NO_PRINT,
5882                                             &softc->saved_ccb) == ERESTART) {
5883                         return;
5884                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5885                         /* Don't wedge the queue */
5886                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5887                                          /*run_queue*/TRUE);
5888                 }
5889                 
5890                 /*
5891                  * Let's see if we have seen this device before.
5892                  */
5893                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5894                         MD5_CTX context;
5895                         u_int8_t digest[16];
5896
5897                         MD5Init(&context);
5898                         
5899                         MD5Update(&context,
5900                                   (unsigned char *)&path->device->inq_data,
5901                                   sizeof(struct scsi_inquiry_data));
5902
5903                         if (have_serialnum)
5904                                 MD5Update(&context, serial_buf->serial_num,
5905                                           serial_buf->length);
5906
5907                         MD5Final(digest, &context);
5908                         if (bcmp(softc->digest, digest, 16) == 0)
5909                                 changed = 0;
5910
5911                         /*
5912                          * XXX Do we need to do a TUR in order to ensure
5913                          *     that the device really hasn't changed???
5914                          */
5915                         if ((changed != 0)
5916                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5917                                 xpt_async(AC_LOST_DEVICE, path, NULL);
5918                 }
5919                 if (serial_buf != NULL)
5920                         kfree(serial_buf, M_TEMP);
5921
5922                 if (changed != 0) {
5923                         /*
5924                          * Now that we have all the necessary
5925                          * information to safely perform transfer
5926                          * negotiations... Controllers don't perform
5927                          * any negotiation or tagged queuing until
5928                          * after the first XPT_SET_TRAN_SETTINGS ccb is
5929                          * received.  So, on a new device, just retrieve
5930                          * the user settings, and set them as the current
5931                          * settings to set the device up.
5932                          */
5933                         proberequestdefaultnegotiation(periph);
5934                         xpt_release_ccb(done_ccb);
5935
5936                         /*
5937                          * Perform a TUR to allow the controller to
5938                          * perform any necessary transfer negotiation.
5939                          */
5940                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
5941                         xpt_schedule(periph, priority);
5942                         return;
5943                 }
5944                 xpt_release_ccb(done_ccb);
5945                 break;
5946         }
5947         case PROBE_TUR_FOR_NEGOTIATION:
5948                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5949                         /* Don't wedge the queue */
5950                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5951                                          /*run_queue*/TRUE);
5952                 }
5953
5954                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5955                 xpt_reference_device(path->device);
5956
5957                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5958                         /* Inform the XPT that a new device has been found */
5959                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5960                         xpt_action(done_ccb);
5961
5962                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
5963                                   done_ccb);
5964                 }
5965                 xpt_release_ccb(done_ccb);
5966                 break;
5967         }
5968         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5969         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5970         done_ccb->ccb_h.status = CAM_REQ_CMP;
5971         xpt_done(done_ccb);
5972         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5973                 cam_periph_invalidate(periph);
5974                 cam_periph_release(periph);
5975         } else {
5976                 probeschedule(periph);
5977         }
5978 }
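
/*
 * When probedone() falls out of the switch above it completes the CCB at
 * the head of softc->request_ccbs, then either tears the probe periph
 * down (no more queued requests) or calls probeschedule() again to
 * service the next queued scan request.
 */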
5979
5980 static void
5981 probecleanup(struct cam_periph *periph)
5982 {
5983         kfree(periph->softc, M_TEMP);
5984 }
5985
5986 static void
5987 xpt_find_quirk(struct cam_ed *device)
5988 {
5989         caddr_t match;
5990
5991         match = cam_quirkmatch((caddr_t)&device->inq_data,
5992                                (caddr_t)xpt_quirk_table,
5993                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5994                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
5995
5996         if (match == NULL)
5997                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
5998
5999         device->quirk = (struct xpt_quirk_entry *)match;
6000 }
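
/*
 * Quirk entries (in xpt_quirk_table earlier in this file) pair a
 * scsi_inquiry_pattern with quirk flags and tag-queue depth limits.  A
 * hypothetical entry disabling serial-number probing for one drive could
 * look like:
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "MODEL*", "*" },
 *		CAM_QUIRK_NOSERIAL, 0, 0
 *	},
 *
 * The table must end with a wildcard entry, or the panic above fires.
 */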
6001
6002 static int
6003 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6004 {
6005         int error, bool;
6006
6007         bool = cam_srch_hi;
6008         error = sysctl_handle_int(oidp, &bool, sizeof(bool), req);
6009         if (error != 0 || req->newptr == NULL)
6010                 return (error);
6011         if (bool == 0 || bool == 1) {
6012                 cam_srch_hi = bool;
6013                 return (0);
6014         } else {
6015                 return (EINVAL);
6016         }
6017 }
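
/*
 * The handler above accepts only 0 or 1.  Assuming the usual registration
 * under the kern.cam sysctl tree, searching above lun 7 on SCSI-3 and
 * newer devices can be toggled at runtime with, e.g.:
 *
 *	sysctl kern.cam.cam_srch_hi=1
 */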
6018
6019 #ifdef CAM_NEW_TRAN_CODE
6020
6021 static void
6022 xpt_devise_transport(struct cam_path *path)
6023 {
6024         struct ccb_pathinq cpi;
6025         struct ccb_trans_settings cts;
6026         struct scsi_inquiry_data *inq_buf;
6027
6028         /* Get transport information from the SIM */
6029         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6030         cpi.ccb_h.func_code = XPT_PATH_INQ;
6031         xpt_action((union ccb *)&cpi);
6032
6033         inq_buf = NULL;
6034         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6035                 inq_buf = &path->device->inq_data;
6036         path->device->protocol = PROTO_SCSI;
6037         path->device->protocol_version =
6038             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6039         path->device->transport = cpi.transport;
6040         path->device->transport_version = cpi.transport_version;
6041
6042         /*
6043          * Any device not using SPI3 features should
6044          * be considered SPI2 or lower.
6045          */
6046         if (inq_buf != NULL) {
6047                 if (path->device->transport == XPORT_SPI
6048                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
6049                  && path->device->transport_version > 2)
6050                         path->device->transport_version = 2;
6051         } else {
6052                 struct cam_ed* otherdev;
6053
6054                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6055                      otherdev != NULL;
6056                      otherdev = TAILQ_NEXT(otherdev, links)) {
6057                         if (otherdev != path->device)
6058                                 break;
6059                 }
6060
6061                 if (otherdev != NULL) {
6062                         /*
6063                          * Initially assume the same versioning as
6064                          * prior luns for this target.
6065                          */
6066                         path->device->protocol_version =
6067                             otherdev->protocol_version;
6068                         path->device->transport_version =
6069                             otherdev->transport_version;
6070                 } else {
6071                         /* Until we know better, opt for safety */
6072                         path->device->protocol_version = 2;
6073                         if (path->device->transport == XPORT_SPI)
6074                                 path->device->transport_version = 2;
6075                         else
6076                                 path->device->transport_version = 0;
6077                 }
6078         }
6079
6080         /*
6081          * XXX
6082          * For a device compliant with SPC-2 we should be able
6083          * to determine the transport version supported by
6084          * scrutinizing the version descriptors in the
6085          * inquiry buffer.
6086          */
6087
6088         /* Tell the controller what we think */
6089         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6090         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6091         cts.type = CTS_TYPE_CURRENT_SETTINGS;
6092         cts.transport = path->device->transport;
6093         cts.transport_version = path->device->transport_version;
6094         cts.protocol = path->device->protocol;
6095         cts.protocol_version = path->device->protocol_version;
6096         cts.proto_specific.valid = 0;
6097         cts.xport_specific.valid = 0;
6098         xpt_action((union ccb *)&cts);
6099 }
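
/*
 * To summarize the inference above: protocol and transport identities come
 * from the path inquiry, while versions are taken from the device's own
 * inquiry data when valid, else borrowed from a sibling lun on the same
 * target, else set to conservative defaults (protocol version 2, and
 * SPI-2 or transport version 0).
 */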
6100
6101 static void
6102 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6103                           int async_update)
6104 {
6105         struct  ccb_pathinq cpi;
6106         struct  ccb_trans_settings cur_cts;
6107         struct  ccb_trans_settings_scsi *scsi;
6108         struct  ccb_trans_settings_scsi *cur_scsi;
6109         struct  cam_sim *sim;
6110         struct  scsi_inquiry_data *inq_data;
6111
6112         if (device == NULL) {
6113                 cts->ccb_h.status = CAM_PATH_INVALID;
6114                 xpt_done((union ccb *)cts);
6115                 return;
6116         }
6117
6118         if (cts->protocol == PROTO_UNKNOWN
6119          || cts->protocol == PROTO_UNSPECIFIED) {
6120                 cts->protocol = device->protocol;
6121                 cts->protocol_version = device->protocol_version;
6122         }
6123
6124         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6125          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6126                 cts->protocol_version = device->protocol_version;
6127
6128         if (cts->protocol != device->protocol) {
6129                 xpt_print_path(cts->ccb_h.path);
6130                 kprintf("Uninitialized Protocol %x:%x?\n",
6131                        cts->protocol, device->protocol);
6132                 cts->protocol = device->protocol;
6133         }
6134
6135         if (cts->protocol_version > device->protocol_version) {
6136                 if (bootverbose) {
6137                         xpt_print_path(cts->ccb_h.path);
6138                         kprintf("Down-revving Protocol Version from %d to %d?\n",
6139                                cts->protocol_version, device->protocol_version);
6140                 }
6141                 cts->protocol_version = device->protocol_version;
6142         }
6143
6144         if (cts->transport == XPORT_UNKNOWN
6145          || cts->transport == XPORT_UNSPECIFIED) {
6146                 cts->transport = device->transport;
6147                 cts->transport_version = device->transport_version;
6148         }
6149
6150         if (cts->transport_version == XPORT_VERSION_UNKNOWN
6151          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6152                 cts->transport_version = device->transport_version;
6153
6154         if (cts->transport != device->transport) {
6155                 xpt_print_path(cts->ccb_h.path);
6156                 kprintf("Uninitialized Transport %x:%x?\n",
6157                        cts->transport, device->transport);
6158                 cts->transport = device->transport;
6159         }
6160
6161         if (cts->transport_version > device->transport_version) {
6162                 if (bootverbose) {
6163                         xpt_print_path(cts->ccb_h.path);
6164                         kprintf("Down-revving Transport Version from %d to %d?\n",
6165                                cts->transport_version,
6166                                device->transport_version);
6167                 }
6168                 cts->transport_version = device->transport_version;
6169         }
6170
6171         sim = cts->ccb_h.path->bus->sim;
6172
6173         /*
6174          * Nothing more of interest to do unless
6175          * this is a device connected via the
6176          * SCSI protocol.
6177          */
6178         if (cts->protocol != PROTO_SCSI) {
6179                 if (async_update == FALSE)
6180                         (*(sim->sim_action))(sim, (union ccb *)cts);
6181                 return;
6182         }
6183
6184         inq_data = &device->inq_data;
6185         scsi = &cts->proto_specific.scsi;
6186         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6187         cpi.ccb_h.func_code = XPT_PATH_INQ;
6188         xpt_action((union ccb *)&cpi);
6189
6190         /* SCSI specific sanity checking */
6191         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6192          || (inq_data->flags & SID_CmdQue) == 0
6193          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6194          || (device->quirk->mintags == 0)) {
6195                 /*
6196                  * Can't tag on hardware that doesn't support tags,
6197                  * doesn't have it enabled, or has broken tag support.
6198                  */
6199                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6200         }
6201
6202         if (async_update == FALSE) {
6203                 /*
6204                  * Perform sanity checking against what the
6205                  * controller and device can do.
6206                  */
6207                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6208                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6209                 cur_cts.type = cts->type;
6210                 xpt_action((union ccb *)&cur_cts);
6211
6212                 cur_scsi = &cur_cts.proto_specific.scsi;
6213                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6214                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6215                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6216                 }
6217                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6218                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6219         }
6220
6221         /* SPI specific sanity checking */
6222         if (cts->transport == XPORT_SPI && async_update == FALSE) {
6223                 u_int spi3caps;
6224                 struct ccb_trans_settings_spi *spi;
6225                 struct ccb_trans_settings_spi *cur_spi;
6226
6227                 spi = &cts->xport_specific.spi;
6228
6229                 cur_spi = &cur_cts.xport_specific.spi;
6230
6231                 /* Fill in any gaps in what the user gave us */
6232                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6233                         spi->sync_period = cur_spi->sync_period;
6234                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6235                         spi->sync_period = 0;
6236                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6237                         spi->sync_offset = cur_spi->sync_offset;
6238                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6239                         spi->sync_offset = 0;
6240                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6241                         spi->ppr_options = cur_spi->ppr_options;
6242                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6243                         spi->ppr_options = 0;
6244                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6245                         spi->bus_width = cur_spi->bus_width;
6246                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6247                         spi->bus_width = 0;
6248                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6249                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6250                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6251                 }
6252                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6253                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6254                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6255                   && (inq_data->flags & SID_Sync) == 0
6256                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6257                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6258                  || (cur_spi->sync_offset == 0)
6259                  || (cur_spi->sync_period == 0)) {
6260                         /* Force async */
6261                         spi->sync_period = 0;
6262                         spi->sync_offset = 0;
6263                 }
6264
6265                 switch (spi->bus_width) {
6266                 case MSG_EXT_WDTR_BUS_32_BIT:
6267                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6268                           || (inq_data->flags & SID_WBus32) != 0
6269                           || cts->type == CTS_TYPE_USER_SETTINGS)
6270                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6271                                 break;
6272                         /* Fall Through to 16-bit */
6273                 case MSG_EXT_WDTR_BUS_16_BIT:
6274                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6275                           || (inq_data->flags & SID_WBus16) != 0
6276                           || cts->type == CTS_TYPE_USER_SETTINGS)
6277                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6278                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6279                                 break;
6280                         }
6281                         /* Fall Through to 8-bit */
6282                 default: /* New bus width?? */
6283                 case MSG_EXT_WDTR_BUS_8_BIT:
6284                         /* All targets can do this */
6285                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6286                         break;
6287                 }
6288
6289                 spi3caps = cpi.xport_specific.spi.ppr_options;
6290                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6291                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6292                         spi3caps &= inq_data->spi3data;
6293
6294                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6295                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6296
6297                 if ((spi3caps & SID_SPI_IUS) == 0)
6298                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6299
6300                 if ((spi3caps & SID_SPI_QAS) == 0)
6301                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6302
6303                 /* No PPR options are allowed unless we are wide */
6304                 if (spi->bus_width == 0)
6305                         spi->ppr_options = 0;
6306
6307                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6308                         /*
6309                          * Can't tag queue without disconnection.
6310                          */
6311                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6312                         scsi->valid |= CTS_SCSI_VALID_TQ;
6313                 }
6314
6315                 /*
6316                  * If we are currently performing tagged transactions to
6317                  * this device and want to change its negotiation parameters,
6318                  * go non-tagged for a bit to give the controller a chance to
6319                  * negotiate unhampered by tag messages.
6320                  */
6321                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6322                  && (device->inq_flags & SID_CmdQue) != 0
6323                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6324                  && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6325                                    CTS_SPI_VALID_SYNC_OFFSET|
6326                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
6327                         xpt_toggle_tags(cts->ccb_h.path);
6328         }
6329
6330         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6331          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6332                 int device_tagenb;
6333
6334                 /*
6335                  * If we are transitioning from tags to no-tags or
6336                  * vice-versa, we need to carefully freeze and restart
6337                  * the queue so that we don't overlap tagged and non-tagged
6338                  * commands.  We also temporarily stop tags if there is
6339                  * a change in transfer negotiation settings to allow
6340                  * "tag-less" negotiation.
6341                  */
6342                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6343                  || (device->inq_flags & SID_CmdQue) != 0)
6344                         device_tagenb = TRUE;
6345                 else
6346                         device_tagenb = FALSE;
6347
6348                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6349                   && device_tagenb == FALSE)
6350                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6351                   && device_tagenb == TRUE)) {
6352
6353                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6354                                 /*
6355                                  * Delay change to use tags until after a
6356                                  * few commands have gone to this device so
6357                                  * the controller has time to perform transfer
6358                                  * negotiations without tagged messages getting
6359                                  * in the way.
6360                                  */
6361                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6362                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6363                         } else {
6364                                 struct ccb_relsim crs;
6365
6366                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6367                                 device->inq_flags &= ~SID_CmdQue;
6368                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
6369                                                     sim->max_dev_openings);
6370                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6371                                 device->tag_delay_count = 0;
6372
6373                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6374                                               /*priority*/1);
6375                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
6376                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6377                                 crs.openings
6378                                     = crs.release_timeout
6379                                     = crs.qfrozen_cnt
6380                                     = 0;
6381                                 xpt_action((union ccb *)&crs);
6382                         }
6383                 }
6384         }
6385         if (async_update == FALSE)
6386                 (*(sim->sim_action))(sim, (union ccb *)cts);
6387 }
6388
6389 #else /* CAM_NEW_TRAN_CODE */
6390
6391 static void
6392 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6393                           int async_update)
6394 {
6395         struct  cam_sim *sim;
6396         int     qfrozen;
6397
6398         sim = cts->ccb_h.path->bus->sim;
6399         if (async_update == FALSE) {
6400                 struct  scsi_inquiry_data *inq_data;
6401                 struct  ccb_pathinq cpi;
6402                 struct  ccb_trans_settings cur_cts;
6403
6404                 if (device == NULL) {
6405                         cts->ccb_h.status = CAM_PATH_INVALID;
6406                         xpt_done((union ccb *)cts);
6407                         return;
6408                 }
6409
6410                 /*
6411                  * Perform sanity checking against what the
6412                  * controller and device can do.
6413                  */
6414                 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6415                 cpi.ccb_h.func_code = XPT_PATH_INQ;
6416                 xpt_action((union ccb *)&cpi);
6417                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6418                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6419                 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6420                 xpt_action((union ccb *)&cur_cts);
6421                 inq_data = &device->inq_data;
6422
6423                 /* Fill in any gaps in what the user gave us */
6424                 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6425                         cts->sync_period = cur_cts.sync_period;
6426                 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6427                         cts->sync_offset = cur_cts.sync_offset;
6428                 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6429                         cts->bus_width = cur_cts.bus_width;
6430                 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6431                         cts->flags &= ~CCB_TRANS_DISC_ENB;
6432                         cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6433                 }
6434                 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6435                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6436                         cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6437                 }
6438
6439                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6440                   && (inq_data->flags & SID_Sync) == 0)
6441                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6442                  || (cts->sync_offset == 0)
6443                  || (cts->sync_period == 0)) {
6444                         /* Force async */
6445                         cts->sync_period = 0;
6446                         cts->sync_offset = 0;
6447                 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
6448
6449                         if ((inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6450                          && cts->sync_period <= 0x9) {
6451                                 /*
6452                                  * Don't allow DT transmission rates if the
6453                                  * device does not support it.
6454                                  */
6455                                 cts->sync_period = 0xa;
6456                         }
6457                         if ((inq_data->spi3data & SID_SPI_IUS) == 0
6458                          && cts->sync_period <= 0x8) {
6459                                 /*
6460                                  * Don't allow PACE transmission rates
6461                                  * if the device does not support packetized
6462                                  * transfers.
6463                                  */
6464                                 cts->sync_period = 0x9;
6465                         }
6466                 }
6467
6468                 switch (cts->bus_width) {
6469                 case MSG_EXT_WDTR_BUS_32_BIT:
6470                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6471                           || (inq_data->flags & SID_WBus32) != 0)
6472                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6473                                 break;
6474                         /* Fall Through to 16-bit */
6475                 case MSG_EXT_WDTR_BUS_16_BIT:
6476                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6477                           || (inq_data->flags & SID_WBus16) != 0)
6478                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6479                                 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6480                                 break;
6481                         }
6482                         /* Fall Through to 8-bit */
6483                 default: /* New bus width?? */
6484                 case MSG_EXT_WDTR_BUS_8_BIT:
6485                         /* All targets can do this */
6486                         cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6487                         break;
6488                 }
6489
6490                 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6491                         /*
6492                          * Can't tag queue without disconnection.
6493                          */
6494                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6495                         cts->valid |= CCB_TRANS_TQ_VALID;
6496                 }
6497
6498                 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6499                  || (inq_data->flags & SID_CmdQue) == 0
6500                  || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6501                  || (device->quirk->mintags == 0)) {
6502                         /*
6503                          * Can't tag on hardware that doesn't support,
6504                          * Can't tag on hardware that doesn't support tags,
6505                          */
6506                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6507                 }
6508         }
6509
6510         qfrozen = FALSE;
6511         if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6512                 int device_tagenb;
6513
		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				qfrozen = TRUE;
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;
			}
		}
	}

	if (async_update == FALSE) {
		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if ((device->inq_flags & SID_CmdQue) != 0
		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
				   CCB_TRANS_SYNC_OFFSET_VALID|
				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
			xpt_toggle_tags(cts->ccb_h.path);

		(*(sim->sim_action))(sim, (union ccb *)cts);
	}

	if (qfrozen) {
		struct ccb_relsim crs;

		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
			      /*priority*/1);
		crs.ccb_h.func_code = XPT_REL_SIMQ;
		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
		crs.openings
		    = crs.release_timeout
		    = crs.qfrozen_cnt
		    = 0;
		xpt_action((union ccb *)&crs);
	}
}

#endif /* CAM_NEW_TRAN_CODE */

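/*
 * Every XPT_REL_SIMQ release block in this file zeroes openings,
 * release_timeout and qfrozen_cnt so that nothing beyond the freeze
 * itself changes; RELSIM_RELEASE_AFTER_QEMPTY then defers the release
 * until the device queue drains, which is what keeps tagged and
 * non-tagged commands from overlapping across a transition.  A minimal
 * sketch of the idiom, kept out of the build (the function name is
 * illustrative, and the caller is assumed to already hold a valid path):
 */
#if 0
static void
example_quiesce_devq(struct cam_path *path)
{
	struct ccb_relsim crs;

	xpt_freeze_devq(path, /*count*/1);
	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0;
	xpt_action((union ccb *)&crs);
}
#endif
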
static void
xpt_toggle_tags(struct cam_path *path)
{
	struct cam_ed *dev;

	/*
	 * Give controllers a chance to renegotiate
	 * before starting tag operations.  We
	 * "toggle" tagged queuing off then on
	 * which causes the tag enable command delay
	 * counter to come into effect.
	 */
	dev = path->device;
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || ((dev->inq_flags & SID_CmdQue) != 0
	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
		struct ccb_trans_settings cts;

		xpt_setup_ccb(&cts.ccb_h, path, 1);
#ifdef CAM_NEW_TRAN_CODE
		cts.protocol = PROTO_SCSI;
		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
		cts.transport = XPORT_UNSPECIFIED;
		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
		cts.proto_specific.scsi.flags = 0;
		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
#else /* CAM_NEW_TRAN_CODE */
		cts.flags = 0;
		cts.valid = CCB_TRANS_TQ_VALID;
#endif /* CAM_NEW_TRAN_CODE */
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
#ifdef CAM_NEW_TRAN_CODE
		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
#else /* CAM_NEW_TRAN_CODE */
		cts.flags = CCB_TRANS_TAG_ENB;
#endif /* CAM_NEW_TRAN_CODE */
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
	}
}
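
/*
 * The off-then-on sequence above works because the second
 * xpt_set_transfer_settings() call takes the CAM_TAG_DELAY_COUNT path:
 * tags are re-enabled only after a handful of untagged commands have
 * given the controller room to renegotiate.
 */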

static void
xpt_start_tags(struct cam_path *path)
{
	struct ccb_relsim crs;
	struct cam_ed *device;
	struct cam_sim *sim;
	int    newopenings;

	device = path->device;
	sim = path->bus->sim;
	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
	xpt_freeze_devq(path, /*count*/1);
	device->inq_flags |= SID_CmdQue;
	if (device->tag_saved_openings != 0)
		newopenings = device->tag_saved_openings;
	else
		newopenings = min(device->quirk->maxtags,
				  sim->max_tagged_dev_openings);
	xpt_dev_ccbq_resize(path, newopenings);
	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings
	    = crs.release_timeout
	    = crs.qfrozen_cnt
	    = 0;
	xpt_action((union ccb *)&crs);
}
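
/*
 * For example, a device whose quirk entry allows maxtags == 64 on a SIM
 * reporting max_tagged_dev_openings == 32 restarts with 32 openings (the
 * smaller of the two), unless a previously negotiated count was stashed
 * in tag_saved_openings, in which case that count is restored instead.
 */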

static int busses_to_config;
static int busses_to_reset;
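
/*
 * These counters are only touched during initial bus configuration:
 * xptconfigbuscountfunc() builds them up front, and busses_to_config is
 * counted back down (in xptconfigfunc()'s failure path and in
 * xpt_finishconfig()) as each bus finishes its scan.
 */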

static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
	if (bus->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;
		struct ccb_pathinq cpi;
		int can_negotiate;

		busses_to_config++;
		xpt_compile_path(&path, NULL, bus->path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		can_negotiate = cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && can_negotiate)
			busses_to_reset++;
		xpt_release_path(&path);
	}

	return(1);
}
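
/*
 * Returning 1 tells xpt_for_all_busses() to keep walking; returning 0
 * (as xptconfigfunc() below does on a path allocation failure) stops the
 * traversal.  The general shape of such a callback, kept out of the
 * build as an illustration (the function name is hypothetical):
 */
#if 0
static int
example_count_busses(struct cam_eb *bus, void *arg)
{
	int *count = arg;	/* caller passes its counter through arg */

	if (bus->path_id != CAM_XPT_PATH_ID)
		(*count)++;
	return(1);		/* continue with the next bus */
}
#endif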

static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
	struct	cam_path *path;
	union	ccb *work_ccb;

	if (bus->path_id != CAM_XPT_PATH_ID) {
		cam_status status;
		int can_negotiate;

		work_ccb = xpt_alloc_ccb();
		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
					      CAM_TARGET_WILDCARD,
					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: xpt_create_path failed with "
			       "status %#x for bus %d\n", status, bus->path_id);
			kprintf("xptconfigfunc: halting bus configuration\n");
			xpt_free_ccb(work_ccb);
			busses_to_config--;
			xpt_finishconfig(xpt_periph, NULL);
			return(0);
		}
		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: CPI failed on bus %d "
			       "with status %d\n", bus->path_id,
			       work_ccb->ccb_h.status);
			xpt_finishconfig(xpt_periph, work_ccb);
			return(1);
		}

		can_negotiate = work_ccb->cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && (can_negotiate != 0)) {
			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			work_ccb->ccb_h.cbfcnp = NULL;
			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
				  ("Resetting Bus\n"));
			xpt_action(work_ccb);
			xpt_finishconfig(xpt_periph, work_ccb);
		} else {
			/* Act as though we performed a successful BUS RESET */
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			xpt_finishconfig(xpt_periph, work_ccb);
		}
	}

	return(1);
}
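
/*
 * Note the hand-off: whether or not a real reset was issued, the work_ccb
 * reaches xpt_finishconfig() with func_code XPT_RESET_BUS, and the
 * completion handling there turns it into an XPT_SCAN_BUS request, so
 * every scan is preceded, at least nominally, by a bus reset.
 */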

static void
xpt_config(void *arg)
{
	/*
	 * Now that interrupts are enabled, go find our devices
	 */

#ifdef CAMDEBUG
	/* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
	cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
	cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
	if (cam_dflags != CAM_DEBUG_NONE) {
		if (xpt_create_path(&cam_dpath, xpt_periph,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			kprintf("xpt_config: xpt_create_path() failed for debug"
			       " target %d:%d:%d, debugging disabled\n",
			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else
		cam_dpath = NULL;
#else /* !CAM_DEBUG_BUS */
	cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

	/*
	 * Scan all installed busses.
	 */
	xpt_for_all_busses(xptconfigbuscountfunc, NULL);

	if (busses_to_config == 0) {
		/* Call manually because we don't have any busses */
		xpt_finishconfig(xpt_periph, NULL);
	} else {
		if (busses_to_reset > 0 && scsi_delay >= 2000) {
			kprintf("Waiting %d seconds for SCSI "
			       "devices to settle\n", scsi_delay/1000);
		}
		xpt_for_all_busses(xptconfigfunc, NULL);
	}
}
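
/*
 * xpt_config() runs under the config_intrhook boot hold whose handle,
 * xpt_config_hook, is released in xpt_finishconfig() once
 * busses_to_config reaches zero; that is why the comment above can
 * assume interrupts are already enabled.
 */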

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++)
		;

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return(1);
}
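
/*
 * The deliberately empty loop above only counts the peripherals attached
 * to the device; the announcement fires solely when that count is one and
 * the lone peripheral is the pass(4) driver.
 */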

static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct	periph_driver **p_drv;
	int	i;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch(done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				done_ccb->crcn.flags = 0;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = periph_drivers;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xpt_config_hook);
		kfree(xpt_config_hook, M_TEMP);
		xpt_config_hook = NULL;
	}
	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}
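
/*
 * xpt_finishconfig() is thus re-entered per configured bus: first as the
 * XPT_RESET_BUS completion (which re-dispatches the CCB as XPT_SCAN_BUS),
 * then as the scan completion.  Only the final call, when
 * busses_to_config drops to zero, registers the peripheral drivers and
 * releases the boot hold.
 */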

static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
#ifdef CAM_NEW_TRAN_CODE
		cpi->protocol = PROTO_UNSPECIFIED;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->transport = XPORT_UNSPECIFIED;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
#endif /* CAM_NEW_TRAN_CODE */
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}
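
/*
 * As the SIM behind the transport's own bus, the xpt answers only
 * XPT_PATH_INQ, and with deliberately empty capabilities; every other
 * function code comes back CAM_REQ_INVALID.  How a caller interrogates a
 * path with this CCB, assuming it already holds a valid struct cam_path
 * (a sketch kept out of the build, mirroring xptconfigbuscountfunc()
 * above; the function name is hypothetical):
 */
#if 0
static int
example_path_can_negotiate(struct cam_path *path)
{
	struct ccb_pathinq cpi;

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status != CAM_REQ_CMP)
		return(0);
	return((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0);
}
#endif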

/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
 */
static void
xptpoll(struct cam_sim *sim)
{
}

/*
 * Should only be called by the machine interrupt dispatch routines,
 * so put these prototypes here instead of in the header.
 */

static void
swi_cambio(void *arg, void *frame)
{
	camisr(&cam_bioq);
}
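
/*
 * swi_cambio() is the software-interrupt entry point for CAM I/O
 * completion; its sole job is to drain the queue of completed CCBs
 * through camisr().
 */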

static void
camisr(cam_isrq_t *queue)
{
	struct	ccb_hdr *ccb_h;

	crit_enter();
	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int	runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
		splz();

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr\n"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist	*hphead;
			struct cam_ed		*device;
			union ccb		*send_ccb;

			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * A high powered command has completed, so
			 * another slot is available.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				xpt_release_devq(send_ccb->ccb_h.path,
						 /*count*/1, /*run_queue*/TRUE);
			}
		}
		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			if (!SIM_DEAD(ccb_h->path->bus->sim)) {
				ccb_h->path->bus->sim->devq->send_active--;
				ccb_h->path->bus->sim->devq->send_openings++;
			}

			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			  && (ccb_h->status & CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {
				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_RELEASE_SIMQ;
			runq = FALSE;
		}

		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
		 && (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path, /*count*/1,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
	}
	crit_exit();
}
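
/*
 * In summary, for each completed CCB camisr() returns a high-power slot
 * (restarting any queued high-power command), retires the CCB from the
 * device and SIM counts, honors the release-on-complete/queue-empty
 * device flags, restarts tagged queuing once the tag delay expires,
 * restarts the send queue if work remains, and finally hands the CCB to
 * the owning peripheral's completion callback.
 */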

static void
dead_sim_action(struct cam_sim *sim, union ccb *ccb)
{
	ccb->ccb_h.status = CAM_DEV_NOT_THERE;
	xpt_done(ccb);
}
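
/*
 * The dead_sim_* handlers stand in for a SIM that has been marked dead
 * (see the SIM_DEAD() check in camisr() above); any CCB routed to one
 * simply completes with CAM_DEV_NOT_THERE.
 */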

static void
dead_sim_poll(struct cam_sim *sim)
{
}