Handle some of the inquiry flags that have come into
sys/bus/cam/cam_xpt.c
/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.58 2007/12/02 03:41:58 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t       event_enable;   /* Async Event enables */
        void            (*callback)(void *arg, u_int32_t code,
                                    struct cam_path *path, void *args);
        void            *callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
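
/*
 * Build-time example (hedged): assuming CAM_MAX_HIGHPOWER is plumbed
 * through opt_cam.h by the kernel build, the ceiling can be raised in
 * the kernel configuration file:
 *
 *      options CAM_MAX_HIGHPOWER=8
 */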

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct    cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct  cam_ed_qinfo alloc_ccb_entry;
        struct  cam_ed_qinfo send_ccb_entry;
        struct  cam_et   *target;
        lun_id_t         lun_id;
        struct  camq drvq;              /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
        struct  async_list asyncs;      /* Async callback info for this B/T/L */
        struct  periph_list periphs;    /* All attached devices */
        u_int   generation;             /* Generation number */
        struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
        struct  xpt_quirk_entry *quirk; /* Oddities about this device */
                                        /* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
        cam_proto        protocol;
        u_int            protocol_version;
        cam_xport        transport;
        u_int            transport_version;
#endif /* CAM_NEW_TRAN_CODE */
        struct           scsi_inquiry_data inq_data;
        u_int8_t         inq_flags;     /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t         queue_flags;   /* Queue flags from the control page */
        u_int8_t         serial_num_len;
        u_int8_t        *serial_num;
        u_int32_t        qfrozen_cnt;
        u_int32_t        flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
        u_int32_t        tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t        tag_saved_openings;
        u_int32_t        refcount;
        struct           callout c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct  cam_eb  *bus;
        target_id_t     target_id;
        u_int32_t       refcount;
        u_int           generation;
        struct          timeval last_reset;     /* uptime of last reset */
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb)  links;
        path_id_t            path_id;
        struct cam_sim       *sim;
        struct timeval       last_reset;        /* uptime of last reset */
        u_int32_t            flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t            refcount;
        u_int                generation;
};

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb     *bus;
        struct cam_et     *target;
        struct cam_ed     *device;
};

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
#define CAM_QUIRK_NOHILUNS      0x08
        u_int mintags;
        u_int maxtags;
};

static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
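
/*
 * Example (assuming the usual tunable/sysctl plumbing declared above):
 * enable high-LUN probing at boot by setting kern.cam.cam_srch_hi=1 in
 * loader.conf, or at runtime with "sysctl kern.cam.cam_srch_hi=1".
 */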

#define CAM_SCSI2_MAXLUN        8
/*
 * If we're not quirked to limit our search to the first 8 luns,
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define CAN_SRCH_HI_SPARSE(dv)                          \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define CAN_SRCH_HI_DENSE(dv)                           \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
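
/*
 * A hedged sketch of how these macros are meant to be consulted; the real
 * decision lives in the scan completion path (xptscandone()).
 * CAN_SRCH_HI_DENSE() applies while the previous lun answered, letting the
 * scan keep walking a dense LUN space; CAN_SRCH_HI_SPARSE() applies after
 * a missing lun and additionally requires the cam_srch_hi tunable:
 *
 *      if (got_lun ? CAN_SRCH_HI_DENSE(device)
 *                  : CAN_SRCH_HI_SPARSE(device))
 *              lun_id++;
 *
 * ("got_lun" is a hypothetical name here for "the last lun responded".)
 */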

typedef enum {
        XPT_FLAG_OPEN           = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags       flags;
        u_int32_t       generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 *
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
                /*
                 * The Seagate Medalist Pro drives have very poor write
                 * performance with anything more than 2 tags.
                 *
                 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
                 * Drive:  <SEAGATE ST36530N 1444>
                 *
                 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
                 * Drive:  <SEAGATE ST34520W 1281>
                 *
                 * No one has actually reported that the 9G version
                 * (ST39140*) of the Medalist Pro has the same problem, but
                 * we're assuming that it does because the 4G and 6.5G
                 * versions of the drive are broken.
                 */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR:  kern/10398
                 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive:  DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by:  Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR:            misc/18793
                 * Submitted by:  Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                  /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
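
/*
 * A minimal sketch of how the table above is consulted; the real lookup
 * lives in xpt_find_quirk(), which matches a device's inquiry data against
 * the patterns using cam_quirkmatch() with scsi_inquiry_match() as the
 * comparison function.  The fully-wildcarded final entry guarantees that
 * every device matches something:
 *
 *      match = cam_quirkmatch((caddr_t)&device->inq_data,
 *                             (caddr_t)xpt_quirk_table,
 *                             xpt_quirk_table_size,
 *                             sizeof(*xpt_quirk_table), scsi_inquiry_match);
 *      device->quirk = (struct xpt_quirk_entry *)match;
 */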

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
        { "xpt", XPT_CDEV_MAJOR, 0 },
        .d_open = xptopen,
        .d_close = xptclose,
        .d_ioctl = xptioctl
};

static struct intr_config_hook *xpt_config_hook;

static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
        .sim_action =   dead_sim_action,
        .sim_poll =     dead_sim_poll,
        .sim_name =     "dead_sim",
};

#define SIM_DEAD(sim)   ((sim) == &cam_dead_sim)
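
/*
 * A hedged usage note: once xpt_bus_deregister() has pointed a bus at
 * cam_dead_sim, code paths below are expected to test for it and bail
 * out early rather than queue work to the departed controller, e.g.:
 *
 *      if (SIM_DEAD(bus->sim))
 *              return;
 */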

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef  CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static void     xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status       xpt_compile_path(struct cam_path *new_path,
                                         struct cam_periph *perph,
                                         path_id_t path_id,
                                         target_id_t target_id,
                                         lun_id_t lun_id);

static void             xpt_release_path(struct cam_path *path);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static void             xpt_dev_async(u_int32_t async_code,
                                      struct cam_eb *bus,
                                      struct cam_et *target,
                                      struct cam_ed *device,
                                      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                  u_int32_t new_priority);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                         int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
                 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                                  lun_id_t lun_id);
static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                                    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void      xpt_scan_lun(struct cam_periph *periph,
                              struct cam_path *path, cam_flags flags,
                              union ccb *ccb);
static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t    xptconfigbuscountfunc;
static xpt_busfunc_t    xptconfigfunc;
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void      camisr(cam_isrq_t *queue);
#if 0
static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void      xptasync(struct cam_periph *periph,
                          u_int32_t code, cam_path *path);
#endif
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                                            void *arg);
#endif
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
#ifdef notusedyet
static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                                            void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static cam_status       proberegister(struct cam_periph *periph,
                                      void *arg);
static void      probeschedule(struct cam_periph *probe_periph);
static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
static void      proberequestdefaultnegotiation(struct cam_periph *periph);
static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
static void      probecleanup(struct cam_periph *periph);
static void      xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void      xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                           struct cam_ed *device,
                                           int async_update);
static void      xpt_toggle_tags(struct cam_path *path);
static void      xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int     retval;

        if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}

static void
xpt_periph_init(void)
{
        dev_ops_add(&xpt_ops, 0, 0);
        make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct dev_open_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((ap->a_oflags & O_NONBLOCK) != 0) {
                kprintf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}

static int
xptclose(struct dev_close_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(struct dev_ioctl_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                kprintf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(ap->a_cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)ap->a_data;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;

                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
        /*
         * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
         * with the peripheral driver name and unit number filled in.  The other
         * fields don't really matter as input.  The passthrough driver name
         * ("pass") and unit number are passed back in the ccb.  The current
         * device generation number, the index into the device peripheral
         * driver list, and the status are also passed back.  Note that
         * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
         * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it within a critical section.
         */
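        /*
         * A hedged userland sketch of driving this ioctl (roughly what
         * libcam does when resolving a peripheral to its passthrough
         * unit; "fd" is assumed to be an open /dev/xpt0):
         *
         *      union ccb ccb;
         *      bzero(&ccb, sizeof(ccb));
         *      strncpy(ccb.cgdl.periph_name, "da",
         *              sizeof(ccb.cgdl.periph_name));
         *      ccb.cgdl.unit_number = 0;
         *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
         *          ccb.ccb_h.status == CAM_REQ_CMP)
         *              printf("pass%d\n", ccb.cgdl.unit_number);
         */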
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                u_int unit;
                u_int cur_generation;
                int base_periph_found;
                int splbreaknum;

                ccb = (union ccb *)ap->a_data;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                /*
                 * Every 100 devices, we want to call splz() to check for
                 * pending interrupts and give the software interrupt
                 * handler a chance to run.
                 *
                 * Most systems won't run into this check, but this should
                 * avoid starvation in the software interrupt handler in
                 * large systems.
                 */
                splbreaknum = 100;

1207                 base_periph_found = 0;
1208
1209                 /*
1210                  * Sanity check -- make sure we don't get a null peripheral
1211                  * driver name.
1212                  */
1213                 if (*ccb->cgdl.periph_name == '\0') {
1214                         error = EINVAL;
1215                         break;
1216                 }
1217
1218                 /* Keep the list from changing while we traverse it */
1219                 crit_enter();
1220 ptstartover:
1221                 cur_generation = xsoftc.generation;
1222
1223                 /* first find our driver in the list of drivers */
1224                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
1225                         if (strcmp((*p_drv)->driver_name, name) == 0)
1226                                 break;
1227                 }
1228
1229                 if (*p_drv == NULL) {
1230                         crit_exit();
1231                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1232                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1233                         *ccb->cgdl.periph_name = '\0';
1234                         ccb->cgdl.unit_number = 0;
1235                         error = ENOENT;
1236                         break;
1237                 }       
1238
1239                 /*
1240                  * Run through every peripheral instance of this driver
1241                  * and check to see whether it matches the unit passed
1242                  * in by the user.  If it does, get out of the loops and
1243                  * find the passthrough driver associated with that
1244                  * peripheral driver.
1245                  */
1246                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
1247
1248                         if (periph->unit_number == unit) {
1249                                 break;
1250                         } else if (--splbreaknum == 0) {
1251                                 splz();
1252                                 splbreaknum = 100;
1253                                 if (cur_generation != xsoftc.generation)
1254                                        goto ptstartover;
1255                         }
1256                 }
1257                 /*
1258                  * If we found the peripheral driver that the user passed
1259                  * in, go through all of the peripheral drivers for that
1260                  * particular device and look for a passthrough driver.
1261                  */
1262                 if (periph != NULL) {
1263                         struct cam_ed *device;
1264                         int i;
1265
1266                         base_periph_found = 1;
1267                         device = periph->path->device;
1268                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
1269                              periph != NULL;
1270                              periph = SLIST_NEXT(periph, periph_links), i++) {
1271                                 /*
1272                                  * Check to see whether we have a
1273                                  * passthrough device or not. 
1274                                  */
1275                                 if (strcmp(periph->periph_name, "pass") == 0) {
1276                                         /*
1277                                          * Fill in the getdevlist fields.
1278                                          */
1279                                         strcpy(ccb->cgdl.periph_name,
1280                                                periph->periph_name);
1281                                         ccb->cgdl.unit_number =
1282                                                 periph->unit_number;
1283                                         if (SLIST_NEXT(periph, periph_links))
1284                                                 ccb->cgdl.status =
1285                                                         CAM_GDEVLIST_MORE_DEVS;
1286                                         else
1287                                                 ccb->cgdl.status =
1288                                                        CAM_GDEVLIST_LAST_DEVICE;
1289                                         ccb->cgdl.generation =
1290                                                 device->generation;
1291                                         ccb->cgdl.index = i;
1292                                         /*
1293                                          * Fill in some CCB header fields
1294                                          * that the user may want.
1295                                          */
1296                                         ccb->ccb_h.path_id =
1297                                                 periph->path->bus->path_id;
1298                                         ccb->ccb_h.target_id =
1299                                                 periph->path->target->target_id;
1300                                         ccb->ccb_h.target_lun =
1301                                                 periph->path->device->lun_id;
1302                                         ccb->ccb_h.status = CAM_REQ_CMP;
1303                                         break;
1304                                 }
1305                         }
1306                 }
1307
1308                 /*
1309                  * If the periph is null here, one of two things has
1310                  * happened.  The first possibility is that we couldn't
1311                  * find the unit number of the particular peripheral driver
1312                  * that the user is asking about.  e.g. the user asks for
1313                  * the passthrough driver for "da11".  We find the list of
1314                  * "da" peripherals all right, but there is no unit 11.
1315                  * The other possibility is that we went through the list
1316                  * of peripheral drivers attached to the device structure,
1317                  * but didn't find one with the name "pass".  Either way,
1318                  * we return ENOENT, since we couldn't find something.
1319                  */
1320                 if (periph == NULL) {
1321                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1322                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1323                         *ccb->cgdl.periph_name = '\0';
1324                         ccb->cgdl.unit_number = 0;
1325                         error = ENOENT;
1326                         /*
1327                          * It is unfortunate that this is even necessary,
1328                          * but there are many, many clueless users out there.
1329                          * If base_periph_found is set, the user is
1330                          * looking for the passthrough driver, but
1331                          * doesn't have one in his kernel.
1332                          */
1333                         if (base_periph_found == 1) {
1334                                 kprintf("xptioctl: pass driver is not in the "
1335                                        "kernel\n");
1336                                 kprintf("xptioctl: put \"device pass0\" in "
1337                                        "your kernel config file\n");
1338                         }
1339                 }
1340                 crit_exit();
1341                 break;
1342                 }
1343         default:
1344                 error = ENOTTY;
1345                 break;
1346         }
1347
1348         return(error);
1349 }
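
/*
 * A sketch of the userland side of the CAMGETPASSTHRU case handled
 * above.  This block is illustrative only and is not compiled; the
 * header paths, device node, and error handling are assumptions, but
 * the cgdl fields mirror the ones xptioctl() fills in.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
/* CAM userland headers; the install paths here are an assumption. */
#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/scsi/scsi_pass.h>

static int
lookup_pass_device(const char *name, int unit)
{
	union ccb ccb;
	int fd;

	if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
		return (-1);
	memset(&ccb, 0, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_GDEVLIST;
	strncpy(ccb.cgdl.periph_name, name, DEV_IDLEN);
	ccb.cgdl.unit_number = unit;
	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1 ||
	    ccb.cgdl.status == CAM_GDEVLIST_ERROR) {
		/* ENOENT: no such unit, or no pass driver configured. */
		close(fd);
		return (-1);
	}
	/* e.g. "da1 -> pass1" */
	printf("%s%d -> %s%d\n", name, unit,
	       ccb.cgdl.periph_name, ccb.cgdl.unit_number);
	close(fd);
	return (ccb.cgdl.unit_number);
}
#endif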
1350
1351 static int
1352 cam_module_event_handler(module_t mod, int what, void *arg)
1353 {
1354         if (what == MOD_LOAD) {
1355                 xpt_init(NULL);
1356         } else if (what == MOD_UNLOAD) {
1357                 return EBUSY;
1358         } else {
1359                 return EOPNOTSUPP;
1360         }
1361
1362         return 0;
1363 }
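
/*
 * For context: a handler like the one above is attached to the module
 * system through a moduledata_t.  A minimal sketch of that wiring (the
 * exact declaration lives elsewhere in the driver; this shape is
 * illustrative).  Note that returning EBUSY from MOD_UNLOAD, as above,
 * keeps the module pinned in the kernel.
 */
#if 0
static moduledata_t cam_moduledata = {
	"cam",				/* module name */
	cam_module_event_handler,	/* event handler */
	NULL				/* extra data */
};

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
#endif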
1364
1365 /* Functions accessed by the peripheral drivers */
1366 static void
1367 xpt_init(void *dummy)
1368 {
1369         struct cam_sim *xpt_sim;
1370         struct cam_path *path;
1371         struct cam_devq *devq;
1372         cam_status status;
1373
1374         TAILQ_INIT(&xpt_busses);
1375         TAILQ_INIT(&cam_bioq);
1376         SLIST_INIT(&ccb_freeq);
1377         STAILQ_INIT(&highpowerq);
1378
1379         /*
1380          * The xpt layer is, itself, the equivalent of a SIM.
1381          * Allow 16 ccbs in the ccb pool for it.  This should
1382          * give decent parallelism when we probe busses and
1383          * perform other XPT functions.
1384          */
1385         devq = cam_simq_alloc(16);
1386         xpt_sim = cam_sim_alloc(xptaction,
1387                                 xptpoll,
1388                                 "xpt",
1389                                 /*softc*/NULL,
1390                                 /*unit*/0,
1391                                 /*max_dev_transactions*/0,
1392                                 /*max_tagged_dev_transactions*/0,
1393                                 devq);
1394         cam_simq_release(devq);
1395         xpt_max_ccbs = 16;
1396                                 
1397         xpt_bus_register(xpt_sim, /*bus #*/0);
1398
1399         /*
1400          * Looking at the XPT from the SIM layer, the XPT is
1401          * the equivalent of a peripheral driver.  Allocate
1402          * a peripheral driver entry for us.
1403          */
1404         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1405                                       CAM_TARGET_WILDCARD,
1406                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1407                 kprintf("xpt_init: xpt_create_path failed with status %#x,"
1408                        " failing attach\n", status);
1409                 return;
1410         }
1411
1412         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1413                          path, NULL, 0, NULL);
1414         xpt_free_path(path);
1415
1416         xpt_sim->softc = xpt_periph;
1417
1418         /*
1419          * Register a callback for when interrupts are enabled.
1420          */
1421         xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
1422                                   M_TEMP, M_INTWAIT | M_ZERO);
1423         xpt_config_hook->ich_func = xpt_config;
1424         xpt_config_hook->ich_desc = "xpt";
1425         xpt_config_hook->ich_order = 1000;
1426         if (config_intrhook_establish(xpt_config_hook) != 0) {
1427                 kfree(xpt_config_hook, M_TEMP);
1428                 kprintf("xpt_init: config_intrhook_establish failed "
1429                        "- failing attach\n");
1430         }
1431
1432         /* Install our software interrupt handlers */
1433         register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
1434 }
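
/*
 * The config hook registered above defers xpt_config() until
 * interrupts are enabled, and the boot does not proceed to mounting
 * root until every hook has been disestablished.  A minimal sketch of
 * the consumer side (xpt_config() is set up as the hook function
 * above; the shape below is illustrative, not a copy of it):
 */
#if 0
static void
example_config_func(void *arg)
{
	/* ... start the initial bus scans ... */

	/*
	 * Once initial configuration is complete, release the hook so
	 * the boot can continue.
	 */
	config_intrhook_disestablish(xpt_config_hook);
	kfree(xpt_config_hook, M_TEMP);
	xpt_config_hook = NULL;
}
#endif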
1435
1436 static cam_status
1437 xptregister(struct cam_periph *periph, void *arg)
1438 {
1439         if (periph == NULL) {
1440                 kprintf("xptregister: periph was NULL!!\n");
1441                 return(CAM_REQ_CMP_ERR);
1442         }
1443
1444         periph->softc = NULL;
1445
1446         xpt_periph = periph;
1447
1448         return(CAM_REQ_CMP);
1449 }
1450
1451 int32_t
1452 xpt_add_periph(struct cam_periph *periph)
1453 {
1454         struct cam_ed *device;
1455         int32_t  status;
1456         struct periph_list *periph_head;
1457
1458         device = periph->path->device;
1459
1460         status = CAM_REQ_CMP;
1461
1462         if (device != NULL) {
1463                 periph_head = &device->periphs;
1464
1465                 /*
1466                  * Make room for this peripheral
1467                  * so it will fit in the queue
1468                  * when it's scheduled to run
1469                  */
1470                 crit_enter();
1471                 status = camq_resize(&device->drvq,
1472                                      device->drvq.array_size + 1);
1473
1474                 device->generation++;
1475
1476                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1477                 crit_exit();
1478         }
1479
1480         xsoftc.generation++;
1481
1482         return (status);
1483 }
1484
1485 void
1486 xpt_remove_periph(struct cam_periph *periph)
1487 {
1488         struct cam_ed *device;
1489
1490         device = periph->path->device;
1491
1492         if (device != NULL) {
1493                 struct periph_list *periph_head;
1494
1495                 periph_head = &device->periphs;
1496                 
1497                 /* Release the slot for this peripheral */
1498                 crit_enter();
1499                 camq_resize(&device->drvq, device->drvq.array_size - 1);
1500
1501                 device->generation++;
1502
1503                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1504                 crit_exit();
1505         }
1506
1507         xsoftc.generation++;
1508
1509 }
1510
1511 #ifdef CAM_NEW_TRAN_CODE
1512
1513 void
1514 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1515 {
1516         struct  ccb_pathinq cpi;
1517         struct  ccb_trans_settings cts;
1518         struct  cam_path *path;
1519         u_int   speed;
1520         u_int   freq;
1521         u_int   mb;
1522
1523         path = periph->path;
1524         /*
1525          * To ensure that this is printed in one piece,
1526          * mask out CAM interrupts.
1527          */
1528         crit_enter();
1529         kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1530                periph->periph_name, periph->unit_number,
1531                path->bus->sim->sim_name,
1532                path->bus->sim->unit_number,
1533                path->bus->sim->bus_id,
1534                path->target->target_id,
1535                path->device->lun_id);
1536         kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1537         scsi_print_inquiry(&path->device->inq_data);
1538         if (bootverbose && path->device->serial_num_len > 0) {
1539                 /* Don't wrap the screen  - print only the first 60 chars */
1540                 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1541                        periph->unit_number, path->device->serial_num);
1542         }
1543         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1544         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1545         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1546         xpt_action((union ccb*)&cts);
1547
1548         /* Ask the SIM for its base transfer speed */
1549         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1550         cpi.ccb_h.func_code = XPT_PATH_INQ;
1551         xpt_action((union ccb *)&cpi);
1552
1553         speed = cpi.base_transfer_speed;
1554         freq = 0;
1555         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1556                 struct  ccb_trans_settings_spi *spi;
1557
1558                 spi = &cts.xport_specific.spi;
1559                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1560                   && spi->sync_offset != 0) {
1561                         freq = scsi_calc_syncsrate(spi->sync_period);
1562                         speed = freq;
1563                 }
1564
1565                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1566                         speed *= (0x01 << spi->bus_width);
1567         }
1568         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1569                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1570                 if (fc->valid & CTS_FC_VALID_SPEED) {
1571                         speed = fc->bitrate;
1572                 }
1573         }
1574
1575         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1576                 struct  ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1577                 if (sas->valid & CTS_SAS_VALID_SPEED) {
1578                         speed = sas->bitrate;
1579                 }
1580         }
1581
1582         mb = speed / 1000;
1583         if (mb > 0)
1584                 kprintf("%s%d: %d.%03dMB/s transfers",
1585                        periph->periph_name, periph->unit_number,
1586                        mb, speed % 1000);
1587         else
1588                 kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1589                        periph->unit_number, speed);
1590         /* Report additional information about SPI connections */
1591         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1592                 struct  ccb_trans_settings_spi *spi;
1593
1594                 spi = &cts.xport_specific.spi;
1595                 if (freq != 0) {
1596                         kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1597                                freq % 1000,
1598                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1599                              ? " DT" : "",
1600                                spi->sync_offset);
1601                 }
1602                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1603                  && spi->bus_width > 0) {
1604                         if (freq != 0) {
1605                                 kprintf(", ");
1606                         } else {
1607                                 kprintf(" (");
1608                         }
1609                         kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
1610                 } else if (freq != 0) {
1611                         kprintf(")");
1612                 }
1613         }
1614         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1615                 struct  ccb_trans_settings_fc *fc;
1616
1617                 fc = &cts.xport_specific.fc;
1618                 if (fc->valid & CTS_FC_VALID_WWNN)
1619                         kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
1620                 if (fc->valid & CTS_FC_VALID_WWPN)
1621                         kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
1622                 if (fc->valid & CTS_FC_VALID_PORT)
1623                         kprintf(" PortID 0x%x", fc->port);
1624         }
1625
1626         if (path->device->inq_flags & SID_CmdQue
1627          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1628                 kprintf("\n%s%d: Tagged Queueing Enabled",
1629                        periph->periph_name, periph->unit_number);
1630         }
1631         kprintf("\n");
1632
1633         /*
1634          * We only want to print the caller's announce string if they've
1635          * passed one in.
1636          */
1637         if (announce_string != NULL)
1638                 kprintf("%s%d: %s\n", periph->periph_name,
1639                        periph->unit_number, announce_string);
1640         crit_exit();
1641 }
1642 #else /* CAM_NEW_TRAN_CODE */
1643 void
1644 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1645 {
1646         u_int mb;
1647         struct cam_path *path;
1648         struct ccb_trans_settings cts;
1649
1650         path = periph->path;
1651         /*
1652          * To ensure that this is printed in one piece,
1653          * mask out CAM interrupts.
1654          */
1655         crit_enter();
1656         kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1657                periph->periph_name, periph->unit_number,
1658                path->bus->sim->sim_name,
1659                path->bus->sim->unit_number,
1660                path->bus->sim->bus_id,
1661                path->target->target_id,
1662                path->device->lun_id);
1663         kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1664         scsi_print_inquiry(&path->device->inq_data);
1665         if ((bootverbose)
1666          && (path->device->serial_num_len > 0)) {
1667                 /* Don't wrap the screen  - print only the first 60 chars */
1668                 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1669                        periph->unit_number, path->device->serial_num);
1670         }
1671         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1672         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1673         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1674         xpt_action((union ccb*)&cts);
1675         if (cts.ccb_h.status == CAM_REQ_CMP) {
1676                 u_int speed;
1677                 u_int freq;
1678
1679                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1680                   && cts.sync_offset != 0) {
1681                         freq = scsi_calc_syncsrate(cts.sync_period);
1682                         speed = freq;
1683                 } else {
1684                         struct ccb_pathinq cpi;
1685
1686                         /* Ask the SIM for its base transfer speed */
1687                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1688                         cpi.ccb_h.func_code = XPT_PATH_INQ;
1689                         xpt_action((union ccb *)&cpi);
1690
1691                         speed = cpi.base_transfer_speed;
1692                         freq = 0;
1693                 }
1694                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1695                         speed *= (0x01 << cts.bus_width);
1696                 mb = speed / 1000;
1697                 if (mb > 0)
1698                         kprintf("%s%d: %d.%03dMB/s transfers",
1699                                periph->periph_name, periph->unit_number,
1700                                mb, speed % 1000);
1701                 else
1702                         kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1703                                periph->unit_number, speed);
1704                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1705                  && cts.sync_offset != 0) {
1706                         kprintf(" (%d.%03dMHz, offset %d", freq / 1000,
1707                                freq % 1000, cts.sync_offset);
1708                 }
1709                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1710                  && cts.bus_width > 0) {
1711                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1712                          && cts.sync_offset != 0) {
1713                                 kprintf(", ");
1714                         } else {
1715                                 kprintf(" (");
1716                         }
1717                         kprintf("%dbit)", 8 * (0x01 << cts.bus_width));
1718                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1719                         && cts.sync_offset != 0) {
1720                         kprintf(")");
1721                 }
1722
1723                 if (path->device->inq_flags & SID_CmdQue
1724                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1725                         kprintf(", Tagged Queueing Enabled");
1726                 }
1727
1728                 kprintf("\n");
1729         } else if (path->device->inq_flags & SID_CmdQue
1730                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1731                 kprintf("%s%d: Tagged Queueing Enabled\n",
1732                        periph->periph_name, periph->unit_number);
1733         }
1734
1735         /*
1736          * We only want to print the caller's announce string if they've
1737          * passed one in.
1738          */
1739         if (announce_string != NULL)
1740                 kprintf("%s%d: %s\n", periph->periph_name,
1741                        periph->unit_number, announce_string);
1742         crit_exit();
1743 }
1744
1745 #endif /* CAM_NEW_TRAN_CODE */
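
/*
 * A worked example of the transfer-rate math in xpt_announce_periph(),
 * assuming the usual scsi_calc_syncsrate() mapping from SPI period
 * factors to kHz: a Fast-Wide target negotiates sync_period factor
 * 0x19 (a 100ns period), so freq = 10000 (10 MHz).  With a 16-bit bus
 * (bus_width == 1) the speed doubles: 10000 * (0x01 << 1) = 20000 KB/s,
 * which prints as "20.000MB/s transfers (10.000MHz, offset N, 16bit)".
 */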
1746
1747 static dev_match_ret
1748 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1749             struct cam_eb *bus)
1750 {
1751         dev_match_ret retval;
1752         int i;
1753
1754         retval = DM_RET_NONE;
1755
1756         /*
1757          * If we aren't given something to match against, that's an error.
1758          */
1759         if (bus == NULL)
1760                 return(DM_RET_ERROR);
1761
1762         /*
1763          * If there are no match entries, then this bus matches no
1764          * matter what.
1765          */
1766         if ((patterns == NULL) || (num_patterns == 0))
1767                 return(DM_RET_DESCEND | DM_RET_COPY);
1768
1769         for (i = 0; i < num_patterns; i++) {
1770                 struct bus_match_pattern *cur_pattern;
1771
1772                 /*
1773                  * If the pattern in question isn't for a bus node, we
1774                  * aren't interested.  However, we do indicate to the
1775                  * calling routine that we should continue descending the
1776                  * tree, since the user wants to match against lower-level
1777                  * EDT elements.
1778                  */
1779                 if (patterns[i].type != DEV_MATCH_BUS) {
1780                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1781                                 retval |= DM_RET_DESCEND;
1782                         continue;
1783                 }
1784
1785                 cur_pattern = &patterns[i].pattern.bus_pattern;
1786
1787                 /*
1788                  * If they want to match any bus node, we give them this
1789                  * bus node.
1790                  */
1791                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1792                         /* set the copy flag */
1793                         retval |= DM_RET_COPY;
1794
1795                         /*
1796                          * If we've already decided on an action, go ahead
1797                          * and return.
1798                          */
1799                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1800                                 return(retval);
1801                 }
1802
1803                 /*
1804                  * Not sure why someone would do this...
1805                  */
1806                 if (cur_pattern->flags == BUS_MATCH_NONE)
1807                         continue;
1808
1809                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1810                  && (cur_pattern->path_id != bus->path_id))
1811                         continue;
1812
1813                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1814                  && (cur_pattern->bus_id != bus->sim->bus_id))
1815                         continue;
1816
1817                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1818                  && (cur_pattern->unit_number != bus->sim->unit_number))
1819                         continue;
1820
1821                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1822                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1823                              DEV_IDLEN) != 0))
1824                         continue;
1825
1826                 /*
1827                  * If we get to this point, the user definitely wants 
1828                  * information on this bus.  So tell the caller to copy the
1829                  * data out.
1830                  */
1831                 retval |= DM_RET_COPY;
1832
1833                 /*
1834                  * If the return action has been set to descend, then we
1835                  * know that we've already seen a non-bus matching
1836                  * expression, therefore we need to further descend the tree.
1837                  * This won't change by continuing around the loop, so we
1838                  * go ahead and return.  If we haven't seen a non-bus
1839                  * matching expression, we keep going around the loop until
1840                  * we exhaust the matching expressions.  We'll set the stop
1841                  * flag once we fall out of the loop.
1842                  */
1843                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1844                         return(retval);
1845         }
1846
1847         /*
1848          * If the return action hasn't been set to descend yet, that means
1849          * we haven't seen anything other than bus matching patterns.  So
1850          * tell the caller to stop descending the tree -- the user doesn't
1851          * want to match against lower level tree elements.
1852          */
1853         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1854                 retval |= DM_RET_STOP;
1855
1856         return(retval);
1857 }
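
/*
 * A sketch of the pattern this function consumes.  A userland caller
 * that wants every bus driven by an "ahc" SIM would fill in one entry
 * like the fragment below (the surrounding ccb setup is omitted and
 * is an assumption):
 */
#if 0
struct dev_match_pattern pattern;

bzero(&pattern, sizeof(pattern));
pattern.type = DEV_MATCH_BUS;
pattern.pattern.bus_pattern.flags = BUS_MATCH_NAME;
strncpy(pattern.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
/*
 * With only bus patterns present, xptbusmatch() sets DM_RET_COPY for
 * each matching bus and finishes with DM_RET_STOP, so the traversal
 * never descends to targets.
 */
#endif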
1858
1859 static dev_match_ret
1860 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1861                struct cam_ed *device)
1862 {
1863         dev_match_ret retval;
1864         int i;
1865
1866         retval = DM_RET_NONE;
1867
1868         /*
1869          * If we aren't given something to match against, that's an error.
1870          */
1871         if (device == NULL)
1872                 return(DM_RET_ERROR);
1873
1874         /*
1875          * If there are no match entries, then this device matches no
1876          * matter what.
1877          */
1878         if ((patterns == NULL) || (num_patterns == 0))
1879                 return(DM_RET_DESCEND | DM_RET_COPY);
1880
1881         for (i = 0; i < num_patterns; i++) {
1882                 struct device_match_pattern *cur_pattern;
1883
1884                 /*
1885                  * If the pattern in question isn't for a device node, we
1886                  * aren't interested.
1887                  */
1888                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1889                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1890                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1891                                 retval |= DM_RET_DESCEND;
1892                         continue;
1893                 }
1894
1895                 cur_pattern = &patterns[i].pattern.device_pattern;
1896
1897                 /*
1898                  * If they want to match any device node, we give them any
1899                  * device node.
1900                  */
1901                 if (cur_pattern->flags == DEV_MATCH_ANY) {
1902                         /* set the copy flag */
1903                         retval |= DM_RET_COPY;
1904
1906                         /*
1907                          * If we've already decided on an action, go ahead
1908                          * and return.
1909                          */
1910                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1911                                 return(retval);
1912                 }
1913
1914                 /*
1915                  * Not sure why someone would do this...
1916                  */
1917                 if (cur_pattern->flags == DEV_MATCH_NONE)
1918                         continue;
1919
1920                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1921                  && (cur_pattern->path_id != device->target->bus->path_id))
1922                         continue;
1923
1924                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1925                  && (cur_pattern->target_id != device->target->target_id))
1926                         continue;
1927
1928                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1929                  && (cur_pattern->target_lun != device->lun_id))
1930                         continue;
1931
1932                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1933                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1934                                     (caddr_t)&cur_pattern->inq_pat,
1935                                     1, sizeof(cur_pattern->inq_pat),
1936                                     scsi_static_inquiry_match) == NULL))
1937                         continue;
1938
1939                 /*
1940                  * If we get to this point, the user definitely wants 
1941                  * information on this device.  So tell the caller to copy
1942                  * the data out.
1943                  */
1944                 retval |= DM_RET_COPY;
1945
1946                 /*
1947                  * If the return action has been set to descend, then we
1948                  * know that we've already seen a peripheral matching
1949                  * expression, therefore we need to further descend the tree.
1950                  * This won't change by continuing around the loop, so we
1951                  * go ahead and return.  If we haven't seen a peripheral
1952                  * matching expression, we keep going around the loop until
1953                  * we exhaust the matching expressions.  We'll set the stop
1954                  * flag once we fall out of the loop.
1955                  */
1956                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1957                         return(retval);
1958         }
1959
1960         /*
1961          * If the return action hasn't been set to descend yet, that means
1962          * we haven't seen any peripheral matching patterns.  So tell the
1963          * caller to stop descending the tree -- the user doesn't want to
1964          * match against lower level tree elements.
1965          */
1966         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1967                 retval |= DM_RET_STOP;
1968
1969         return(retval);
1970 }
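
/*
 * The DEV_MATCH_INQUIRY case above accepts the same wildcard syntax
 * as the quirk tables ("*" and "?" via cam_strmatch()).  A sketch of
 * a device pattern matching every fixed direct-access SEAGATE disk
 * (ccb setup omitted, as in the bus example above):
 */
#if 0
struct dev_match_pattern pattern;
struct device_match_pattern *dp = &pattern.pattern.device_pattern;

bzero(&pattern, sizeof(pattern));
pattern.type = DEV_MATCH_DEVICE;
dp->flags = DEV_MATCH_INQUIRY;
dp->inq_pat.type = T_DIRECT;
dp->inq_pat.media_type = SIP_MEDIA_FIXED;
strcpy(dp->inq_pat.vendor, "SEAGATE");
strcpy(dp->inq_pat.product, "*");
strcpy(dp->inq_pat.revision, "*");
#endif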
1971
1972 /*
1973  * Match a single peripheral against any number of match patterns.
1974  */
1975 static dev_match_ret
1976 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1977                struct cam_periph *periph)
1978 {
1979         dev_match_ret retval;
1980         int i;
1981
1982         /*
1983          * If we aren't given something to match against, that's an error.
1984          */
1985         if (periph == NULL)
1986                 return(DM_RET_ERROR);
1987
1988         /*
1989          * If there are no match entries, then this peripheral matches no
1990          * matter what.
1991          */
1992         if ((patterns == NULL) || (num_patterns == 0))
1993                 return(DM_RET_STOP | DM_RET_COPY);
1994
1995         /*
1996          * There aren't any nodes below a peripheral node, so there's no
1997          * reason to descend the tree any further.
1998          */
1999         retval = DM_RET_STOP;
2000
2001         for (i = 0; i < num_patterns; i++) {
2002                 struct periph_match_pattern *cur_pattern;
2003
2004                 /*
2005                  * If the pattern in question isn't for a peripheral, we
2006                  * aren't interested.
2007                  */
2008                 if (patterns[i].type != DEV_MATCH_PERIPH)
2009                         continue;
2010
2011                 cur_pattern = &patterns[i].pattern.periph_pattern;
2012
2013                 /*
2014                  * If they want to match on anything, then we will do so.
2015                  */
2016                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2017                         /* set the copy flag */
2018                         retval |= DM_RET_COPY;
2019
2020                         /*
2021                          * We've already set the return action to stop,
2022                          * since there are no nodes below peripherals in
2023                          * the tree.
2024                          */
2025                         return(retval);
2026                 }
2027
2028                 /*
2029                  * Not sure why someone would do this...
2030                  */
2031                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2032                         continue;
2033
2034                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2035                  && (cur_pattern->path_id != periph->path->bus->path_id))
2036                         continue;
2037
2038                 /*
2039                  * For the target and lun id's, we have to make sure the
2040                  * target and lun pointers aren't NULL.  The xpt peripheral
2041                  * has a wildcard target and device.
2042                  */
2043                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2044                  && ((periph->path->target == NULL)
2045                  ||(cur_pattern->target_id != periph->path->target->target_id)))
2046                         continue;
2047
2048                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2049                  && ((periph->path->device == NULL)
2050                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
2051                         continue;
2052
2053                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2054                  && (cur_pattern->unit_number != periph->unit_number))
2055                         continue;
2056
2057                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2058                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
2059                              DEV_IDLEN) != 0))
2060                         continue;
2061
2062                 /*
2063                  * If we get to this point, the user definitely wants 
2064                  * information on this peripheral.  So tell the caller to
2065                  * copy the data out.
2066                  */
2067                 retval |= DM_RET_COPY;
2068
2069                 /*
2070                  * The return action has already been set to stop, since
2071                  * peripherals don't have any nodes below them in the EDT.
2072                  */
2073                 return(retval);
2074         }
2075
2076         /*
2077          * If we get to this point, the peripheral that was passed in
2078          * doesn't match any of the patterns.
2079          */
2080         return(retval);
2081 }
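
/*
 * Peripheral pattern flags combine, so a caller can pin down a single
 * unit.  A sketch (ccb setup again omitted) that matches only "da1":
 */
#if 0
struct dev_match_pattern pattern;
struct periph_match_pattern *pp = &pattern.pattern.periph_pattern;

bzero(&pattern, sizeof(pattern));
pattern.type = DEV_MATCH_PERIPH;
pp->flags = PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
strncpy(pp->periph_name, "da", DEV_IDLEN);
pp->unit_number = 1;
/* xptperiphmatch() yields DM_RET_STOP | DM_RET_COPY on the match. */
#endif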
2082
2083 static int
2084 xptedtbusfunc(struct cam_eb *bus, void *arg)
2085 {
2086         struct ccb_dev_match *cdm;
2087         dev_match_ret retval;
2088
2089         cdm = (struct ccb_dev_match *)arg;
2090
2091         /*
2092          * If our position is for something deeper in the tree, that means
2093          * that we've already seen this node.  So, we keep going down.
2094          */
2095         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2096          && (cdm->pos.cookie.bus == bus)
2097          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2098          && (cdm->pos.cookie.target != NULL))
2099                 retval = DM_RET_DESCEND;
2100         else
2101                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2102
2103         /*
2104          * If we got an error, bail out of the search.
2105          */
2106         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2107                 cdm->status = CAM_DEV_MATCH_ERROR;
2108                 return(0);
2109         }
2110
2111         /*
2112          * If the copy flag is set, copy this bus out.
2113          */
2114         if (retval & DM_RET_COPY) {
2115                 int spaceleft, j;
2116
2117                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2118                         sizeof(struct dev_match_result));
2119
2120                 /*
2121                  * If we don't have enough space to put in another
2122                  * match result, save our position and tell the
2123                  * user there are more devices to check.
2124                  */
2125                 if (spaceleft < (int)sizeof(struct dev_match_result)) {
2126                         bzero(&cdm->pos, sizeof(cdm->pos));
2127                         cdm->pos.position_type = 
2128                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2129
2130                         cdm->pos.cookie.bus = bus;
2131                         cdm->pos.generations[CAM_BUS_GENERATION]=
2132                                 bus_generation;
2133                         cdm->status = CAM_DEV_MATCH_MORE;
2134                         return(0);
2135                 }
2136                 j = cdm->num_matches;
2137                 cdm->num_matches++;
2138                 cdm->matches[j].type = DEV_MATCH_BUS;
2139                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2140                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2141                 cdm->matches[j].result.bus_result.unit_number =
2142                         bus->sim->unit_number;
2143                 strncpy(cdm->matches[j].result.bus_result.dev_name,
2144                         bus->sim->sim_name, DEV_IDLEN);
2145         }
2146
2147         /*
2148          * If the user is only interested in busses, there's no
2149          * reason to descend to the next level in the tree.
2150          */
2151         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2152                 return(1);
2153
2154         /*
2155          * If there is a target generation recorded, check it to
2156          * make sure the target list hasn't changed.
2157          */
2158         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2159          && (bus == cdm->pos.cookie.bus)
2160          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2161          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2162          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2163              bus->generation)) {
2164                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2165                 return(0);
2166         }
2167
2168         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2169          && (cdm->pos.cookie.bus == bus)
2170          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2171          && (cdm->pos.cookie.target != NULL))
2172                 return(xpttargettraverse(bus,
2173                                         (struct cam_et *)cdm->pos.cookie.target,
2174                                          xptedttargetfunc, arg));
2175         else
2176                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2177 }
2178
2179 static int
2180 xptedttargetfunc(struct cam_et *target, void *arg)
2181 {
2182         struct ccb_dev_match *cdm;
2183
2184         cdm = (struct ccb_dev_match *)arg;
2185
2186         /*
2187          * If there is a device list generation recorded, check it to
2188          * make sure the device list hasn't changed.
2189          */
2190         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2191          && (cdm->pos.cookie.bus == target->bus)
2192          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2193          && (cdm->pos.cookie.target == target)
2194          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2195          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2196          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2197              target->generation)) {
2198                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2199                 return(0);
2200         }
2201
2202         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2203          && (cdm->pos.cookie.bus == target->bus)
2204          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2205          && (cdm->pos.cookie.target == target)
2206          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2207          && (cdm->pos.cookie.device != NULL))
2208                 return(xptdevicetraverse(target,
2209                                         (struct cam_ed *)cdm->pos.cookie.device,
2210                                          xptedtdevicefunc, arg));
2211         else
2212                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2213 }
2214
2215 static int
2216 xptedtdevicefunc(struct cam_ed *device, void *arg)
2217 {
2218
2219         struct ccb_dev_match *cdm;
2220         dev_match_ret retval;
2221
2222         cdm = (struct ccb_dev_match *)arg;
2223
2224         /*
2225          * If our position is for something deeper in the tree, that means
2226          * that we've already seen this node.  So, we keep going down.
2227          */
2228         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2229          && (cdm->pos.cookie.device == device)
2230          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2231          && (cdm->pos.cookie.periph != NULL))
2232                 retval = DM_RET_DESCEND;
2233         else
2234                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2235                                         device);
2236
2237         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2238                 cdm->status = CAM_DEV_MATCH_ERROR;
2239                 return(0);
2240         }
2241
2242         /*
2243          * If the copy flag is set, copy this device out.
2244          */
2245         if (retval & DM_RET_COPY) {
2246                 int spaceleft, j;
2247
2248                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2249                         sizeof(struct dev_match_result));
2250
2251                 /*
2252                  * If we don't have enough space to put in another
2253                  * match result, save our position and tell the
2254                  * user there are more devices to check.
2255                  */
2256                 if (spaceleft < (int)sizeof(struct dev_match_result)) {
2257                         bzero(&cdm->pos, sizeof(cdm->pos));
2258                         cdm->pos.position_type = 
2259                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2260                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2261
2262                         cdm->pos.cookie.bus = device->target->bus;
2263                         cdm->pos.generations[CAM_BUS_GENERATION]=
2264                                 bus_generation;
2265                         cdm->pos.cookie.target = device->target;
2266                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2267                                 device->target->bus->generation;
2268                         cdm->pos.cookie.device = device;
2269                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2270                                 device->target->generation;
2271                         cdm->status = CAM_DEV_MATCH_MORE;
2272                         return(0);
2273                 }
2274                 j = cdm->num_matches;
2275                 cdm->num_matches++;
2276                 cdm->matches[j].type = DEV_MATCH_DEVICE;
2277                 cdm->matches[j].result.device_result.path_id =
2278                         device->target->bus->path_id;
2279                 cdm->matches[j].result.device_result.target_id =
2280                         device->target->target_id;
2281                 cdm->matches[j].result.device_result.target_lun =
2282                         device->lun_id;
2283                 bcopy(&device->inq_data,
2284                       &cdm->matches[j].result.device_result.inq_data,
2285                       sizeof(struct scsi_inquiry_data));
2286
2287                 /* Let the user know whether this device is unconfigured */
2288                 if (device->flags & CAM_DEV_UNCONFIGURED)
2289                         cdm->matches[j].result.device_result.flags =
2290                                 DEV_RESULT_UNCONFIGURED;
2291                 else
2292                         cdm->matches[j].result.device_result.flags =
2293                                 DEV_RESULT_NOFLAG;
2294         }
2295
2296         /*
2297          * If the user isn't interested in peripherals, don't descend
2298          * the tree any further.
2299          */
2300         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2301                 return(1);
2302
2303         /*
2304          * If there is a peripheral list generation recorded, make sure
2305          * it hasn't changed.
2306          */
2307         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2308          && (device->target->bus == cdm->pos.cookie.bus)
2309          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2310          && (device->target == cdm->pos.cookie.target)
2311          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2312          && (device == cdm->pos.cookie.device)
2313          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2314          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2315          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2316              device->generation)){
2317                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2318                 return(0);
2319         }
2320
2321         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2322          && (cdm->pos.cookie.bus == device->target->bus)
2323          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2324          && (cdm->pos.cookie.target == device->target)
2325          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2326          && (cdm->pos.cookie.device == device)
2327          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2328          && (cdm->pos.cookie.periph != NULL))
2329                 return(xptperiphtraverse(device,
2330                                 (struct cam_periph *)cdm->pos.cookie.periph,
2331                                 xptedtperiphfunc, arg));
2332         else
2333                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2334 }
2335
2336 static int
2337 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2338 {
2339         struct ccb_dev_match *cdm;
2340         dev_match_ret retval;
2341
2342         cdm = (struct ccb_dev_match *)arg;
2343
2344         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2345
2346         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2347                 cdm->status = CAM_DEV_MATCH_ERROR;
2348                 return(0);
2349         }
2350
2351         /*
2352          * If the copy flag is set, copy this peripheral out.
2353          */
2354         if (retval & DM_RET_COPY) {
2355                 int spaceleft, j;
2356
2357                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2358                         sizeof(struct dev_match_result));
2359
2360                 /*
2361                  * If we don't have enough space to put in another
2362                  * match result, save our position and tell the
2363                  * user there are more devices to check.
2364                  */
2365                 if (spaceleft < (int)sizeof(struct dev_match_result)) {
2366                         bzero(&cdm->pos, sizeof(cdm->pos));
2367                         cdm->pos.position_type = 
2368                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2369                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2370                                 CAM_DEV_POS_PERIPH;
2371
2372                         cdm->pos.cookie.bus = periph->path->bus;
2373                         cdm->pos.generations[CAM_BUS_GENERATION]=
2374                                 bus_generation;
2375                         cdm->pos.cookie.target = periph->path->target;
2376                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2377                                 periph->path->bus->generation;
2378                         cdm->pos.cookie.device = periph->path->device;
2379                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2380                                 periph->path->target->generation;
2381                         cdm->pos.cookie.periph = periph;
2382                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2383                                 periph->path->device->generation;
2384                         cdm->status = CAM_DEV_MATCH_MORE;
2385                         return(0);
2386                 }
2387
2388                 j = cdm->num_matches;
2389                 cdm->num_matches++;
2390                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2391                 cdm->matches[j].result.periph_result.path_id =
2392                         periph->path->bus->path_id;
2393                 cdm->matches[j].result.periph_result.target_id =
2394                         periph->path->target->target_id;
2395                 cdm->matches[j].result.periph_result.target_lun =
2396                         periph->path->device->lun_id;
2397                 cdm->matches[j].result.periph_result.unit_number =
2398                         periph->unit_number;
2399                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2400                         periph->periph_name, DEV_IDLEN);
2401         }
2402
2403         return(1);
2404 }
2405
2406 static int
2407 xptedtmatch(struct ccb_dev_match *cdm)
2408 {
2409         int ret;
2410
2411         cdm->num_matches = 0;
2412
2413         /*
2414          * Check the bus list generation.  If it has changed, the user
2415          * needs to reset everything and start over.
2416          */
2417         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2418          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2419          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2420                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2421                 return(0);
2422         }
2423
2424         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2425          && (cdm->pos.cookie.bus != NULL))
2426                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2427                                      xptedtbusfunc, cdm);
2428         else
2429                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2430
2431         /*
2432          * If we get back 0, that means that we had to stop before fully
2433          * traversing the EDT.  It also means that one of the subroutines
2434          * has set the status field to the proper value.  If we get back 1,
2435          * we've fully traversed the EDT and copied out any matching entries.
2436          */
2437         if (ret == 1)
2438                 cdm->status = CAM_DEV_MATCH_LAST;
2439
2440         return(ret);
2441 }
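
/*
 * The CAM_DEV_MATCH_MORE handshake above is driven from userland by
 * resubmitting the ccb until the saved position runs out of entries;
 * the cdm->pos cookie makes each call resume where the last one left
 * off.  A sketch of that loop (xpt_fd is assumed to be an open
 * descriptor on the xpt device node; camcontrol's device-tree listing
 * works along these lines):
 */
#if 0
union ccb ccb;
struct dev_match_result matches[64];

bzero(&ccb, sizeof(ccb));
ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
ccb.ccb_h.func_code = XPT_DEV_MATCH;
ccb.cdm.match_buf_len = sizeof(matches);
ccb.cdm.matches = matches;
ccb.cdm.num_patterns = 0;	/* no patterns: match everything */
ccb.cdm.pattern_buf_len = 0;

do {
	if (ioctl(xpt_fd, CAMIOCOMMAND, &ccb) == -1)
		break;
	if (ccb.ccb_h.status != CAM_REQ_CMP ||
	    (ccb.cdm.status != CAM_DEV_MATCH_LAST &&
	     ccb.cdm.status != CAM_DEV_MATCH_MORE))
		break;	/* e.g. CAM_DEV_MATCH_LIST_CHANGED: start over */
	/* ... consume ccb.cdm.matches[0 .. ccb.cdm.num_matches - 1] ... */
} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
#endif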
2442
2443 static int
2444 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2445 {
2446         struct ccb_dev_match *cdm;
2447
2448         cdm = (struct ccb_dev_match *)arg;
2449
2450         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2451          && (cdm->pos.cookie.pdrv == pdrv)
2452          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2453          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2454          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2455              (*pdrv)->generation)) {
2456                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2457                 return(0);
2458         }
2459
2460         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2461          && (cdm->pos.cookie.pdrv == pdrv)
2462          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2463          && (cdm->pos.cookie.periph != NULL))
2464                 return(xptpdperiphtraverse(pdrv,
2465                                 (struct cam_periph *)cdm->pos.cookie.periph,
2466                                 xptplistperiphfunc, arg));
2467         else
2468                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2469 }
2470
2471 static int
2472 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2473 {
2474         struct ccb_dev_match *cdm;
2475         dev_match_ret retval;
2476
2477         cdm = (struct ccb_dev_match *)arg;
2478
2479         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2480
2481         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2482                 cdm->status = CAM_DEV_MATCH_ERROR;
2483                 return(0);
2484         }
2485
2486         /*
2487          * If the copy flag is set, copy this peripheral out.
2488          */
2489         if (retval & DM_RET_COPY) {
2490                 int spaceleft, j;
2491
2492                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2493                         sizeof(struct dev_match_result));
2494
2495                 /*
2496                  * If we don't have enough space to put in another
2497                  * match result, save our position and tell the
2498                  * user there are more devices to check.
2499                  */
2500                 if (spaceleft < (int)sizeof(struct dev_match_result)) {
2501                         struct periph_driver **pdrv;
2502
2503                         pdrv = NULL;
2504                         bzero(&cdm->pos, sizeof(cdm->pos));
2505                         cdm->pos.position_type = 
2506                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2507                                 CAM_DEV_POS_PERIPH;
2508
2509                         /*
2510                          * This may look a bit nonsensical, but it is
2511                          * actually quite logical.  There are very few
2512                          * peripheral drivers, and bloating every peripheral
2513                          * structure with a pointer back to its parent
2514                          * peripheral driver linker set entry would cost
2515                          * more in the long run than doing this quick lookup.
2516                          */
2517                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2518                                 if (strcmp((*pdrv)->driver_name,
2519                                     periph->periph_name) == 0)
2520                                         break;
2521                         }
2522
2523                         if (*pdrv == NULL) {
2524                                 cdm->status = CAM_DEV_MATCH_ERROR;
2525                                 return(0);
2526                         }
2527
2528                         cdm->pos.cookie.pdrv = pdrv;
2529                         /*
2530                          * The periph generation slot does double duty, as
2531                          * does the periph pointer slot.  They are used for
2532                          * both edt and pdrv lookups and positioning.
2533                          */
2534                         cdm->pos.cookie.periph = periph;
2535                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2536                                 (*pdrv)->generation;
2537                         cdm->status = CAM_DEV_MATCH_MORE;
2538                         return(0);
2539                 }
2540
2541                 j = cdm->num_matches;
2542                 cdm->num_matches++;
2543                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2544                 cdm->matches[j].result.periph_result.path_id =
2545                         periph->path->bus->path_id;
2546
2547                 /*
2548                  * The transport layer peripheral doesn't have a target or
2549                  * lun.
2550                  */
2551                 if (periph->path->target)
2552                         cdm->matches[j].result.periph_result.target_id =
2553                                 periph->path->target->target_id;
2554                 else
2555                         cdm->matches[j].result.periph_result.target_id = -1;
2556
2557                 if (periph->path->device)
2558                         cdm->matches[j].result.periph_result.target_lun =
2559                                 periph->path->device->lun_id;
2560                 else
2561                         cdm->matches[j].result.periph_result.target_lun = -1;
2562
2563                 cdm->matches[j].result.periph_result.unit_number =
2564                         periph->unit_number;
2565                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2566                         periph->periph_name, DEV_IDLEN);
2567         }
2568
2569         return(1);
2570 }
2571
2572 static int
2573 xptperiphlistmatch(struct ccb_dev_match *cdm)
2574 {
2575         int ret;
2576
2577         cdm->num_matches = 0;
2578
2579         /*
2580          * At this point in the edt traversal function, we check the bus
2581          * list generation to make sure that no busses have been added or
2582          * removed since the user last sent an XPT_DEV_MATCH ccb through.
2583          * For the peripheral driver list traversal function, however, we
2584          * don't have to worry about new peripheral driver types coming or
2585          * going; they're in a linker set, and therefore can't change
2586          * without a recompile.
2587          */
2588
2589         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2590          && (cdm->pos.cookie.pdrv != NULL))
2591                 ret = xptpdrvtraverse(
2592                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2593                                 xptplistpdrvfunc, cdm);
2594         else
2595                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2596
2597         /*
2598          * If we get back 0, that means that we had to stop before fully
2599          * traversing the peripheral driver tree.  It also means that one of
2600          * the subroutines has set the status field to the proper value.  If
2601          * we get back 1, we've fully traversed the EDT and copied out any
2602          * matching entries.
2603          */
2604         if (ret == 1)
2605                 cdm->status = CAM_DEV_MATCH_LAST;
2606
2607         return(ret);
2608 }
2609
2610 static int
2611 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2612 {
2613         struct cam_eb *bus, *next_bus;
2614         int retval;
2615
2616         retval = 1;
2617
2618         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2619              bus != NULL;
2620              bus = next_bus) {
2621                 next_bus = TAILQ_NEXT(bus, links);
2622
2623                 retval = tr_func(bus, arg);
2624                 if (retval == 0)
2625                         return(retval);
2626         }
2627
2628         return(retval);
2629 }
2630
2631 static int
2632 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2633                   xpt_targetfunc_t *tr_func, void *arg)
2634 {
2635         struct cam_et *target, *next_target;
2636         int retval;
2637
2638         retval = 1;
2639         for (target = (start_target ? start_target :
2640                        TAILQ_FIRST(&bus->et_entries));
2641              target != NULL; target = next_target) {
2642
2643                 next_target = TAILQ_NEXT(target, links);
2644
2645                 retval = tr_func(target, arg);
2646
2647                 if (retval == 0)
2648                         return(retval);
2649         }
2650
2651         return(retval);
2652 }
2653
2654 static int
2655 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2656                   xpt_devicefunc_t *tr_func, void *arg)
2657 {
2658         struct cam_ed *device, *next_device;
2659         int retval;
2660
2661         retval = 1;
2662         for (device = (start_device ? start_device :
2663                        TAILQ_FIRST(&target->ed_entries));
2664              device != NULL;
2665              device = next_device) {
2666
2667                 next_device = TAILQ_NEXT(device, links);
2668
2669                 retval = tr_func(device, arg);
2670
2671                 if (retval == 0)
2672                         return(retval);
2673         }
2674
2675         return(retval);
2676 }
2677
2678 static int
2679 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2680                   xpt_periphfunc_t *tr_func, void *arg)
2681 {
2682         struct cam_periph *periph, *next_periph;
2683         int retval;
2684
2685         retval = 1;
2686
2687         for (periph = (start_periph ? start_periph :
2688                        SLIST_FIRST(&device->periphs));
2689              periph != NULL;
2690              periph = next_periph) {
2691
2692                 next_periph = SLIST_NEXT(periph, periph_links);
2693
2694                 retval = tr_func(periph, arg);
2695                 if (retval == 0)
2696                         return(retval);
2697         }
2698
2699         return(retval);
2700 }
2701
2702 static int
2703 xptpdrvtraverse(struct periph_driver **start_pdrv,
2704                 xpt_pdrvfunc_t *tr_func, void *arg)
2705 {
2706         struct periph_driver **pdrv;
2707         int retval;
2708
2709         retval = 1;
2710
2711         /*
2712          * We don't traverse the peripheral driver list like we do the
2713          * other lists, because it is a linker set, and therefore cannot be
2714          * changed during runtime.  If the peripheral driver list is ever
2715          * re-done to be something other than a linker set (i.e. it can
2716          * change while the system is running), the list traversal should
2717          * be modified to work like the other traversal functions.
2718          */
2719         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2720              *pdrv != NULL; pdrv++) {
2721                 retval = tr_func(pdrv, arg);
2722
2723                 if (retval == 0)
2724                         return(retval);
2725         }
2726
2727         return(retval);
2728 }
2729
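/*
 * Walk the unit list of a single peripheral driver, applying tr_func
 * to each peripheral instance.  Follows the same early-abort protocol
 * as the EDT traversal functions above.
 */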
2730 static int
2731 xptpdperiphtraverse(struct periph_driver **pdrv,
2732                     struct cam_periph *start_periph,
2733                     xpt_periphfunc_t *tr_func, void *arg)
2734 {
2735         struct cam_periph *periph, *next_periph;
2736         int retval;
2737
2738         retval = 1;
2739
2740         for (periph = (start_periph ? start_periph :
2741              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2742              periph = next_periph) {
2743
2744                 next_periph = TAILQ_NEXT(periph, unit_links);
2745
2746                 retval = tr_func(periph, arg);
2747                 if (retval == 0)
2748                         return(retval);
2749         }
2750         return(retval);
2751 }
2752
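/*
 * The xptdef*func() callbacks below implement depth-limited traversal.
 * Each level checks the depth recorded in the xpt_traverse_config; if
 * the requested depth has been reached, the user-supplied function is
 * invoked, otherwise the walk descends one level deeper into the EDT.
 */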
2753 static int
2754 xptdefbusfunc(struct cam_eb *bus, void *arg)
2755 {
2756         struct xpt_traverse_config *tr_config;
2757
2758         tr_config = (struct xpt_traverse_config *)arg;
2759
2760         if (tr_config->depth == XPT_DEPTH_BUS) {
2761                 xpt_busfunc_t *tr_func;
2762
2763                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2764
2765                 return(tr_func(bus, tr_config->tr_arg));
2766         } else
2767                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2768 }
2769
2770 static int
2771 xptdeftargetfunc(struct cam_et *target, void *arg)
2772 {
2773         struct xpt_traverse_config *tr_config;
2774
2775         tr_config = (struct xpt_traverse_config *)arg;
2776
2777         if (tr_config->depth == XPT_DEPTH_TARGET) {
2778                 xpt_targetfunc_t *tr_func;
2779
2780                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2781
2782                 return(tr_func(target, tr_config->tr_arg));
2783         } else
2784                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2785 }
2786
2787 static int
2788 xptdefdevicefunc(struct cam_ed *device, void *arg)
2789 {
2790         struct xpt_traverse_config *tr_config;
2791
2792         tr_config = (struct xpt_traverse_config *)arg;
2793
2794         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2795                 xpt_devicefunc_t *tr_func;
2796
2797                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2798
2799                 return(tr_func(device, tr_config->tr_arg));
2800         } else
2801                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2802 }
2803
2804 static int
2805 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2806 {
2807         struct xpt_traverse_config *tr_config;
2808         xpt_periphfunc_t *tr_func;
2809
2810         tr_config = (struct xpt_traverse_config *)arg;
2811
2812         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2813
2814         /*
2815          * Unlike the other default functions, we don't check for depth
2816          * here.  The peripheral driver level is the last level in the EDT,
2817          * so if we're here, we should execute the function in question.
2818          */
2819         return(tr_func(periph, tr_config->tr_arg));
2820 }
2821
2822 /*
2823  * Execute the given function for every bus in the EDT.
2824  */
2825 static int
2826 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2827 {
2828         struct xpt_traverse_config tr_config;
2829
2830         tr_config.depth = XPT_DEPTH_BUS;
2831         tr_config.tr_func = tr_func;
2832         tr_config.tr_arg = arg;
2833
2834         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2835 }
2836
2837 #ifdef notusedyet
2838 /*
2839  * Execute the given function for every target in the EDT.
2840  */
2841 static int
2842 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2843 {
2844         struct xpt_traverse_config tr_config;
2845
2846         tr_config.depth = XPT_DEPTH_TARGET;
2847         tr_config.tr_func = tr_func;
2848         tr_config.tr_arg = arg;
2849
2850         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2851 }
2852 #endif /* notusedyet */
2853
2854 /*
2855  * Execute the given function for every device in the EDT.
2856  */
2857 static int
2858 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2859 {
2860         struct xpt_traverse_config tr_config;
2861
2862         tr_config.depth = XPT_DEPTH_DEVICE;
2863         tr_config.tr_func = tr_func;
2864         tr_config.tr_arg = arg;
2865
2866         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2867 }
2868
2869 #ifdef notusedyet
2870 /*
2871  * Execute the given function for every peripheral in the EDT.
2872  */
2873 static int
2874 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2875 {
2876         struct xpt_traverse_config tr_config;
2877
2878         tr_config.depth = XPT_DEPTH_PERIPH;
2879         tr_config.tr_func = tr_func;
2880         tr_config.tr_arg = arg;
2881
2882         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2883 }
2884 #endif /* notusedyet */
2885
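/*
 * Callback for xpt_for_all_devices():  replay an AC_FOUND_DEVICE
 * event, along with the device's XPT_GDEV_TYPE data, to a newly
 * registered async handler so that it learns about devices that
 * already exist.
 */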
2886 static int
2887 xptsetasyncfunc(struct cam_ed *device, void *arg)
2888 {
2889         struct cam_path path;
2890         struct ccb_getdev cgd;
2891         struct async_node *cur_entry;
2892
2893         cur_entry = (struct async_node *)arg;
2894
2895         /*
2896          * Don't report unconfigured devices (Wildcard devs,
2897          * devices only for target mode, device instances
2898          * that have been invalidated but are waiting for
2899          * their last reference count to be released).
2900          */
2901         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2902                 return (1);
2903
2904         xpt_compile_path(&path,
2905                          NULL,
2906                          device->target->bus->path_id,
2907                          device->target->target_id,
2908                          device->lun_id);
2909         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2910         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2911         xpt_action((union ccb *)&cgd);
2912         cur_entry->callback(cur_entry->callback_arg,
2913                             AC_FOUND_DEVICE,
2914                             &path, &cgd);
2915         xpt_release_path(&path);
2916
2917         return(1);
2918 }
2919
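/*
 * Callback for xpt_for_all_busses():  replay an AC_PATH_REGISTERED
 * event, along with the bus's XPT_PATH_INQ data, to a newly
 * registered async handler so that it learns about busses that
 * already exist.
 */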
2920 static int
2921 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2922 {
2923         struct cam_path path;
2924         struct ccb_pathinq cpi;
2925         struct async_node *cur_entry;
2926
2927         cur_entry = (struct async_node *)arg;
2928
2929         xpt_compile_path(&path, /*periph*/NULL,
2930                          bus->sim->path_id,
2931                          CAM_TARGET_WILDCARD,
2932                          CAM_LUN_WILDCARD);
2933         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2934         cpi.ccb_h.func_code = XPT_PATH_INQ;
2935         xpt_action((union ccb *)&cpi);
2936         cur_entry->callback(cur_entry->callback_arg,
2937                             AC_PATH_REGISTERED,
2938                             &path, &cpi);
2939         xpt_release_path(&path);
2940
2941         return(1);
2942 }
2943
2944 void
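/*
 * xpt_action() is the main entry point for CCB processing.  Requests
 * that the transport layer can satisfy itself are completed here;
 * everything else is queued for, or handed directly to, the SIM via
 * its sim_action vector.  The entire function-code switch runs inside
 * a critical section.
 */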
2945 xpt_action(union ccb *start_ccb)
2946 {
2947         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2948
2949         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2950
2951         crit_enter();
2952
2953         switch (start_ccb->ccb_h.func_code) {
2954         case XPT_SCSI_IO:
2955         {
2956 #ifdef CAM_NEW_TRAN_CODE
2957                 struct cam_ed *device;
2958 #endif /* CAM_NEW_TRAN_CODE */
2959 #ifdef CAMDEBUG
2960                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2961                 struct cam_path *path;
2962
2963                 path = start_ccb->ccb_h.path;
2964 #endif
2965
2966                 /*
2967                  * For the sake of compatibility with SCSI-1
2968                  * devices that may not understand the identify
2969                  * message, we include lun information in the
2970                  * second byte of all commands.  SCSI-1 specifies
2971                  * that luns are a 3 bit value and reserves only 3
2972                  * bits for lun information in the CDB.  Later
2973                  * revisions of the SCSI spec allow for more than 8
2974                  * luns, but have deprecated lun information in the
2975                  * CDB.  So, if the lun won't fit, we must omit it.
2976                  *
2977                  * Also be aware that during initial probing for devices,
2978                  * the inquiry information is unknown but initialized to 0.
2979                  * This means that this code will be exercised while probing
2980                  * devices with an ANSI revision greater than 2.
2981                  */
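                /*
                 * For example, with target_lun 2 the statement below
                 * ORs 0x40 (2 << 5) into byte 1 of the CDB.
                 */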
2982 #ifdef CAM_NEW_TRAN_CODE
2983                 device = start_ccb->ccb_h.path->device;
2984                 if (device->protocol_version <= SCSI_REV_2
2985 #else /* CAM_NEW_TRAN_CODE */
2986                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2987 #endif /* CAM_NEW_TRAN_CODE */
2988                  && start_ccb->ccb_h.target_lun < 8
2989                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2990
2991                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2992                             start_ccb->ccb_h.target_lun << 5;
2993                 }
2994                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2995                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2996                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2997                                        &path->device->inq_data),
2998                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2999                                           cdb_str, sizeof(cdb_str))));
3000                 /* FALLTHROUGH */
3001         }
3002         case XPT_TARGET_IO:
3003         case XPT_CONT_TARGET_IO:
3004                 start_ccb->csio.sense_resid = 0;
3005                 start_ccb->csio.resid = 0;
3006                 /* FALLTHROUGH */
3007         case XPT_RESET_DEV:
3008         case XPT_ENG_EXEC:
3009         {
3010                 struct cam_path *path;
3011                 struct cam_sim *sim;
3012                 int runq;
3013
3014                 path = start_ccb->ccb_h.path;
3015
3016                 sim = path->bus->sim;
3017                 if (SIM_DEAD(sim)) {
3018                         /* The SIM has gone; just execute the CCB directly. */
3019                         cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3020                         (*(sim->sim_action))(sim, start_ccb);
3021                         break;
3022                 }
3023
3024                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3025                 if (path->device->qfrozen_cnt == 0)
3026                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
3027                 else
3028                         runq = 0;
3029                 if (runq != 0)
3030                         xpt_run_dev_sendq(path->bus);
3031                 break;
3032         }
3033         case XPT_SET_TRAN_SETTINGS:
3034         {
3035                 xpt_set_transfer_settings(&start_ccb->cts,
3036                                           start_ccb->ccb_h.path->device,
3037                                           /*async_update*/FALSE);
3038                 break;
3039         }
3040         case XPT_CALC_GEOMETRY:
3041         {
3042                 struct cam_sim *sim;
3043
3044                 /* Filter out garbage */
3045                 if (start_ccb->ccg.block_size == 0
3046                  || start_ccb->ccg.volume_size == 0) {
3047                         start_ccb->ccg.cylinders = 0;
3048                         start_ccb->ccg.heads = 0;
3049                         start_ccb->ccg.secs_per_track = 0;
3050                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3051                         break;
3052                 }
3053                 sim = start_ccb->ccb_h.path->bus->sim;
3054                 (*(sim->sim_action))(sim, start_ccb);
3055                 break;
3056         }
3057         case XPT_ABORT:
3058         {
3059                 union ccb *abort_ccb;
3060
3061                 abort_ccb = start_ccb->cab.abort_ccb;
3062                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3063
3064                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
3065                                 struct cam_ccbq *ccbq;
3066
3067                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3068                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3069                                 abort_ccb->ccb_h.status =
3070                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3071                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3072                                 xpt_done(abort_ccb);
3073                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3074                                 break;
3075                         }
3076                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3077                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3078                                 /*
3079                                  * We've caught this ccb en route to
3080                                  * the SIM.  Flag it for abort and the
3081                                  * SIM will do so just before starting
3082                                  * real work on the CCB.
3083                                  */
3084                                 abort_ccb->ccb_h.status =
3085                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3086                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3087                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3088                                 break;
3089                         }
3090                 } 
3091                 if (XPT_FC_IS_QUEUED(abort_ccb)
3092                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3093                         /*
3094                          * It's already completed but waiting
3095                          * for our SWI to get to it.
3096                          */
3097                         start_ccb->ccb_h.status = CAM_UA_ABORT;
3098                         break;
3099                 }
3100                 /*
3101                  * If we weren't able to take care of the abort request
3102                  * in the XPT, pass the request down to the SIM for processing.
3103                  */
3104                 /* FALLTHROUGH */
3105         }
3106         case XPT_ACCEPT_TARGET_IO:
3107         case XPT_EN_LUN:
3108         case XPT_IMMED_NOTIFY:
3109         case XPT_NOTIFY_ACK:
3110         case XPT_GET_TRAN_SETTINGS:
3111         case XPT_RESET_BUS:
3112         {
3113                 struct cam_sim *sim;
3114
3115                 sim = start_ccb->ccb_h.path->bus->sim;
3116                 (*(sim->sim_action))(sim, start_ccb);
3117                 break;
3118         }
3119         case XPT_PATH_INQ:
3120         {
3121                 struct cam_sim *sim;
3122
3123                 sim = start_ccb->ccb_h.path->bus->sim;
3124                 (*(sim->sim_action))(sim, start_ccb);
3125                 break;
3126         }
3127         case XPT_PATH_STATS:
3128                 start_ccb->cpis.last_reset =
3129                         start_ccb->ccb_h.path->bus->last_reset;
3130                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3131                 break;
3132         case XPT_GDEV_TYPE:
3133         {
3134                 struct cam_ed *dev;
3135
3136                 dev = start_ccb->ccb_h.path->device;
3137                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3138                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3139                 } else {
3140                         struct ccb_getdev *cgd;
3141                         struct cam_eb *bus;
3142                         struct cam_et *tar;
3143
3144                         cgd = &start_ccb->cgd;
3145                         bus = cgd->ccb_h.path->bus;
3146                         tar = cgd->ccb_h.path->target;
3147                         cgd->inq_data = dev->inq_data;
3148                         cgd->ccb_h.status = CAM_REQ_CMP;
3149                         cgd->serial_num_len = dev->serial_num_len;
3150                         if ((dev->serial_num_len > 0)
3151                          && (dev->serial_num != NULL))
3152                                 bcopy(dev->serial_num, cgd->serial_num,
3153                                       dev->serial_num_len);
3154                 }
3155                 break; 
3156         }
3157         case XPT_GDEV_STATS:
3158         {
3159                 struct cam_ed *dev;
3160
3161                 dev = start_ccb->ccb_h.path->device;
3162                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3163                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3164                 } else {
3165                         struct ccb_getdevstats *cgds;
3166                         struct cam_eb *bus;
3167                         struct cam_et *tar;
3168
3169                         cgds = &start_ccb->cgds;
3170                         bus = cgds->ccb_h.path->bus;
3171                         tar = cgds->ccb_h.path->target;
3172                         cgds->dev_openings = dev->ccbq.dev_openings;
3173                         cgds->dev_active = dev->ccbq.dev_active;
3174                         cgds->devq_openings = dev->ccbq.devq_openings;
3175                         cgds->devq_queued = dev->ccbq.queue.entries;
3176                         cgds->held = dev->ccbq.held;
3177                         cgds->last_reset = tar->last_reset;
3178                         cgds->maxtags = dev->quirk->maxtags;
3179                         cgds->mintags = dev->quirk->mintags;
3180                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3181                                 cgds->last_reset = bus->last_reset;
3182                         cgds->ccb_h.status = CAM_REQ_CMP;
3183                 }
3184                 break;
3185         }
3186         case XPT_GDEVLIST:
3187         {
3188                 struct cam_periph       *nperiph;
3189                 struct periph_list      *periph_head;
3190                 struct ccb_getdevlist   *cgdl;
3191                 u_int                   i;
3192                 struct cam_ed           *device;
3193                 int                     found;
3194
3195
3196                 found = 0;
3197
3198                 /*
3199                  * Don't want anyone mucking with our data.
3200                  */
3201                 device = start_ccb->ccb_h.path->device;
3202                 periph_head = &device->periphs;
3203                 cgdl = &start_ccb->cgdl;
3204
3205                 /*
3206                  * Check and see if the list has changed since the user
3207                  * last requested a list member.  If so, tell them that the
3208                  * list has changed, and therefore they need to start over 
3209                  * from the beginning.
3210                  */
3211                 if ((cgdl->index != 0) && 
3212                     (cgdl->generation != device->generation)) {
3213                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3214                         break;
3215                 }
3216
3217                 /*
3218                  * Traverse the list of peripherals and attempt to find 
3219                  * the requested peripheral.
3220                  */
3221                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3222                      (nperiph != NULL) && (i <= cgdl->index);
3223                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3224                         if (i == cgdl->index) {
3225                                 strncpy(cgdl->periph_name,
3226                                         nperiph->periph_name,
3227                                         DEV_IDLEN);
3228                                 cgdl->unit_number = nperiph->unit_number;
3229                                 found = 1;
3230                         }
3231                 }
3232                 if (found == 0) {
3233                         cgdl->status = CAM_GDEVLIST_ERROR;
3234                         break;
3235                 }
3236
3237                 if (nperiph == NULL)
3238                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3239                 else
3240                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3241
3242                 cgdl->index++;
3243                 cgdl->generation = device->generation;
3244
3245                 cgdl->ccb_h.status = CAM_REQ_CMP;
3246                 break;
3247         }
3248         case XPT_DEV_MATCH:
3249         {
3250                 dev_pos_type position_type;
3251                 struct ccb_dev_match *cdm;
3252                 int ret;
3253
3254                 cdm = &start_ccb->cdm;
3255
3256                 /*
3257                  * Prevent EDT changes while we traverse it.
3258                  */
3259                 /*
3260                  * There are two ways of getting at information in the EDT.
3261                  * The first way is via the primary EDT tree.  It starts
3262                  * with a list of busses, then a list of targets on a bus,
3263                  * then devices/luns on a target, and then peripherals on a
3264                  * device/lun.  The "other" way is by the peripheral driver
3265                  * lists.  The peripheral driver lists are organized by
3266                  * peripheral driver.  (obviously)  So it makes sense to
3267                  * use the peripheral driver list if the user is looking
3268                  * for something like "da1", or all "da" devices.  If the
3269                  * user is looking for something on a particular bus/target
3270                  * or lun, it's generally better to go through the EDT tree.
3271                  */
3272
3273                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3274                         position_type = cdm->pos.position_type;
3275                 else {
3276                         u_int i;
3277
3278                         position_type = CAM_DEV_POS_NONE;
3279
3280                         for (i = 0; i < cdm->num_patterns; i++) {
3281                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3282                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3283                                         position_type = CAM_DEV_POS_EDT;
3284                                         break;
3285                                 }
3286                         }
3287
3288                         if (cdm->num_patterns == 0)
3289                                 position_type = CAM_DEV_POS_EDT;
3290                         else if (position_type == CAM_DEV_POS_NONE)
3291                                 position_type = CAM_DEV_POS_PDRV;
3292                 }
3293
3294         switch (position_type & CAM_DEV_POS_TYPEMASK) {
3295                 case CAM_DEV_POS_EDT:
3296                         ret = xptedtmatch(cdm);
3297                         break;
3298                 case CAM_DEV_POS_PDRV:
3299                         ret = xptperiphlistmatch(cdm);
3300                         break;
3301                 default:
3302                         cdm->status = CAM_DEV_MATCH_ERROR;
3303                         break;
3304                 }
3305
3306                 if (cdm->status == CAM_DEV_MATCH_ERROR)
3307                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3308                 else
3309                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3310
3311                 break;
3312         }
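        /*
         * For reference, a minimal sketch (with hypothetical handler
         * and softc names, and "path" assumed to be a previously
         * created cam_path) of how a client arranges for async
         * callbacks via the case below:
         *
         *      struct ccb_setasync csa;
         *
         *      xpt_setup_ccb(&csa.ccb_h, path, 1);
         *      csa.ccb_h.func_code = XPT_SASYNC_CB;
         *      csa.event_enable = AC_FOUND_DEVICE;
         *      csa.callback = my_async_cb;     (hypothetical)
         *      csa.callback_arg = my_softc;    (hypothetical)
         *      xpt_action((union ccb *)&csa);
         */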
3313         case XPT_SASYNC_CB:
3314         {
3315                 struct ccb_setasync *csa;
3316                 struct async_node *cur_entry;
3317                 struct async_list *async_head;
3318                 u_int32_t added;
3319
3320                 csa = &start_ccb->csa;
3321                 added = csa->event_enable;
3322                 async_head = &csa->ccb_h.path->device->asyncs;
3323
3324                 /*
3325                  * If there is already an entry for us, simply
3326                  * update it.
3327                  */
3328                 cur_entry = SLIST_FIRST(async_head);
3329                 while (cur_entry != NULL) {
3330                         if ((cur_entry->callback_arg == csa->callback_arg)
3331                          && (cur_entry->callback == csa->callback))
3332                                 break;
3333                         cur_entry = SLIST_NEXT(cur_entry, links);
3334                 }
3335
3336                 if (cur_entry != NULL) {
3337                         /*
3338                          * If the request has no flags set,
3339                          * remove the entry.
3340                          */
3341                         added &= ~cur_entry->event_enable;
3342                         if (csa->event_enable == 0) {
3343                                 SLIST_REMOVE(async_head, cur_entry,
3344                                              async_node, links);
3345                                 csa->ccb_h.path->device->refcount--;
3346                                 kfree(cur_entry, M_CAMXPT);
3347                         } else {
3348                                 cur_entry->event_enable = csa->event_enable;
3349                         }
3350                 } else {
3351                         cur_entry = kmalloc(sizeof(*cur_entry), 
3352                                             M_CAMXPT, M_INTWAIT);
3353                         cur_entry->event_enable = csa->event_enable;
3354                         cur_entry->callback_arg = csa->callback_arg;
3355                         cur_entry->callback = csa->callback;
3356                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3357                         csa->ccb_h.path->device->refcount++;
3358                 }
3359
3360                 if ((added & AC_FOUND_DEVICE) != 0) {
3361                         /*
3362                          * Get this peripheral up to date with all
3363                          * the currently existing devices.
3364                          */
3365                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3366                 }
3367                 if ((added & AC_PATH_REGISTERED) != 0) {
3368                         /*
3369                          * Get this peripheral up to date with all
3370                          * the currently existing busses.
3371                          */
3372                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3373                 }
3374                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3375                 break;
3376         }
3377         case XPT_REL_SIMQ:
3378         {
3379                 struct ccb_relsim *crs;
3380                 struct cam_ed *dev;
3381
3382                 crs = &start_ccb->crs;
3383                 dev = crs->ccb_h.path->device;
3384                 if (dev == NULL) {
3385
3386                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3387                         break;
3388                 }
3389
3390                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3391
3392                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3393                                 /* Don't ever go below one opening */
3394                                 if (crs->openings > 0) {
3395                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
3396                                                             crs->openings);
3397
3398                                         if (bootverbose) {
3399                                                 xpt_print_path(crs->ccb_h.path);
3400                                                 kprintf("tagged openings "
3401                                                        "now %d\n",
3402                                                        crs->openings);
3403                                         }
3404                                 }
3405                         }
3406                 }
3407
3408                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3409
3410                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3411
3412                                 /*
3413                                  * Just extend the old timeout and decrement
3414                                  * the freeze count so that a single timeout
3415                                  * is sufficient for releasing the queue.
3416                                  */
3417                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3418                                 callout_stop(&dev->c_handle);
3419                         } else {
3420
3421                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3422                         }
3423
3424                         callout_reset(&dev->c_handle,
3425                                       (crs->release_timeout * hz) / 1000, 
3426                                       xpt_release_devq_timeout, dev);
3427
3428                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3429
3430                 }
3431
3432                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3433
3434                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3435                                 /*
3436                                  * Decrement the freeze count so that a single
3437                                  * completion is still sufficient to unfreeze
3438                                  * the queue.
3439                                  */
3440                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3441                         } else {
3442                                 
3443                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3444                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3445                         }
3446                 }
3447
3448                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3449
3450                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3451                          || (dev->ccbq.dev_active == 0)) {
3452
3453                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3454                         } else {
3455                                 
3456                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3457                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3458                         }
3459                 }
3460                 
3461                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3462
3463                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
3464                                          /*run_queue*/TRUE);
3465                 }
3466                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3467                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3468                 break;
3469         }
3470         case XPT_SCAN_BUS:
3471                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3472                 break;
3473         case XPT_SCAN_LUN:
3474                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3475                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
3476                              start_ccb);
3477                 break;
3478         case XPT_DEBUG: {
3479 #ifdef CAMDEBUG
3480 #ifdef CAM_DEBUG_DELAY
3481                 cam_debug_delay = CAM_DEBUG_DELAY;
3482 #endif
3483                 cam_dflags = start_ccb->cdbg.flags;
3484                 if (cam_dpath != NULL) {
3485                         xpt_free_path(cam_dpath);
3486                         cam_dpath = NULL;
3487                 }
3488
3489                 if (cam_dflags != CAM_DEBUG_NONE) {
3490                         if (xpt_create_path(&cam_dpath, xpt_periph,
3491                                             start_ccb->ccb_h.path_id,
3492                                             start_ccb->ccb_h.target_id,
3493                                             start_ccb->ccb_h.target_lun) !=
3494                                             CAM_REQ_CMP) {
3495                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3496                                 cam_dflags = CAM_DEBUG_NONE;
3497                         } else {
3498                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3499                                 xpt_print_path(cam_dpath);
3500                                 kprintf("debugging flags now %x\n", cam_dflags);
3501                         }
3502                 } else {
3503                         cam_dpath = NULL;
3504                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3505                 }
3506 #else /* !CAMDEBUG */
3507                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3508 #endif /* CAMDEBUG */
3509                 break;
3510         }
3511         case XPT_NOOP:
3512                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3513                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3514                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3515                 break;
3516         default:
3517         case XPT_SDEV_TYPE:
3518         case XPT_TERM_IO:
3519         case XPT_ENG_INQ:
3520                 /* XXX Implement */
3521                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3522                 break;
3523         }
3524         crit_exit();
3525 }
3526
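/*
 * Issue a CCB and poll for its completion without relying on
 * interrupts.  An opening is reserved up front, sim_poll() and the
 * CAM SWI are driven by hand in 1ms steps, and the request is failed
 * with CAM_CMD_TIMEOUT (or CAM_RESRC_UNAVAIL if no opening ever
 * became available) once the ccb_h.timeout budget is exhausted.
 */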
3527 void
3528 xpt_polled_action(union ccb *start_ccb)
3529 {
3530         u_int32_t timeout;
3531         struct    cam_sim *sim; 
3532         struct    cam_devq *devq;
3533         struct    cam_ed *dev;
3534
3535         timeout = start_ccb->ccb_h.timeout;
3536         sim = start_ccb->ccb_h.path->bus->sim;
3537         devq = sim->devq;
3538         dev = start_ccb->ccb_h.path->device;
3539
3540         crit_enter();
3541
3542         /*
3543          * Steal an opening so that no other queued requests
3544          * can get it before us while we simulate interrupts.
3545          */
3546         dev->ccbq.devq_openings--;
3547         dev->ccbq.dev_openings--;       
3548         
3549         while (((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3550            && (--timeout > 0)) {
3551                 DELAY(1000);
3552                 (*(sim->sim_poll))(sim);
3553                 swi_cambio(NULL, NULL);         
3554         }
3555         
3556         dev->ccbq.devq_openings++;
3557         dev->ccbq.dev_openings++;
3558         
3559         if (timeout != 0) {
3560                 xpt_action(start_ccb);
3561                 while (--timeout > 0) {
3562                         (*(sim->sim_poll))(sim);
3563                         swi_cambio(NULL, NULL);
3564                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3565                             != CAM_REQ_INPROG)
3566                                 break;
3567                         DELAY(1000);
3568                 }
3569                 if (timeout == 0) {
3570                         /*
3571                          * XXX Is it worth adding a sim_timeout entry
3572                          * point so we can attempt recovery?  If
3573                          * this is only used for dumps, I don't think
3574                          * it is.
3575                          */
3576                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3577                 }
3578         } else {
3579                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3580         }
3581         crit_exit();
3582 }
3583         
3584 /*
3585  * Schedule a peripheral driver to receive a ccb when its
3586  * target device has space for more transactions.
3587  */
3588 void
3589 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3590 {
3591         struct cam_ed *device;
3592         union ccb *work_ccb;
3593         int runq;
3594
3595         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3596         device = perph->path->device;
3597         crit_enter();
3598         if (periph_is_queued(perph)) {
3599                 /* Simply reorder based on new priority */
3600                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3601                           ("   change priority to %d\n", new_priority));
3602                 if (new_priority < perph->pinfo.priority) {
3603                         camq_change_priority(&device->drvq,
3604                                              perph->pinfo.index,
3605                                              new_priority);
3606                 }
3607                 runq = 0;
3608         } else if (SIM_DEAD(perph->path->bus->sim)) {
3609                 /* The SIM is gone so just call periph_start directly. */
3610                 work_ccb = xpt_get_ccb(perph->path->device);
3611                 crit_exit();
3612                 if (work_ccb == NULL)
3613                         return; /* XXX */
3614                 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3615                 perph->pinfo.priority = new_priority;
3616                 perph->periph_start(perph, work_ccb);
3617                 return;
3618         } else {
3619                 /* New entry on the queue */
3620                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3621                           ("   added periph to queue\n"));
3622                 perph->pinfo.priority = new_priority;
3623                 perph->pinfo.generation = ++device->drvq.generation;
3624                 camq_insert(&device->drvq, &perph->pinfo);
3625                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3626         }
3627         crit_exit();
3628         if (runq != 0) {
3629                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3630                           ("   calling xpt_run_devq\n"));
3631                 xpt_run_dev_allocq(perph->path->bus);
3632         }
3633 }
3634
3635
3636 /*
3637  * Schedule a device to run on a given queue.
3638  * If the device was inserted as a new entry on the queue,
3639  * return 1 meaning the device queue should be run. If we
3640  * were already queued, implying someone else has already
3641  * started the queue, return 0 so the caller doesn't attempt
3642  * to run the queue.  Must be run in a critical section.
3643  */
3644 static int
3645 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3646                  u_int32_t new_priority)
3647 {
3648         int retval;
3649         u_int32_t old_priority;
3650
3651         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3652
3653         old_priority = pinfo->priority;
3654
3655         /*
3656          * Are we already queued?
3657          */
3658         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3659                 /* Simply reorder based on new priority */
3660                 if (new_priority < old_priority) {
3661                         camq_change_priority(queue, pinfo->index,
3662                                              new_priority);
3663                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3664                                         ("changed priority to %d\n",
3665                                          new_priority));
3666                 }
3667                 retval = 0;
3668         } else {
3669                 /* New entry on the queue */
3670                 if (new_priority < old_priority)
3671                         pinfo->priority = new_priority;
3672
3673                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3674                                 ("Inserting onto queue\n"));
3675                 pinfo->generation = ++queue->generation;
3676                 camq_insert(queue, pinfo);
3677                 retval = 1;
3678         }
3679         return (retval);
3680 }
3681
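/*
 * Drain the bus's allocation queue:  while alloc openings remain and
 * the queue is not frozen, pop the highest-priority device, allocate
 * a CCB for it, and hand that CCB to the highest-priority peripheral
 * waiting on the device's drvq via its periph_start routine.
 */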
3682 static void
3683 xpt_run_dev_allocq(struct cam_eb *bus)
3684 {
3685         struct  cam_devq *devq;
3686
3687         if ((devq = bus->sim->devq) == NULL) {
3688                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
3689                 return;
3690         }
3691         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3692
3693         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3694                         ("   qfrozen_cnt == 0x%x, entries == %d, "
3695                          "openings == %d, active == %d\n",
3696                          devq->alloc_queue.qfrozen_cnt,
3697                          devq->alloc_queue.entries,
3698                          devq->alloc_openings,
3699                          devq->alloc_active));
3700
3701         crit_enter();
3702         devq->alloc_queue.qfrozen_cnt++;
3703         while ((devq->alloc_queue.entries > 0)
3704             && (devq->alloc_openings > 0)
3705             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3706                 struct  cam_ed_qinfo *qinfo;
3707                 struct  cam_ed *device;
3708                 union   ccb *work_ccb;
3709                 struct  cam_periph *drv;
3710                 struct  camq *drvq;
3711                 
3712                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3713                                                            CAMQ_HEAD);
3714                 device = qinfo->device;
3715
3716                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3717                                 ("running device %p\n", device));
3718
3719                 drvq = &device->drvq;
3720
3721 #ifdef CAMDEBUG
3722                 if (drvq->entries <= 0) {
3723                         panic("xpt_run_dev_allocq: "
3724                               "Device on queue without any work to do");
3725                 }
3726 #endif
3727                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3728                         devq->alloc_openings--;
3729                         devq->alloc_active++;
3730                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3731                         crit_exit();
3732                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3733                                       drv->pinfo.priority);
3734                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3735                                         ("calling periph start\n"));
3736                         drv->periph_start(drv, work_ccb);
3737                 } else {
3738                         /*
3739                          * Malloc failure in alloc_ccb
3740                          */
3741                         /*
3742                          * XXX add us to a list to be run from free_ccb
3743                          * if we don't have any ccbs active on this
3744                          * device queue otherwise we may never get run
3745                          * again.
3746                          */
3747                         break;
3748                 }
3749         
3750                 /* Raise IPL for possible insertion and test at top of loop */
3751                 crit_enter();
3752
3753                 if (drvq->entries > 0) {
3754                         /* We have more work.  Attempt to reschedule */
3755                         xpt_schedule_dev_allocq(bus, device);
3756                 }
3757         }
3758         devq->alloc_queue.qfrozen_cnt--;
3759         crit_exit();
3760 }
3761
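/*
 * Drain the bus's send queue:  while send openings remain and the
 * queue is not frozen, pop the highest-priority device and pass its
 * next CCB to the SIM.  High powered CCBs are parked on highpowerq
 * when no high power slots are free, and frozen devices are skipped
 * entirely.
 */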
3762 static void
3763 xpt_run_dev_sendq(struct cam_eb *bus)
3764 {
3765         struct  cam_devq *devq;
3766
3767         if ((devq = bus->sim->devq) == NULL) {
3768                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3769                 return;
3770         }
3771         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3772
3773         crit_enter();
3774         devq->send_queue.qfrozen_cnt++;
3775         while ((devq->send_queue.entries > 0)
3776             && (devq->send_openings > 0)) {
3777                 struct  cam_ed_qinfo *qinfo;
3778                 struct  cam_ed *device;
3779                 union ccb *work_ccb;
3780                 struct  cam_sim *sim;
3781
3782                 if (devq->send_queue.qfrozen_cnt > 1) {
3783                         break;
3784                 }
3785
3786                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3787                                                            CAMQ_HEAD);
3788                 device = qinfo->device;
3789
3790                 /*
3791                  * If the device has been "frozen", don't attempt
3792                  * to run it.
3793                  */
3794                 if (device->qfrozen_cnt > 0) {
3795                         continue;
3796                 }
3797
3798                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3799                                 ("running device %p\n", device));
3800
3801                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3802                 if (work_ccb == NULL) {
3803                         kprintf("device on run queue with no ccbs???\n");
3804                         continue;
3805                 }
3806
3807                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3808
3809                         if (num_highpower <= 0) {
3810                                 /*
3811                                  * We got a high power command, but we
3812                                  * don't have any available slots.  Freeze
3813                                  * the device queue until we have a slot
3814                                  * available.
3815                                  */
3816                                 device->qfrozen_cnt++;
3817                                 STAILQ_INSERT_TAIL(&highpowerq, 
3818                                                    &work_ccb->ccb_h, 
3819                                                    xpt_links.stqe);
3820
3821                                 continue;
3822                         } else {
3823                                 /*
3824                                  * Consume a high power slot while
3825                                  * this ccb runs.
3826                                  */
3827                                 num_highpower--;
3828                         }
3829                 }
3830                 devq->active_dev = device;
3831                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3832
3833                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3834
3835                 devq->send_openings--;
3836                 devq->send_active++;            
3837                 
3838                 if (device->ccbq.queue.entries > 0)
3839                         xpt_schedule_dev_sendq(bus, device);
3840
3841                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3842                         /*
3843                          * The client wants to freeze the queue
3844                          * after this CCB is sent.
3845                          */
3846                         device->qfrozen_cnt++;
3847                 }
3848
3849                 /* In Target mode, the peripheral driver knows best... */
3850                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3851                         if ((device->inq_flags & SID_CmdQue) != 0
3852                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3853                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3854                         else
3855                                 /*
3856                                  * Clear this in case of a retried CCB that
3857                                  * failed due to a rejected tag.
3858                                  */
3859                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3860                 }
3861
3862                 /*
3863                  * Device queues can be shared among multiple sim instances
3864                  * that reside on different busses.  Use the SIM in the queue
3865                  * CCB's path, rather than the one in the bus that was passed
3866                  * into this function.
3867                  */
3868                 sim = work_ccb->ccb_h.path->bus->sim;
3869                 (*(sim->sim_action))(sim, work_ccb);
3870
3871                 devq->active_dev = NULL;
3872                 /* Raise IPL for possible insertion and test at top of loop */
3873         }
3874         devq->send_queue.qfrozen_cnt--;
3875         crit_exit();
3876 }
3877
3878 /*
3879  * This function merges the request-specific fields from the slave ccb into
3880  * the master ccb, while keeping important fields in the master ccb constant.
3881  */
3882 void
3883 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3884 {
3885         /*
3886          * Pull fields that are valid for peripheral drivers to set
3887          * into the master CCB along with the CCB "payload".
3888          */
3889         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3890         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3891         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3892         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3893         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3894               sizeof(union ccb) - sizeof(struct ccb_hdr));
3895 }
3896
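/*
 * Initialize the common header of a CCB from the given path and
 * priority, filling in wildcard target/lun ids when the path does
 * not name a specific target or device.
 */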
3897 void
3898 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3899 {
3900         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3901         callout_init(&ccb_h->timeout_ch);
3902         ccb_h->pinfo.priority = priority;
3903         ccb_h->path = path;
3904         ccb_h->path_id = path->bus->path_id;
3905         if (path->target)
3906                 ccb_h->target_id = path->target->target_id;
3907         else
3908                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3909         if (path->device) {
3910                 ccb_h->target_lun = path->device->lun_id;
3911                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3912         } else {
3913                 ccb_h->target_lun = CAM_LUN_WILDCARD;
3914         }
3915         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3916         ccb_h->flags = 0;
3917 }
3918
3919 /* Path manipulation functions */
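/*
 * Typical usage sketch (illustrative variable names, not taken from
 * this file):
 *
 *      struct cam_path *path;
 *
 *      if (xpt_create_path(&path, NULL, path_id, target_id,
 *                          lun_id) != CAM_REQ_CMP)
 *              return;
 *      ...issue CCBs against path...
 *      xpt_free_path(path);
 */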
3920 cam_status
3921 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3922                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3923 {
3924         struct     cam_path *path;
3925         cam_status status;
3926
3927         path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
3928         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3929         if (status != CAM_REQ_CMP) {
3930                 kfree(path, M_CAMXPT);
3931                 path = NULL;
3932         }
3933         *new_path_ptr = path;
3934         return (status);
3935 }
3936
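/*
 * Resolve a path_id/target_id/lun_id triple into an already-allocated
 * cam_path, creating target and device nodes in the EDT on demand.
 * The caller's structure is only written on success; on failure any
 * references taken along the way are released.
 */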
3937 static cam_status
3938 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3939                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3940 {
3941         struct       cam_eb *bus;
3942         struct       cam_et *target;
3943         struct       cam_ed *device;
3944         cam_status   status;
3945
3946         status = CAM_REQ_CMP;   /* Completed without error */
3947         target = NULL;          /* Wildcarded */
3948         device = NULL;          /* Wildcarded */
3949
3950         /*
3951          * We will potentially modify the EDT, so block interrupts
3952          * that may attempt to create cam paths.
3953          */
3954         crit_enter();
3955         bus = xpt_find_bus(path_id);
3956         if (bus == NULL) {
3957                 status = CAM_PATH_INVALID;
3958         } else {
3959                 target = xpt_find_target(bus, target_id);
3960                 if (target == NULL) {
3961                         /* Create one */
3962                         struct cam_et *new_target;
3963
3964                         new_target = xpt_alloc_target(bus, target_id);
3965                         if (new_target == NULL) {
3966                                 status = CAM_RESRC_UNAVAIL;
3967                         } else {
3968                                 target = new_target;
3969                         }
3970                 }
3971                 if (target != NULL) {
3972                         device = xpt_find_device(target, lun_id);
3973                         if (device == NULL) {
3974                                 /* Create one */
3975                                 struct cam_ed *new_device;
3976
3977                                 new_device = xpt_alloc_device(bus,
3978                                                               target,
3979                                                               lun_id);
3980                                 if (new_device == NULL) {
3981                                         status = CAM_RESRC_UNAVAIL;
3982                                 } else {
3983                                         device = new_device;
3984                                 }
3985                         }
3986                 }
3987         }
3988         crit_exit();
3989
3990         /*
3991          * Only touch the user's data if we are successful.
3992          */
3993         if (status == CAM_REQ_CMP) {
3994                 new_path->periph = perph;
3995                 new_path->bus = bus;
3996                 new_path->target = target;
3997                 new_path->device = device;
3998                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3999         } else {
4000                 if (device != NULL)
4001                         xpt_release_device(bus, target, device);
4002                 if (target != NULL)
4003                         xpt_release_target(bus, target);
4004                 if (bus != NULL)
4005                         xpt_release_bus(bus);
4006         }
4007         return (status);
4008 }
4009
4010 static void
4011 xpt_release_path(struct cam_path *path)
4012 {
4013         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4014         if (path->device != NULL) {
4015                 xpt_release_device(path->bus, path->target, path->device);
4016                 path->device = NULL;
4017         }
4018         if (path->target != NULL) {
4019                 xpt_release_target(path->bus, path->target);
4020                 path->target = NULL;
4021         }
4022         if (path->bus != NULL) {
4023                 xpt_release_bus(path->bus);
4024                 path->bus = NULL;
4025         }
4026 }
4027
4028 void
4029 xpt_free_path(struct cam_path *path)
4030 {
4031         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4032         xpt_release_path(path);
4033         kfree(path, M_CAMXPT);
4034 }
4035
4036
4037 /*
4038  * Return -1 if the paths do not match, 0 for an exact match, 1 for a
4039  * match with wildcards in path1, 2 for a match with wildcards in path2.
4040  */
4041 int
4042 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4043 {
4044         int retval = 0;
4045
4046         if (path1->bus != path2->bus) {
4047                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4048                         retval = 1;
4049                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4050                         retval = 2;
4051                 else
4052                         return (-1);
4053         }
4054         if (path1->target != path2->target) {
4055                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4056                         if (retval == 0)
4057                                 retval = 1;
4058                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4059                         retval = 2;
4060                 else
4061                         return (-1);
4062         }
4063         if (path1->device != path2->device) {
4064                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4065                         if (retval == 0)
4066                                 retval = 1;
4067                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4068                         retval = 2;
4069                 else
4070                         return (-1);
4071         }
4072         return (retval);
4073 }
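
/*
 * Example: callers usually only care whether the paths overlap at all,
 * possibly via wildcards.  A sketch, with hypothetical async_path and
 * periph_path variables:
 *
 *	if (xpt_path_comp(async_path, periph_path) >= 0) {
 *		... the event applies to this peripheral ...
 *	}
 */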
4074
4075 void
4076 xpt_print_path(struct cam_path *path)
4077 {
4078         if (path == NULL)
4079                 kprintf("(nopath): ");
4080         else {
4081                 if (path->periph != NULL)
4082                         kprintf("(%s%d:", path->periph->periph_name,
4083                                path->periph->unit_number);
4084                 else
4085                         kprintf("(noperiph:");
4086
4087                 if (path->bus != NULL)
4088                         kprintf("%s%d:%d:", path->bus->sim->sim_name,
4089                                path->bus->sim->unit_number,
4090                                path->bus->sim->bus_id);
4091                 else
4092                         kprintf("nobus:");
4093
4094                 if (path->target != NULL)
4095                         kprintf("%d:", path->target->target_id);
4096                 else
4097                         kprintf("X:");
4098
4099                 if (path->device != NULL)
4100                         kprintf("%d): ", path->device->lun_id);
4101                 else
4102                         kprintf("X): ");
4103         }
4104 }
4105
4106 int
4107 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4108 {
4109         struct sbuf sb;
4110
4111         sbuf_new(&sb, str, str_len, 0);
4112
4113         if (path == NULL)
4114                 sbuf_printf(&sb, "(nopath): ");
4115         else {
4116                 if (path->periph != NULL)
4117                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4118                                     path->periph->unit_number);
4119                 else
4120                         sbuf_printf(&sb, "(noperiph:");
4121
4122                 if (path->bus != NULL)
4123                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4124                                     path->bus->sim->unit_number,
4125                                     path->bus->sim->bus_id);
4126                 else
4127                         sbuf_printf(&sb, "nobus:");
4128
4129                 if (path->target != NULL)
4130                         sbuf_printf(&sb, "%d:", path->target->target_id);
4131                 else
4132                         sbuf_printf(&sb, "X:");
4133
4134                 if (path->device != NULL)
4135                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
4136                 else
4137                         sbuf_printf(&sb, "X): ");
4138         }
4139         sbuf_finish(&sb);
4140
4141         return(sbuf_len(&sb));
4142 }
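
/*
 * Example: rendering a path into a caller-supplied buffer for a
 * driver's own diagnostics.  A sketch; the buffer size is arbitrary:
 *
 *	char pathstr[64];
 *
 *	xpt_path_string(path, pathstr, sizeof(pathstr));
 *	kprintf("%scommand timed out\n", pathstr);
 */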
4143
4144 path_id_t
4145 xpt_path_path_id(struct cam_path *path)
4146 {
4147         return(path->bus->path_id);
4148 }
4149
4150 target_id_t
4151 xpt_path_target_id(struct cam_path *path)
4152 {
4153         if (path->target != NULL)
4154                 return (path->target->target_id);
4155         else
4156                 return (CAM_TARGET_WILDCARD);
4157 }
4158
4159 lun_id_t
4160 xpt_path_lun_id(struct cam_path *path)
4161 {
4162         if (path->device != NULL)
4163                 return (path->device->lun_id);
4164         else
4165                 return (CAM_LUN_WILDCARD);
4166 }
4167
4168 struct cam_sim *
4169 xpt_path_sim(struct cam_path *path)
4170 {
4171         return (path->bus->sim);
4172 }
4173
4174 struct cam_periph*
4175 xpt_path_periph(struct cam_path *path)
4176 {
4177         return (path->periph);
4178 }
4179
4180 /*
4181  * Release a CAM control block for the caller.  Remit the cost of the structure
4182  * to the device referenced by the path.  If this device had no 'credits'
4183  * and peripheral drivers have registered async callbacks for this notification,
4184  * call them now.
4185  */
4186 void
4187 xpt_release_ccb(union ccb *free_ccb)
4188 {
4189         struct   cam_path *path;
4190         struct   cam_ed *device;
4191         struct   cam_eb *bus;
4192
4193         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4194         path = free_ccb->ccb_h.path;
4195         device = path->device;
4196         bus = path->bus;
4197         crit_enter();
4198         cam_ccbq_release_opening(&device->ccbq);
4199         if (xpt_ccb_count > xpt_max_ccbs) {
4200                 xpt_free_ccb(free_ccb);
4201                 xpt_ccb_count--;
4202         } else {
4203                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4204         }
4205         if (bus->sim->devq == NULL) {
4206                 crit_exit();
4207                 return;
4208         }
4209         bus->sim->devq->alloc_openings++;
4210         bus->sim->devq->alloc_active--;
4211         /* XXX Turn this into an inline function - xpt_run_device?? */
4212         if ((device_is_alloc_queued(device) == 0)
4213          && (device->drvq.entries > 0)) {
4214                 xpt_schedule_dev_allocq(bus, device);
4215         }
4216         crit_exit();
4217         if (bus->sim->devq && dev_allocq_is_runnable(bus->sim->devq))
4218                 xpt_run_dev_allocq(bus);
4219 }
4220
4221 /* Functions accessed by SIM drivers */
4222
4223 /*
4224  * A sim structure, listing the SIM entry points and instance
4225  * identification info is passed to xpt_bus_register to hook the SIM
4226  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4227  * for this new bus and places it in the array of busses and assigns
4228  * it a path_id.  The path_id may be influenced by "hard wiring"
4229  * information specified by the user.  Once interrupt services are
4230  * availible, the bus will be probed.
4231  */
4232 int32_t
4233 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4234 {
4235         struct cam_eb *new_bus;
4236         struct cam_eb *old_bus;
4237         struct ccb_pathinq cpi;
4238
4239         sim->bus_id = bus;
4240         new_bus = kmalloc(sizeof(*new_bus), M_CAMXPT, M_INTWAIT);
4241
4242         if (strcmp(sim->sim_name, "xpt") != 0) {
4243                 sim->path_id =
4244                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4245         }
4246
4247         TAILQ_INIT(&new_bus->et_entries);
4248         new_bus->path_id = sim->path_id;
4249         new_bus->sim = sim;
4250         ++sim->refcount;
4251         timevalclear(&new_bus->last_reset);
4252         new_bus->flags = 0;
4253         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4254         new_bus->generation = 0;
4255         crit_enter();
4256         old_bus = TAILQ_FIRST(&xpt_busses);
4257         while (old_bus != NULL
4258             && old_bus->path_id < new_bus->path_id)
4259                 old_bus = TAILQ_NEXT(old_bus, links);
4260         if (old_bus != NULL)
4261                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4262         else
4263                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4264         bus_generation++;
4265         crit_exit();
4266
4267         /* Notify interested parties */
4268         if (sim->path_id != CAM_XPT_PATH_ID) {
4269                 struct cam_path path;
4270
4271                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4272                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4273                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4274                 cpi.ccb_h.func_code = XPT_PATH_INQ;
4275                 xpt_action((union ccb *)&cpi);
4276                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4277                 xpt_release_path(&path);
4278         }
4279         return (CAM_SUCCESS);
4280 }
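
/*
 * Example: the registration sequence a SIM driver typically performs
 * before xpt_bus_register().  A sketch only; mydriver_action,
 * mydriver_poll, sc, unit, and MAX_OPENINGS are hypothetical driver
 * entities, and the allocation helpers live in cam_sim.c:
 *
 *	struct cam_devq *devq;
 *	struct cam_sim *sim;
 *
 *	devq = cam_simq_alloc(MAX_OPENINGS);
 *	sim = cam_sim_alloc(mydriver_action, mydriver_poll, "mydriver",
 *			    sc, unit, 1, MAX_OPENINGS, devq);
 *	if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		... clean up and fail the attach ...
 */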
4281
4282 /*
4283  * Deregister a bus.  We must clean out all transactions pending on the bus.
4284  * This routine is typically called prior to cam_sim_free() (e.g. see
4285  * dev/usbmisc/umass/umass.c)
4286  */
4287 int32_t
4288 xpt_bus_deregister(path_id_t pathid)
4289 {
4290         struct cam_path bus_path;
4291         struct cam_ed *device;
4292         struct cam_ed_qinfo *qinfo;
4293         struct cam_devq *devq;
4294         struct cam_periph *periph;
4295         struct cam_sim *ccbsim;
4296         union ccb *work_ccb;
4297         cam_status status;
4298
4299         status = xpt_compile_path(&bus_path, NULL, pathid,
4300                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4301         if (status != CAM_REQ_CMP)
4302                 return (status);
4303
4304         /*
4305          * This should clear out all pending requests and timeouts, but
4306          * the ccb's may be queued to a software interrupt.
4307          *
4308          * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4309          * and it really ought to.
4310          */
4311         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4312         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4313
4314         /* The SIM may be gone, so use a dummy SIM for any stray operations. */
4315         devq = bus_path.bus->sim->devq;
4316         bus_path.bus->sim = &cam_dead_sim;
4317
4318         /* Execute any pending operations now. */
4319         while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4320             CAMQ_HEAD)) != NULL ||
4321             (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4322             CAMQ_HEAD)) != NULL) {
4323                 do {
4324                         device = qinfo->device;
4325                         work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4326                         if (work_ccb != NULL) {
4327                                 devq->active_dev = device;
4328                                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4329                                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4330                                 ccbsim = work_ccb->ccb_h.path->bus->sim;
4331                                 (*(ccbsim->sim_action))(ccbsim, work_ccb);
4332                         }
4333
4334                         periph = (struct cam_periph *)camq_remove(&device->drvq,
4335                             CAMQ_HEAD);
4336                         if (periph != NULL)
4337                                 xpt_schedule(periph, periph->pinfo.priority);
4338                 } while (work_ccb != NULL || periph != NULL);
4339         }
4340
4341         /* Make sure all completed CCBs are processed. */
4342         while (!TAILQ_EMPTY(&cam_bioq)) {
4343                 camisr(&cam_bioq);
4344
4345                 /* Repeat the async's for the benefit of any new devices. */
4346                 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4347                 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4348         }
4349
4350         /* Release the reference count held while registered. */
4351         xpt_release_bus(bus_path.bus);
4352         xpt_release_path(&bus_path);
4353
4354         /* Recheck for more completed CCBs. */
4355         while (!TAILQ_EMPTY(&cam_bioq))
4356                 camisr(&cam_bioq);
4357
4358         return (CAM_REQ_CMP);
4359 }
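
/*
 * Example: the teardown order a detaching driver follows, per the
 * comment above.  A sketch; sc->sim is a hypothetical softc member:
 *
 *	xpt_bus_deregister(cam_sim_path(sc->sim));
 *
 * after which the driver disposes of the SIM with cam_sim_free().
 */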
4360
4361 static path_id_t
4362 xptnextfreepathid(void)
4363 {
4364         struct cam_eb *bus;
4365         path_id_t pathid;
4366         char *strval;
4367
4368         pathid = 0;
4369         bus = TAILQ_FIRST(&xpt_busses);
4370 retry:
4371         /* Find an unoccupied pathid */
4372         while (bus != NULL
4373             && bus->path_id <= pathid) {
4374                 if (bus->path_id == pathid)
4375                         pathid++;
4376                 bus = TAILQ_NEXT(bus, links);
4377         }
4378
4379         /*
4380          * Ensure that this pathid is not reserved for
4381          * a bus that may be registered in the future.
4382          */
4383         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4384                 ++pathid;
4385                 /* Start the search over */
4386                 goto retry;
4387         }
4388         return (pathid);
4389 }
4390
4391 static path_id_t
4392 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4393 {
4394         path_id_t pathid;
4395         int i, dunit, val;
4396         char buf[32];
4397
4398         pathid = CAM_XPT_PATH_ID;
4399         ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4400         i = -1;
4401         while ((i = resource_query_string(i, "at", buf)) != -1) {
4402                 if (strcmp(resource_query_name(i), "scbus")) {
4403                         /* Avoid a bit of foot shooting. */
4404                         continue;
4405                 }
4406                 dunit = resource_query_unit(i);
4407                 if (dunit < 0)          /* unwired?! */
4408                         continue;
4409                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4410                         if (sim_bus == val) {
4411                                 pathid = dunit;
4412                                 break;
4413                         }
4414                 } else if (sim_bus == 0) {
4415                         /* Unspecified matches bus 0 */
4416                         pathid = dunit;
4417                         break;
4418                 } else {
4419                         kprintf("Ambiguous scbus configuration for %s%d "
4420                                "bus %d, cannot wire down.  The kernel "
4421                                "config entry for scbus%d should "
4422                                "specify a controller bus.\n"
4423                                "Scbus will be assigned dynamically.\n",
4424                                sim_name, sim_unit, sim_bus, dunit);
4425                         break;
4426                 }
4427         }
4428
4429         if (pathid == CAM_XPT_PATH_ID)
4430                 pathid = xptnextfreepathid();
4431         return (pathid);
4432 }
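
/*
 * Example: the "hard wiring" consulted above comes from kernel
 * configuration entries of the form (illustrative; the controller name
 * and numbers are arbitrary):
 *
 *	device scbus0 at ahc0 bus 0
 *
 * which pins path id 0 to bus 0 of ahc0.  Unwired controllers fall
 * through to xptnextfreepathid().
 */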
4433
4434 void
4435 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4436 {
4437         struct cam_eb *bus;
4438         struct cam_et *target, *next_target;
4439         struct cam_ed *device, *next_device;
4440
4441         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4442
4443         /*
4444          * Most async events come from a CAM interrupt context.  In
4445          * a few cases, the error recovery code at the peripheral layer,
4446          * which may run from our SWI or a process context, may signal
4447          * deferred events with a call to xpt_async. Ensure async
4448          * notifications are serialized by blocking cam interrupts.
4449          */
4450         crit_enter();
4451
4452         bus = path->bus;
4453
4454         if (async_code == AC_BUS_RESET) { 
4455                 /* Update our notion of when the last reset occurred */
4456                 microuptime(&bus->last_reset);
4457         }
4458
4459         for (target = TAILQ_FIRST(&bus->et_entries);
4460              target != NULL;
4461              target = next_target) {
4462
4463                 next_target = TAILQ_NEXT(target, links);
4464
4465                 if (path->target != target
4466                  && path->target->target_id != CAM_TARGET_WILDCARD
4467                  && target->target_id != CAM_TARGET_WILDCARD)
4468                         continue;
4469
4470                 if (async_code == AC_SENT_BDR) {
4471                         /* Update our notion of when the last reset occurred */
4472                         microuptime(&path->target->last_reset);
4473                 }
4474
4475                 for (device = TAILQ_FIRST(&target->ed_entries);
4476                      device != NULL;
4477                      device = next_device) {
4478
4479                         next_device = TAILQ_NEXT(device, links);
4480
4481                         if (path->device != device 
4482                          && path->device->lun_id != CAM_LUN_WILDCARD
4483                          && device->lun_id != CAM_LUN_WILDCARD)
4484                                 continue;
4485
4486                         xpt_dev_async(async_code, bus, target,
4487                                       device, async_arg);
4488
4489                         xpt_async_bcast(&device->asyncs, async_code,
4490                                         path, async_arg);
4491                 }
4492         }
4493         
4494         /*
4495          * If this wasn't a fully wildcarded async, tell all
4496          * clients that want all async events.
4497          */
4498         if (bus != xpt_periph->path->bus)
4499                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4500                                 path, async_arg);
4501         crit_exit();
4502 }
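
/*
 * Example: a SIM or error-recovery path reporting a bus reset, so the
 * last_reset timestamps above are refreshed and interested peripherals
 * are notified.  A sketch; "path" must reference the bus that was
 * reset:
 *
 *	xpt_async(AC_BUS_RESET, path, NULL);
 */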
4503
4504 static void
4505 xpt_async_bcast(struct async_list *async_head,
4506                 u_int32_t async_code,
4507                 struct cam_path *path, void *async_arg)
4508 {
4509         struct async_node *cur_entry;
4510
4511         cur_entry = SLIST_FIRST(async_head);
4512         while (cur_entry != NULL) {
4513                 struct async_node *next_entry;
4514                 /*
4515                  * Grab the next list entry before we call the current
4516                  * entry's callback.  This is because the callback function
4517                  * can delete its async callback entry.
4518                  */
4519                 next_entry = SLIST_NEXT(cur_entry, links);
4520                 if ((cur_entry->event_enable & async_code) != 0)
4521                         cur_entry->callback(cur_entry->callback_arg,
4522                                             async_code, path,
4523                                             async_arg);
4524                 cur_entry = next_entry;
4525         }
4526 }
4527
4528 /*
4529  * Handle any per-device event notifications that require action by the XPT.
4530  */
4531 static void
4532 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4533               struct cam_ed *device, void *async_arg)
4534 {
4535         cam_status status;
4536         struct cam_path newpath;
4537
4538         /*
4539          * We only need to handle events for real devices.
4540          */
4541         if (target->target_id == CAM_TARGET_WILDCARD
4542          || device->lun_id == CAM_LUN_WILDCARD)
4543                 return;
4544
4545         /*
4546          * We need our own path with wildcards expanded to
4547          * handle certain types of events.
4548          */
4549         if ((async_code == AC_SENT_BDR)
4550          || (async_code == AC_BUS_RESET)
4551          || (async_code == AC_INQ_CHANGED))
4552                 status = xpt_compile_path(&newpath, NULL,
4553                                           bus->path_id,
4554                                           target->target_id,
4555                                           device->lun_id);
4556         else
4557                 status = CAM_REQ_CMP_ERR;
4558
4559         if (status == CAM_REQ_CMP) {
4560
4561                 /*
4562                  * Allow transfer negotiation to occur in a
4563                  * tag free environment.
4564                  */
4565                 if (async_code == AC_SENT_BDR
4566                  || async_code == AC_BUS_RESET)
4567                         xpt_toggle_tags(&newpath);
4568
4569                 if (async_code == AC_INQ_CHANGED) {
4570                         /*
4571                          * We've sent a start unit command or
4572                          * something similar to a device that
4573                          * may have caused its inquiry data to
4574                          * change, so we re-scan the device to
4575                          * refresh its inquiry data.
4576                          */
4577                         xpt_scan_lun(newpath.periph, &newpath,
4578                                      CAM_EXPECT_INQ_CHANGE, NULL);
4579                 }
4580                 xpt_release_path(&newpath);
4581         } else if (async_code == AC_LOST_DEVICE) {
4582                 /*
4583                  * When we lose a device, the device may be about to detach
4584                  * the sim, so we have to clear out all pending timeouts and
4585                  * requests before that happens.  XXX it would be nice if
4586                  * we could abort the requests pertaining to the device.
4587                  */
4588                 xpt_release_devq_timeout(device);
4589                 if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4590                         device->flags |= CAM_DEV_UNCONFIGURED;
4591                         xpt_release_device(bus, target, device);
4592                 }
4593         } else if (async_code == AC_TRANSFER_NEG) {
4594                 struct ccb_trans_settings *settings;
4595
4596                 settings = (struct ccb_trans_settings *)async_arg;
4597                 xpt_set_transfer_settings(settings, device,
4598                                           /*async_update*/TRUE);
4599         }
4600 }
4601
4602 u_int32_t
4603 xpt_freeze_devq(struct cam_path *path, u_int count)
4604 {
4605         struct ccb_hdr *ccbh;
4606
4607         crit_enter();
4608         path->device->qfrozen_cnt += count;
4609
4610         /*
4611          * Mark the last CCB in the queue as needing
4612          * to be requeued if the driver hasn't
4613          * changed its state yet.  This fixes a race
4614          * where a ccb is just about to be queued to
4615          * a controller driver when its interrupt routine
4616          * freezes the queue.  To completely close the
4617          * hole, controller drivers must check to see
4618          * if a ccb's status is still CAM_REQ_INPROG
4619          * under critical section protection just before they queue
4620          * the CCB.  See ahc_action/ahc_freeze_devq for
4621          * an example.
4622          */
4623         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4624         if (ccbh && ccbh->status == CAM_REQ_INPROG)
4625                 ccbh->status = CAM_REQUEUE_REQ;
4626         crit_exit();
4627         return (path->device->qfrozen_cnt);
4628 }
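
/*
 * Example: the usual error-path pairing in a controller driver, which
 * freezes the device queue and marks the CCB so the peripheral layer
 * knows a release is owed.  A sketch of the pattern referenced above
 * (see ahc for a real instance):
 *
 *	xpt_freeze_devq(ccb->ccb_h.path, 1);
 *	ccb->ccb_h.status |= CAM_DEV_QFRZN;
 *	xpt_done(ccb);
 */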
4629
4630 u_int32_t
4631 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4632 {
4633         if (sim->devq == NULL)
4634                 return(count);
4635         sim->devq->send_queue.qfrozen_cnt += count;
4636         if (sim->devq->active_dev != NULL) {
4637                 struct ccb_hdr *ccbh;
4638                 
4639                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4640                                   ccb_hdr_tailq);
4641                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4642                         ccbh->status = CAM_REQUEUE_REQ;
4643         }
4644         return (sim->devq->send_queue.qfrozen_cnt);
4645 }
4646
4647 /*
4648  * WARNING: most devices, especially USB/UMASS, may detach their sim early.
4649  * We ref-count the sim (and the bus only NULLs it out when the bus has been
4650  * freed, which is not the case here), but the device queue is also freed
4651  * (XXX) and we have to check for that here.
4652  *
4653  * XXX fixme: could we simply not null-out the device queue via 
4654  * cam_sim_free()?
4655  */
4656 static void
4657 xpt_release_devq_timeout(void *arg)
4658 {
4659         struct cam_ed *device;
4660
4661         device = (struct cam_ed *)arg;
4662
4663         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4664 }
4665
4666 void
4667 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4668 {
4669         xpt_release_devq_device(path->device, count, run_queue);
4670 }
4671
4672 static void
4673 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4674 {
4675         int     rundevq;
4676
4677         rundevq = 0;
4678         crit_enter();
4679
4680         if (dev->qfrozen_cnt > 0) {
4681
4682                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4683                 dev->qfrozen_cnt -= count;
4684                 if (dev->qfrozen_cnt == 0) {
4685
4686                         /*
4687                          * No longer need to wait for a successful
4688                          * command completion.
4689                          */
4690                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4691
4692                         /*
4693                          * Remove any timeouts that might be scheduled
4694                          * to release this queue.
4695                          */
4696                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4697                                 callout_stop(&dev->c_handle);
4698                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4699                         }
4700
4701                         /*
4702                          * Now that we are unfrozen schedule the
4703                          * device so any pending transactions are
4704                          * run.
4705                          */
4706                         if ((dev->ccbq.queue.entries > 0)
4707                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4708                          && (run_queue != 0)) {
4709                                 rundevq = 1;
4710                         }
4711                 }
4712         }
4713         if (rundevq != 0)
4714                 xpt_run_dev_sendq(dev->target->bus);
4715         crit_exit();
4716 }
4717
4718 void
4719 xpt_release_simq(struct cam_sim *sim, int run_queue)
4720 {
4721         struct  camq *sendq;
4722
4723         if (sim->devq == NULL)
4724                 return;
4725
4726         sendq = &(sim->devq->send_queue);
4727         crit_enter();
4728
4729         if (sendq->qfrozen_cnt > 0) {
4730                 sendq->qfrozen_cnt--;
4731                 if (sendq->qfrozen_cnt == 0) {
4732                         struct cam_eb *bus;
4733
4734                         /*
4735                          * If there is a timeout scheduled to release this
4736                          * sim queue, remove it.  The queue frozen count is
4737                          * already at 0.
4738                          */
4739                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4740                                 callout_stop(&sim->c_handle);
4741                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4742                         }
4743                         bus = xpt_find_bus(sim->path_id);
4744                         crit_exit();
4745
4746                         if (run_queue) {
4747                                 /*
4748                                  * Now that we are unfrozen run the send queue.
4749                                  */
4750                                 xpt_run_dev_sendq(bus);
4751                         }
4752                         xpt_release_bus(bus);
4753                 } else {
4754                         crit_exit();
4755                 }
4756         } else {
4757                 crit_exit();
4758         }
4759 }
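
/*
 * Example: a SIM that runs out of internal resources can freeze its
 * queue, ask for the request to be requeued, and release the queue
 * once resources return.  A sketch of that pattern, assuming "sim" is
 * the SIM that received the CCB:
 *
 *	xpt_freeze_simq(sim, 1);
 *	ccb->ccb_h.status = CAM_REQUEUE_REQ;
 *	xpt_done(ccb);
 *	...
 *	xpt_release_simq(sim, TRUE);
 */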
4760
4761 void
4762 xpt_done(union ccb *done_ccb)
4763 {
4764         crit_enter();
4765
4766         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4767         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4768                 /*
4769                  * Queue up any of the "non-immediate" type of ccbs
4770                  * for handling by our SWI handler.
4771                  */
4772                 switch (done_ccb->ccb_h.path->periph->type) {
4773                 case CAM_PERIPH_BIO:
4774                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4775                                           sim_links.tqe);
4776                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4777                         setsoftcambio();
4778                         break;
4779                 default:
4780                         panic("unknown periph type %d",
4781                                 done_ccb->ccb_h.path->periph->type);
4782                 }
4783         }
4784         crit_exit();
4785 }
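
/*
 * Example: the tail of a SIM's interrupt handler handing a finished
 * CCB back to the XPT for SWI-level completion processing (a sketch):
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 */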
4786
4787 union ccb *
4788 xpt_alloc_ccb(void)
4789 {
4790         union ccb *new_ccb;
4791
4792         new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
4793         return (new_ccb);
4794 }
4795
4796 void
4797 xpt_free_ccb(union ccb *free_ccb)
4798 {
4799         kfree(free_ccb, M_CAMXPT);
4800 }
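
/*
 * Example: the alloc/dispatch/free pairing, as used by xpt_scan_bus()
 * below.  A sketch; "path" is an existing cam_path:
 *
 *	union ccb *work_ccb;
 *
 *	work_ccb = xpt_alloc_ccb();
 *	xpt_setup_ccb(&work_ccb->ccb_h, path, 1);
 *	work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action(work_ccb);
 *	...
 *	xpt_free_ccb(work_ccb);
 */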
4801
4802
4803
4804 /* Private XPT functions */
4805
4806 /*
4807  * Get a CAM control block for the caller. Charge the structure to the device
4808  * referenced by the path.  If this device has no 'credits' then the
4809  * device already has the maximum number of outstanding operations under way
4810  * and we return NULL. If we don't have sufficient resources to allocate more
4811  * ccbs, we also return NULL.
4812  */
4813 static union ccb *
4814 xpt_get_ccb(struct cam_ed *device)
4815 {
4816         union ccb *new_ccb;
4817
4818         crit_enter();
4819         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4820                 new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
4821                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4822                                   xpt_links.sle);
4823                 xpt_ccb_count++;
4824         }
4825         cam_ccbq_take_opening(&device->ccbq);
4826         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4827         crit_exit();
4828         return (new_ccb);
4829 }
4830
4831 static void
4832 xpt_release_bus(struct cam_eb *bus)
4833 {
4834
4835         crit_enter();
4836         if (bus->refcount == 1) {
4837                 KKASSERT(TAILQ_FIRST(&bus->et_entries) == NULL);
4838                 TAILQ_REMOVE(&xpt_busses, bus, links);
4839                 if (bus->sim) {
4840                         cam_sim_release(bus->sim, 0);
4841                         bus->sim = NULL;
4842                 }
4843                 bus_generation++;
4844                 KKASSERT(bus->refcount == 1);
4845                 kfree(bus, M_CAMXPT);
4846         } else {
4847                 --bus->refcount;
4848         }
4849         crit_exit();
4850 }
4851
4852 static struct cam_et *
4853 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4854 {
4855         struct cam_et *target;
4856         struct cam_et *cur_target;
4857
4858         target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);
4859
4860         TAILQ_INIT(&target->ed_entries);
4861         target->bus = bus;
4862         target->target_id = target_id;
4863         target->refcount = 1;
4864         target->generation = 0;
4865         timevalclear(&target->last_reset);
4866         /*
4867          * Hold a reference to our parent bus so it
4868          * will not go away before we do.
4869          */
4870         bus->refcount++;
4871
4872         /* Insertion sort into our bus's target list */
4873         cur_target = TAILQ_FIRST(&bus->et_entries);
4874         while (cur_target != NULL && cur_target->target_id < target_id)
4875                 cur_target = TAILQ_NEXT(cur_target, links);
4876
4877         if (cur_target != NULL) {
4878                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4879         } else {
4880                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4881         }
4882         bus->generation++;
4883         return (target);
4884 }
4885
4886 static void
4887 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4888 {
4889         crit_enter();
4890         if (target->refcount == 1) {
4891                 KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
4892                 TAILQ_REMOVE(&bus->et_entries, target, links);
4893                 bus->generation++;
4894                 xpt_release_bus(bus);
4895                 KKASSERT(target->refcount == 1);
4896                 kfree(target, M_CAMXPT);
4897         } else {
4898                 --target->refcount;
4899         }
4900         crit_exit();
4901 }
4902
4903 static struct cam_ed *
4904 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4905 {
4906 #ifdef CAM_NEW_TRAN_CODE
4907         struct     cam_path path;
4908 #endif /* CAM_NEW_TRAN_CODE */
4909         struct     cam_ed *device;
4910         struct     cam_devq *devq;
4911         cam_status status;
4912
4913         if (SIM_DEAD(bus->sim))
4914                 return (NULL);
4915
4916         /* Make space for us in the device queue on our bus */
4917         if (bus->sim->devq == NULL)
4918                 return(NULL);
4919         devq = bus->sim->devq;
4920         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4921
4922         if (status != CAM_REQ_CMP) {
4923                 device = NULL;
4924         } else {
4925                 device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
4926         }
4927
4928         if (device != NULL) {
4929                 struct cam_ed *cur_device;
4930
4931                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4932                 device->alloc_ccb_entry.device = device;
4933                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4934                 device->send_ccb_entry.device = device;
4935                 device->target = target;
4936                 device->lun_id = lun_id;
4937                 /* Initialize our queues */
4938                 if (camq_init(&device->drvq, 0) != 0) {
4939                         kfree(device, M_CAMXPT);
4940                         return (NULL);
4941                 }
4942                 if (cam_ccbq_init(&device->ccbq,
4943                                   bus->sim->max_dev_openings) != 0) {
4944                         camq_fini(&device->drvq);
4945                         kfree(device, M_CAMXPT);
4946                         return (NULL);
4947                 }
4948                 SLIST_INIT(&device->asyncs);
4949                 SLIST_INIT(&device->periphs);
4950                 device->generation = 0;
4951                 device->owner = NULL;
4952                 /*
4953                  * Take the default quirk entry until we have inquiry
4954                  * data and can determine a better quirk to use.
4955                  */
4956                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4957                 bzero(&device->inq_data, sizeof(device->inq_data));
4958                 device->inq_flags = 0;
4959                 device->queue_flags = 0;
4960                 device->serial_num = NULL;
4961                 device->serial_num_len = 0;
4962                 device->qfrozen_cnt = 0;
4963                 device->flags = CAM_DEV_UNCONFIGURED;
4964                 device->tag_delay_count = 0;
4965                 device->tag_saved_openings = 0;
4966                 device->refcount = 1;
4967                 callout_init(&device->c_handle);
4968
4969                 /*
4970                  * Hold a reference to our parent target so it
4971                  * will not go away before we do.
4972                  */
4973                 target->refcount++;
4974
4975                 /*
4976                  * XXX should be limited by number of CCBs this bus can
4977                  * do.
4978                  */
4979                 xpt_max_ccbs += device->ccbq.devq_openings;
4980                 /* Insertion sort into our target's device list */
4981                 cur_device = TAILQ_FIRST(&target->ed_entries);
4982                 while (cur_device != NULL && cur_device->lun_id < lun_id)
4983                         cur_device = TAILQ_NEXT(cur_device, links);
4984                 if (cur_device != NULL) {
4985                         TAILQ_INSERT_BEFORE(cur_device, device, links);
4986                 } else {
4987                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4988                 }
4989                 target->generation++;
4990 #ifdef CAM_NEW_TRAN_CODE
4991                 if (lun_id != CAM_LUN_WILDCARD) {
4992                         xpt_compile_path(&path,
4993                                          NULL,
4994                                          bus->path_id,
4995                                          target->target_id,
4996                                          lun_id);
4997                         xpt_devise_transport(&path);
4998                         xpt_release_path(&path);
4999                 }
5000 #endif /* CAM_NEW_TRAN_CODE */
5001         }
5002         return (device);
5003 }
5004
5005 static void
5006 xpt_reference_device(struct cam_ed *device)
5007 {
5008         ++device->refcount;
5009 }
5010
5011 static void
5012 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5013                    struct cam_ed *device)
5014 {
5015         struct cam_devq *devq;
5016
5017         crit_enter();
5018         if (device->refcount == 1) {
5019                 KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);
5020
5021                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5022                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5023                         panic("Removing device while still queued for ccbs");
5024
5025                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
5026                         device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
5027                         callout_stop(&device->c_handle);
5028                 }
5029
5030                 TAILQ_REMOVE(&target->ed_entries, device,links);
5031                 target->generation++;
5032                 xpt_max_ccbs -= device->ccbq.devq_openings;
5033                 if (!SIM_DEAD(bus->sim)) {
5034                         /* Release our slot in the devq */
5035                         devq = bus->sim->devq;
5036                         cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5037                 }
5038                 camq_fini(&device->drvq);
5039                 camq_fini(&device->ccbq.queue);
5040                 xpt_release_target(bus, target);
5041                 KKASSERT(device->refcount == 1);
5042                 kfree(device, M_CAMXPT);
5043         } else {
5044                 --device->refcount;
5045         }
5046         crit_exit();
5047 }
5048
5049 static u_int32_t
5050 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5051 {
5052         int     diff;
5053         int     result;
5054         struct  cam_ed *dev;
5055
5056         dev = path->device;
5057
5058         crit_enter();
5059
5060         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5061         result = cam_ccbq_resize(&dev->ccbq, newopenings);
5062         if (result == CAM_REQ_CMP && (diff < 0)) {
5063                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5064         }
5065         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5066          || (dev->inq_flags & SID_CmdQue) != 0)
5067                 dev->tag_saved_openings = newopenings;
5068         /* Adjust the global limit */
5069         xpt_max_ccbs += diff;
5070         crit_exit();
5071         return (result);
5072 }
5073
5074 static struct cam_eb *
5075 xpt_find_bus(path_id_t path_id)
5076 {
5077         struct cam_eb *bus;
5078
5079         TAILQ_FOREACH(bus, &xpt_busses, links) {
5080                 if (bus->path_id == path_id) {
5081                         bus->refcount++;
5082                         break;
5083                 }
5084         }
5085         return (bus);
5086 }
5087
5088 static struct cam_et *
5089 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5090 {
5091         struct cam_et *target;
5092
5093         TAILQ_FOREACH(target, &bus->et_entries, links) {
5094                 if (target->target_id == target_id) {
5095                         target->refcount++;
5096                         break;
5097                 }
5098         }
5099         return (target);
5100 }
5101
5102 static struct cam_ed *
5103 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5104 {
5105         struct cam_ed *device;
5106
5107         TAILQ_FOREACH(device, &target->ed_entries, links) {
5108                 if (device->lun_id == lun_id) {
5109                         device->refcount++;
5110                         break;
5111                 }
5112         }
5113         return (device);
5114 }
5115
5116 typedef struct {
5117         union   ccb *request_ccb;
5118         struct  ccb_pathinq *cpi;
5119         int     pending_count;
5120 } xpt_scan_bus_info;
5121
5122 /*
5123  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5124  * As the scan progresses, xpt_scan_bus is used as the
5125  * callback on completion function.
5126  */
5127 static void
5128 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5129 {
5130         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5131                   ("xpt_scan_bus\n"));
5132         switch (request_ccb->ccb_h.func_code) {
5133         case XPT_SCAN_BUS:
5134         {
5135                 xpt_scan_bus_info *scan_info;
5136                 union   ccb *work_ccb;
5137                 struct  cam_path *path;
5138                 u_int   i;
5139                 u_int   max_target;
5140                 u_int   initiator_id;
5141
5142                 /* Find out the characteristics of the bus */
5143                 work_ccb = xpt_alloc_ccb();
5144                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5145                               request_ccb->ccb_h.pinfo.priority);
5146                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5147                 xpt_action(work_ccb);
5148                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5149                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5150                         xpt_free_ccb(work_ccb);
5151                         xpt_done(request_ccb);
5152                         return;
5153                 }
5154
5155                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5156                         /*
5157                          * Can't scan the bus on an adapter that
5158                          * cannot perform the initiator role.
5159                          */
5160                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5161                         xpt_free_ccb(work_ccb);
5162                         xpt_done(request_ccb);
5163                         return;
5164                 }
5165
5166                 /* Save some state for use while we probe for devices */
5167                 scan_info = (xpt_scan_bus_info *)
5168                     kmalloc(sizeof(xpt_scan_bus_info), M_TEMP, M_INTWAIT);
5169                 scan_info->request_ccb = request_ccb;
5170                 scan_info->cpi = &work_ccb->cpi;
5171
5172                 /* Cache on our stack so we can work asynchronously */
5173                 max_target = scan_info->cpi->max_target;
5174                 initiator_id = scan_info->cpi->initiator_id;
5175
5176                 /*
5177                  * Don't count the initiator if the
5178                  * initiator is addressable.
5179                  */
5180                 scan_info->pending_count = max_target + 1;
5181                 if (initiator_id <= max_target)
5182                         scan_info->pending_count--;
5183
5184                 for (i = 0; i <= max_target; i++) {
5185                         cam_status status;
5186                         if (i == initiator_id)
5187                                 continue;
5188
5189                         status = xpt_create_path(&path, xpt_periph,
5190                                                  request_ccb->ccb_h.path_id,
5191                                                  i, 0);
5192                         if (status != CAM_REQ_CMP) {
5193                                 kprintf("xpt_scan_bus: xpt_create_path failed"
5194                                        " with status %#x, bus scan halted\n",
5195                                        status);
5196                                 break;
5197                         }
5198                         work_ccb = xpt_alloc_ccb();
5199                         xpt_setup_ccb(&work_ccb->ccb_h, path,
5200                                       request_ccb->ccb_h.pinfo.priority);
5201                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5202                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5203                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5204                         work_ccb->crcn.flags = request_ccb->crcn.flags;
5205                         xpt_action(work_ccb);
5206                 }
5207                 break;
5208         }
5209         case XPT_SCAN_LUN:
5210         {
5211                 xpt_scan_bus_info *scan_info;
5212                 path_id_t path_id;
5213                 target_id_t target_id;
5214                 lun_id_t lun_id;
5215
5216                 /* Reuse the same CCB to query if a device was really found */
5217                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5218                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5219                               request_ccb->ccb_h.pinfo.priority);
5220                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5221
5222                 path_id = request_ccb->ccb_h.path_id;
5223                 target_id = request_ccb->ccb_h.target_id;
5224                 lun_id = request_ccb->ccb_h.target_lun;
5225                 xpt_action(request_ccb);
5226
5227                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5228                         struct cam_ed *device;
5229                         struct cam_et *target;
5230                         int phl;
5231
5232                         /*
5233                          * If we already probed lun 0 successfully, or
5234                          * we have additional configured luns on this
5235                          * target that might have "gone away", go on to
5236                          * the next lun.
5237                          */
5238                         target = request_ccb->ccb_h.path->target;
5239                         /*
5240                          * We may touch devices that we don't
5241                          * hold references to, so ensure they
5242                          * don't disappear out from under us.
5243                          * The target above is referenced by the
5244                          * path in the request ccb.
5245                          */
5246                         phl = 0;
5247                         crit_enter();
5248                         device = TAILQ_FIRST(&target->ed_entries);
5249                         if (device != NULL) {
5250                                 phl = CAN_SRCH_HI_SPARSE(device);
5251                                 if (device->lun_id == 0)
5252                                         device = TAILQ_NEXT(device, links);
5253                         }
5254                         crit_exit();
5255                         if ((lun_id != 0) || (device != NULL)) {
5256                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5257                                         lun_id++;
5258                         }
5259                 } else {
5260                         struct cam_ed *device;
5261                         
5262                         device = request_ccb->ccb_h.path->device;
5263
5264                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5265                                 /* Try the next lun */
5266                                 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5267                                   || CAN_SRCH_HI_DENSE(device))
5268                                         lun_id++;
5269                         }
5270                 }
5271
5272                 xpt_free_path(request_ccb->ccb_h.path);
5273
5274                 /* Check Bounds */
5275                 if ((lun_id == request_ccb->ccb_h.target_lun)
5276                  || lun_id > scan_info->cpi->max_lun) {
5277                         /* We're done */
5278
5279                         xpt_free_ccb(request_ccb);
5280                         scan_info->pending_count--;
5281                         if (scan_info->pending_count == 0) {
5282                                 xpt_free_ccb((union ccb *)scan_info->cpi);
5283                                 request_ccb = scan_info->request_ccb;
5284                                 kfree(scan_info, M_TEMP);
5285                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
5286                                 xpt_done(request_ccb);
5287                         }
5288                 } else {
5289                         /* Try the next device */
5290                         struct cam_path *path;
5291                         cam_status status;
5292
5293                         status = xpt_create_path(&path, xpt_periph,
5294                                                  path_id, target_id, lun_id);
5295                         if (status != CAM_REQ_CMP) {
5296                                 kprintf("xpt_scan_bus: xpt_create_path failed "
5297                                        "with status %#x, halting LUN scan\n",
5298                                        status);
5299                                 xpt_free_ccb(request_ccb);
5300                                 scan_info->pending_count--;
5301                                 if (scan_info->pending_count == 0) {
5302                                         xpt_free_ccb(
5303                                                 (union ccb *)scan_info->cpi);
5304                                         request_ccb = scan_info->request_ccb;
5305                                         kfree(scan_info, M_TEMP);
5306                                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5307                                         xpt_done(request_ccb);
5308                                 }
5309                                 break;
5310                         }
5311                         xpt_setup_ccb(&request_ccb->ccb_h, path,
5312                                       request_ccb->ccb_h.pinfo.priority);
5313                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5314                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5315                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5316                         request_ccb->crcn.flags =
5317                                 scan_info->request_ccb->crcn.flags;
5318                         xpt_action(request_ccb);
5319                 }
5320                 break;
5321         }
5322         default:
5323                 break;
5324         }
5325 }
5326
5327 typedef enum {
5328         PROBE_TUR,
5329         PROBE_INQUIRY,
5330         PROBE_FULL_INQUIRY,
5331         PROBE_MODE_SENSE,
5332         PROBE_SERIAL_NUM,
5333         PROBE_TUR_FOR_NEGOTIATION
5334 } probe_action;
5335
5336 typedef enum {
5337         PROBE_INQUIRY_CKSUM     = 0x01,
5338         PROBE_SERIAL_CKSUM      = 0x02,
5339         PROBE_NO_ANNOUNCE       = 0x04
5340 } probe_flags;
5341
5342 typedef struct {
5343         TAILQ_HEAD(, ccb_hdr) request_ccbs;
5344         probe_action    action;
5345         union ccb       saved_ccb;
5346         probe_flags     flags;
5347         MD5_CTX         context;
5348         u_int8_t        digest[16];
5349 } probe_softc;
5350
5351 static void
5352 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5353              cam_flags flags, union ccb *request_ccb)
5354 {
5355         struct ccb_pathinq cpi;
5356         cam_status status;
5357         struct cam_path *new_path;
5358         struct cam_periph *old_periph;
5359         
5360         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5361                   ("xpt_scan_lun\n"));
5362         
5363         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5364         cpi.ccb_h.func_code = XPT_PATH_INQ;
5365         xpt_action((union ccb *)&cpi);
5366
5367         if (cpi.ccb_h.status != CAM_REQ_CMP) {
5368                 if (request_ccb != NULL) {
5369                         request_ccb->ccb_h.status = cpi.ccb_h.status;
5370                         xpt_done(request_ccb);
5371                 }
5372                 return;
5373         }
5374
5375         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5376                 /*
5377                  * Can't scan the bus on an adapter that
5378                  * cannot perform the initiator role.
5379                  */
5380                 if (request_ccb != NULL) {
5381                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5382                         xpt_done(request_ccb);
5383                 }
5384                 return;
5385         }
5386
5387         if (request_ccb == NULL) {
5388                 request_ccb = kmalloc(sizeof(union ccb), M_TEMP, M_INTWAIT);
5389                 new_path = kmalloc(sizeof(*new_path), M_TEMP, M_INTWAIT);
5390                 status = xpt_compile_path(new_path, xpt_periph,
5391                                           path->bus->path_id,
5392                                           path->target->target_id,
5393                                           path->device->lun_id);
5394
5395                 if (status != CAM_REQ_CMP) {
5396                         xpt_print_path(path);
5397                         kprintf("xpt_scan_lun: can't compile path, can't "
5398                                "continue\n");
5399                         kfree(request_ccb, M_TEMP);
5400                         kfree(new_path, M_TEMP);
5401                         return;
5402                 }
5403                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5404                 request_ccb->ccb_h.cbfcnp = xptscandone;
5405                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5406                 request_ccb->crcn.flags = flags;
5407         }
5408
5409         crit_enter();
5410         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5411                 probe_softc *softc;
5412
5413                 softc = (probe_softc *)old_periph->softc;
5414                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5415                                   periph_links.tqe);
5416         } else {
5417                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5418                                           probestart, "probe",
5419                                           CAM_PERIPH_BIO,
5420                                           request_ccb->ccb_h.path, NULL, 0,
5421                                           request_ccb);
5422
5423                 if (status != CAM_REQ_CMP) {
5424                         xpt_print_path(path);
5425                         kprintf("xpt_scan_lun: cam_periph_alloc returned an "
5426                                "error, can't continue probe\n");
5427                         request_ccb->ccb_h.status = status;
5428                         xpt_done(request_ccb);
5429                 }
5430         }
5431         crit_exit();
5432 }
5433
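/*
 * Completion handler for scans that xpt_scan_lun() initiated on its own
 * behalf; releases the path and CCB allocated in xpt_scan_lun() above.
 */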
5434 static void
5435 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5436 {
5437         xpt_release_path(done_ccb->ccb_h.path);
5438         kfree(done_ccb->ccb_h.path, M_TEMP);
5439         kfree(done_ccb, M_TEMP);
5440 }
5441
5442 static cam_status
5443 proberegister(struct cam_periph *periph, void *arg)
5444 {
5445         union ccb *request_ccb; /* CCB representing the probe request */
5446         probe_softc *softc;
5447
5448         request_ccb = (union ccb *)arg;
5449         if (periph == NULL) {
5450                 kprintf("proberegister: periph was NULL!!\n");
5451                 return(CAM_REQ_CMP_ERR);
5452         }
5453
5454         if (request_ccb == NULL) {
5455                 kprintf("proberegister: no probe CCB, "
5456                        "can't register device\n");
5457                 return(CAM_REQ_CMP_ERR);
5458         }
5459
5460         softc = kmalloc(sizeof(*softc), M_TEMP, M_INTWAIT | M_ZERO);
5461         TAILQ_INIT(&softc->request_ccbs);
5462         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5463                           periph_links.tqe);
5464         softc->flags = 0;
5465         periph->softc = softc;
5466         cam_periph_acquire(periph);
5467         /*
5468          * Ensure we've waited at least a bus settle
5469          * delay before attempting to probe the device.
5470          * For HBAs that don't do bus resets, this won't make a difference.
5471          */
5472         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5473                                       scsi_delay);
5474         probeschedule(periph);
5475         return(CAM_REQ_CMP);
5476 }
5477
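/*
 * Choose the initial probe action for the request at the head of the
 * softc's CCB queue: a TUR for an already-configured device at lun 0,
 * otherwise an INQUIRY, preceded by a default transfer negotiation when
 * the HBA can negotiate but never resets the bus.
 */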
5478 static void
5479 probeschedule(struct cam_periph *periph)
5480 {
5481         struct ccb_pathinq cpi;
5482         union ccb *ccb;
5483         probe_softc *softc;
5484
5485         softc = (probe_softc *)periph->softc;
5486         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5487
5488         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5489         cpi.ccb_h.func_code = XPT_PATH_INQ;
5490         xpt_action((union ccb *)&cpi);
5491
5492         /*
5493          * If a device has gone away and another device, or the same one,
5494          * is back in the same place, it should have a unit attention
5495          * condition pending.  It will not report the unit attention in
5496          * response to an inquiry, which may leave invalid transfer
5497          * negotiations in effect.  The TUR will reveal the unit attention
5498          * condition.  Only send the TUR for lun 0, since some devices 
5499          * will get confused by commands other than inquiry to non-existent
5500          * luns.  If you think a device has gone away, start your scan from
5501          * lun 0.  This will ensure that any bogus transfer settings are
5502          * invalidated.
5503          *
5504          * If we haven't seen the device before and the controller supports
5505          * some kind of transfer negotiation, negotiate with the first
5506          * sent command if no bus reset was performed at startup.  This
5507          * ensures that the device is not confused by transfer negotiation
5508          * settings left over by loader or BIOS action.
5509          */
5510         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5511          && (ccb->ccb_h.target_lun == 0)) {
5512                 softc->action = PROBE_TUR;
5513         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5514               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5515                 proberequestdefaultnegotiation(periph);
5516                 softc->action = PROBE_INQUIRY;
5517         } else {
5518                 softc->action = PROBE_INQUIRY;
5519         }
5520
5521         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5522                 softc->flags |= PROBE_NO_ANNOUNCE;
5523         else
5524                 softc->flags &= ~PROBE_NO_ANNOUNCE;
5525
5526         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5527 }
5528
5529 static void
5530 probestart(struct cam_periph *periph, union ccb *start_ccb)
5531 {
5532         /* Probe the device that our peripheral driver points to */
5533         struct ccb_scsiio *csio;
5534         probe_softc *softc;
5535
5536         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5537
5538         softc = (probe_softc *)periph->softc;
5539         csio = &start_ccb->csio;
5540
5541         switch (softc->action) {
5542         case PROBE_TUR:
5543         case PROBE_TUR_FOR_NEGOTIATION:
5544         {
5545                 scsi_test_unit_ready(csio,
5546                                      /*retries*/4,
5547                                      probedone,
5548                                      MSG_SIMPLE_Q_TAG,
5549                                      SSD_FULL_SIZE,
5550                                      /*timeout*/60000);
5551                 break;
5552         }
5553         case PROBE_INQUIRY:
5554         case PROBE_FULL_INQUIRY:
5555         {
5556                 u_int inquiry_len;
5557                 struct scsi_inquiry_data *inq_buf;
5558
5559                 inq_buf = &periph->path->device->inq_data;
5560                 /*
5561                  * If the device is currently configured, we calculate an
5562                  * MD5 checksum of the inquiry data, and if the serial number
5563                  * length is greater than 0, add the serial number data
5564                  * into the checksum as well.  Once the inquiry and the
5565                  * serial number check finish, we attempt to figure out
5566                  * whether we still have the same device.
5567                  */
5568                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5569                         
5570                         MD5Init(&softc->context);
5571                         MD5Update(&softc->context, (unsigned char *)inq_buf,
5572                                   sizeof(struct scsi_inquiry_data));
5573                         softc->flags |= PROBE_INQUIRY_CKSUM;
5574                         if (periph->path->device->serial_num_len > 0) {
5575                                 MD5Update(&softc->context,
5576                                           periph->path->device->serial_num,
5577                                           periph->path->device->serial_num_len);
5578                                 softc->flags |= PROBE_SERIAL_CKSUM;
5579                         }
5580                         MD5Final(softc->digest, &softc->context);
5581                 } 
5582
5583                 if (softc->action == PROBE_INQUIRY)
5584                         inquiry_len = SHORT_INQUIRY_LENGTH;
5585                 else
5586                         inquiry_len = inq_buf->additional_length
5587                                     + offsetof(struct scsi_inquiry_data,
5588                                                additional_length) + 1;
5589
5590                 /*
5591                  * Some parallel SCSI devices fail to send an
5592                  * ignore wide residue message when dealing with
5593                  * odd length inquiry requests.  Round up to be
5594                  * safe.
5595                  */
5596                 inquiry_len = roundup2(inquiry_len, 2);
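                /* e.g. roundup2(45, 2) == 46: an odd 45-byte length becomes 46. */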
5597         
5598                 scsi_inquiry(csio,
5599                              /*retries*/4,
5600                              probedone,
5601                              MSG_SIMPLE_Q_TAG,
5602                              (u_int8_t *)inq_buf,
5603                              inquiry_len,
5604                              /*evpd*/FALSE,
5605                              /*page_code*/0,
5606                              SSD_MIN_SIZE,
5607                              /*timeout*/60 * 1000);
5608                 break;
5609         }
5610         case PROBE_MODE_SENSE:
5611         {
5612                 void  *mode_buf;
5613                 int    mode_buf_len;
5614
5615                 mode_buf_len = sizeof(struct scsi_mode_header_6)
5616                              + sizeof(struct scsi_mode_blk_desc)
5617                              + sizeof(struct scsi_control_page);
5618                 mode_buf = kmalloc(mode_buf_len, M_TEMP, M_INTWAIT);
5619                 scsi_mode_sense(csio,
5620                                 /*retries*/4,
5621                                 probedone,
5622                                 MSG_SIMPLE_Q_TAG,
5623                                 /*dbd*/FALSE,
5624                                 SMS_PAGE_CTRL_CURRENT,
5625                                 SMS_CONTROL_MODE_PAGE,
5626                                 mode_buf,
5627                                 mode_buf_len,
5628                                 SSD_FULL_SIZE,
5629                                 /*timeout*/60000);
5630                 break;
5631         }
5632         case PROBE_SERIAL_NUM:
5633         {
5634                 struct scsi_vpd_unit_serial_number *serial_buf;
5635                 struct cam_ed* device;
5636
5637                 serial_buf = NULL;
5638                 device = periph->path->device;
5639                 device->serial_num = NULL;
5640                 device->serial_num_len = 0;
5641
5642                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5643                         serial_buf = kmalloc(sizeof(*serial_buf), M_TEMP,
5644                                             M_INTWAIT | M_ZERO);
5645                         scsi_inquiry(csio,
5646                                      /*retries*/4,
5647                                      probedone,
5648                                      MSG_SIMPLE_Q_TAG,
5649                                      (u_int8_t *)serial_buf,
5650                                      sizeof(*serial_buf),
5651                                      /*evpd*/TRUE,
5652                                      SVPD_UNIT_SERIAL_NUMBER,
5653                                      SSD_MIN_SIZE,
5654                                      /*timeout*/60 * 1000);
5655                         break;
5656                 }
5657                 /*
5658                  * We'll have to do without, let our probedone
5659                  * routine finish up for us.
5660                  */
5661                 start_ccb->csio.data_ptr = NULL;
5662                 probedone(periph, start_ccb);
5663                 return;
5664         }
5665         }
5666         xpt_action(start_ccb);
5667 }
5668
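/*
 * Fetch the user's preferred (maximum) transfer settings for this path
 * and immediately install them as the current goal, giving the SIM a
 * sane starting point for negotiating with a newly found device.
 */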
5669 static void
5670 proberequestdefaultnegotiation(struct cam_periph *periph)
5671 {
5672         struct ccb_trans_settings cts;
5673
5674         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5675         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5676 #ifdef CAM_NEW_TRAN_CODE
5677         cts.type = CTS_TYPE_USER_SETTINGS;
5678 #else /* CAM_NEW_TRAN_CODE */
5679         cts.flags = CCB_TRANS_USER_SETTINGS;
5680 #endif /* CAM_NEW_TRAN_CODE */
5681         xpt_action((union ccb *)&cts);
5682         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5683 #ifdef CAM_NEW_TRAN_CODE
5684         cts.type = CTS_TYPE_CURRENT_SETTINGS;
5685 #else /* CAM_NEW_TRAN_CODE */
5686         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5687         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5688 #endif /* CAM_NEW_TRAN_CODE */
5689         xpt_action((union ccb *)&cts);
5690 }
5691
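/*
 * Completion handler for the probe state machine.  Each completed step
 * either reschedules the peripheral for the next action or, on the final
 * step, completes the originating request CCB(s) and announces the device.
 */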
5692 static void
5693 probedone(struct cam_periph *periph, union ccb *done_ccb)
5694 {
5695         probe_softc *softc;
5696         struct cam_path *path;
5697         u_int32_t  priority;
5698
5699         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5700
5701         softc = (probe_softc *)periph->softc;
5702         path = done_ccb->ccb_h.path;
5703         priority = done_ccb->ccb_h.pinfo.priority;
5704
5705         switch (softc->action) {
5706         case PROBE_TUR:
5707         {
5708                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5709
5710                         if (cam_periph_error(done_ccb, 0,
5711                                              SF_NO_PRINT, NULL) == ERESTART)
5712                                 return;
5713                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5714                                 /* Don't wedge the queue */
5715                                 xpt_release_devq(done_ccb->ccb_h.path,
5716                                                  /*count*/1,
5717                                                  /*run_queue*/TRUE);
5718                 }
5719                 softc->action = PROBE_INQUIRY;
5720                 xpt_release_ccb(done_ccb);
5721                 xpt_schedule(periph, priority);
5722                 return;
5723         }
5724         case PROBE_INQUIRY:
5725         case PROBE_FULL_INQUIRY:
5726         {
5727                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5728                         struct scsi_inquiry_data *inq_buf;
5729                         u_int8_t periph_qual;
5730
5731                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5732                         inq_buf = &path->device->inq_data;
5733
5734                         periph_qual = SID_QUAL(inq_buf);
5735                         
5736                         switch(periph_qual) {
5737                         case SID_QUAL_LU_CONNECTED:
5738                         {
5739                                 u_int8_t len;
5740
5741                                 /*
5742                                  * We conservatively request only
5743                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
5744                                  * information during our first try
5745                                  * at sending an INQUIRY. If the device
5746                                  * has more information to give,
5747                                  * perform a second request specifying
5748                                  * the amount of information the device
5749                                  * is willing to give.
5750                                  */
5751                                 len = inq_buf->additional_length
5752                                     + offsetof(struct scsi_inquiry_data,
5753                                                 additional_length) + 1;
5754                                 if (softc->action == PROBE_INQUIRY
5755                                  && len > SHORT_INQUIRY_LENGTH) {
5756                                         softc->action = PROBE_FULL_INQUIRY;
5757                                         xpt_release_ccb(done_ccb);
5758                                         xpt_schedule(periph, priority);
5759                                         return;
5760                                 }
5761
5762                                 xpt_find_quirk(path->device);
5763
5764 #ifdef CAM_NEW_TRAN_CODE
5765                                 xpt_devise_transport(path);
5766 #endif /* CAM_NEW_TRAN_CODE */
5767                                 if (INQ_DATA_TQ_ENABLED(inq_buf))
5768                                         softc->action = PROBE_MODE_SENSE;
5769                                 else
5770                                         softc->action = PROBE_SERIAL_NUM;
5771
5772                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5773                                 xpt_reference_device(path->device);
5774
5775                                 xpt_release_ccb(done_ccb);
5776                                 xpt_schedule(periph, priority);
5777                                 return;
5778                         }
5779                         default:
5780                                 break;
5781                         }
5782                 } else if (cam_periph_error(done_ccb, 0,
5783                                             done_ccb->ccb_h.target_lun > 0
5784                                             ? SF_RETRY_UA|SF_QUIET_IR
5785                                             : SF_RETRY_UA,
5786                                             &softc->saved_ccb) == ERESTART) {
5787                         return;
5788                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5789                         /* Don't wedge the queue */
5790                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5791                                          /*run_queue*/TRUE);
5792                 }
5793                 /*
5794                  * If we get to this point, we got an error status back
5795                  * from the inquiry and the error status doesn't require
5796                  * automatically retrying the command.  Therefore, the
5797                  * inquiry failed.  If we had inquiry information before
5798                  * for this device, but this latest inquiry command failed,
5799                  * the device has probably gone away.  If this device isn't
5800                  * already marked unconfigured, notify the peripheral
5801                  * drivers that this device is no more.
5802                  */
5803                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5804                         /* Send the async notification. */
5805                         xpt_async(AC_LOST_DEVICE, path, NULL);
5806                 }
5807
5808                 xpt_release_ccb(done_ccb);
5809                 break;
5810         }
5811         case PROBE_MODE_SENSE:
5812         {
5813                 struct ccb_scsiio *csio;
5814                 struct scsi_mode_header_6 *mode_hdr;
5815
5816                 csio = &done_ccb->csio;
5817                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5818                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5819                         struct scsi_control_page *page;
5820                         u_int8_t *offset;
5821
5822                         offset = ((u_int8_t *)&mode_hdr[1])
5823                             + mode_hdr->blk_desc_len;
5824                         page = (struct scsi_control_page *)offset;
5825                         path->device->queue_flags = page->queue_flags;
5826                 } else if (cam_periph_error(done_ccb, 0,
5827                                             SF_RETRY_UA|SF_NO_PRINT,
5828                                             &softc->saved_ccb) == ERESTART) {
5829                         return;
5830                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5831                         /* Don't wedge the queue */
5832                         xpt_release_devq(done_ccb->ccb_h.path,
5833                                          /*count*/1, /*run_queue*/TRUE);
5834                 }
5835                 xpt_release_ccb(done_ccb);
5836                 kfree(mode_hdr, M_TEMP);
5837                 softc->action = PROBE_SERIAL_NUM;
5838                 xpt_schedule(periph, priority);
5839                 return;
5840         }
5841         case PROBE_SERIAL_NUM:
5842         {
5843                 struct ccb_scsiio *csio;
5844                 struct scsi_vpd_unit_serial_number *serial_buf;
5845                 u_int32_t  priority;
5846                 int changed;
5847                 int have_serialnum;
5848
5849                 changed = 1;
5850                 have_serialnum = 0;
5851                 csio = &done_ccb->csio;
5852                 priority = done_ccb->ccb_h.pinfo.priority;
5853                 serial_buf =
5854                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5855
5856                 /* Clean up from previous instance of this device */
5857                 if (path->device->serial_num != NULL) {
5858                         kfree(path->device->serial_num, M_CAMXPT);
5859                         path->device->serial_num = NULL;
5860                         path->device->serial_num_len = 0;
5861                 }
5862
5863                 if (serial_buf == NULL) {
5864                         /*
5865                          * Don't process the command as it was never sent
5866                          */
5867                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5868                         && (serial_buf->length > 0)) {
5869
5870                         have_serialnum = 1;
5871                         path->device->serial_num =
5872                                 kmalloc((serial_buf->length + 1),
5873                                        M_CAMXPT, M_INTWAIT);
5874                         bcopy(serial_buf->serial_num,
5875                               path->device->serial_num,
5876                               serial_buf->length);
5877                         path->device->serial_num_len = serial_buf->length;
5878                         path->device->serial_num[serial_buf->length] = '\0';
5879                 } else if (cam_periph_error(done_ccb, 0,
5880                                             SF_RETRY_UA|SF_NO_PRINT,
5881                                             &softc->saved_ccb) == ERESTART) {
5882                         return;
5883                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5884                         /* Don't wedge the queue */
5885                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5886                                          /*run_queue*/TRUE);
5887                 }
5888                 
5889                 /*
5890                  * Let's see if we have seen this device before.
5891                  */
5892                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5893                         MD5_CTX context;
5894                         u_int8_t digest[16];
5895
5896                         MD5Init(&context);
5897                         
5898                         MD5Update(&context,
5899                                   (unsigned char *)&path->device->inq_data,
5900                                   sizeof(struct scsi_inquiry_data));
5901
5902                         if (have_serialnum)
5903                                 MD5Update(&context, serial_buf->serial_num,
5904                                           serial_buf->length);
5905
5906                         MD5Final(digest, &context);
5907                         if (bcmp(softc->digest, digest, 16) == 0)
5908                                 changed = 0;
5909
5910                         /*
5911                          * XXX Do we need to do a TUR in order to ensure
5912                          *     that the device really hasn't changed???
5913                          */
5914                         if ((changed != 0)
5915                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5916                                 xpt_async(AC_LOST_DEVICE, path, NULL);
5917                 }
5918                 if (serial_buf != NULL)
5919                         kfree(serial_buf, M_TEMP);
5920
5921                 if (changed != 0) {
5922                         /*
5923                          * Now that we have all the necessary
5924                          * information to safely perform transfer
5925                          * negotiations... Controllers don't perform
5926                          * any negotiation or tagged queuing until
5927                          * after the first XPT_SET_TRAN_SETTINGS ccb is
5928                          * received.  So, on a new device, just retrieve
5929                          * the user settings, and set them as the current
5930                          * settings to set the device up.
5931                          */
5932                         proberequestdefaultnegotiation(periph);
5933                         xpt_release_ccb(done_ccb);
5934
5935                         /*
5936                          * Perform a TUR to allow the controller to
5937                          * perform any necessary transfer negotiation.
5938                          */
5939                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
5940                         xpt_schedule(periph, priority);
5941                         return;
5942                 }
5943                 xpt_release_ccb(done_ccb);
5944                 break;
5945         }
5946         case PROBE_TUR_FOR_NEGOTIATION:
5947                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5948                         /* Don't wedge the queue */
5949                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5950                                          /*run_queue*/TRUE);
5951                 }
5952
5953                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5954                 xpt_reference_device(path->device);
5955
5956                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5957                         /* Inform the XPT that a new device has been found */
5958                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5959                         xpt_action(done_ccb);
5960
5961                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
5962                                   done_ccb);
5963                 }
5964                 xpt_release_ccb(done_ccb);
5965                 break;
5966         }
5967         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5968         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5969         done_ccb->ccb_h.status = CAM_REQ_CMP;
5970         xpt_done(done_ccb);
5971         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5972                 cam_periph_invalidate(periph);
5973                 cam_periph_release(periph);
5974         } else {
5975                 probeschedule(periph);
5976         }
5977 }
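
/*
 * An illustrative, non-compiled sketch of the change detection performed
 * in PROBE_SERIAL_NUM above (the helper name is hypothetical): hash the
 * identifying data and compare against the digest saved by probestart().
 */
#if 0
static int
probe_device_changed(struct scsi_inquiry_data *inq, u_int8_t *serial,
                     u_int serial_len, u_int8_t old_digest[16])
{
        MD5_CTX ctx;
        u_int8_t digest[16];

        MD5Init(&ctx);
        MD5Update(&ctx, (unsigned char *)inq, sizeof(*inq));
        if (serial_len > 0)
                MD5Update(&ctx, serial, serial_len);
        MD5Final(digest, &ctx);
        return (bcmp(old_digest, digest, 16) != 0);
}
#endif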
5978
5979 static void
5980 probecleanup(struct cam_periph *periph)
5981 {
5982         kfree(periph->softc, M_TEMP);
5983 }
5984
5985 static void
5986 xpt_find_quirk(struct cam_ed *device)
5987 {
5988         caddr_t match;
5989
5990         match = cam_quirkmatch((caddr_t)&device->inq_data,
5991                                (caddr_t)xpt_quirk_table,
5992                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5993                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
5994
5995         if (match == NULL)
5996                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
5997
5998         device->quirk = (struct xpt_quirk_entry *)match;
5999 }
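
/*
 * cam_quirkmatch() returns NULL only when no entry matches, so the quirk
 * table defined earlier in this file must end with a catch-all wildcard
 * entry or the panic above would fire for ordinary devices.  An
 * illustrative (assumed) shape for such a terminating entry:
 */
#if 0
        {
                /* Default tagged queuing parameters for all devices */
                {
                        T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                        /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
#endif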
6000
6001 static int
6002 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6003 {
6004         int error, bool;
6005
6006         bool = cam_srch_hi;
6007         error = sysctl_handle_int(oidp, &bool, sizeof(bool), req);
6008         if (error != 0 || req->newptr == NULL)
6009                 return (error);
6010         if (bool == 0 || bool == 1) {
6011                 cam_srch_hi = bool;
6012                 return (0);
6013         } else {
6014                 return (EINVAL);
6015         }
6016 }
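
/*
 * The knob itself hangs off the CAM sysctl tree; a declaration along
 * these (assumed) lines wires the handler up, after which the search
 * behavior can be toggled at runtime, e.g. "sysctl kern.cam.cam_srch_hi=1".
 */
#if 0
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT | CTLFLAG_RW,
            0, 0, sysctl_cam_search_luns, "I",
            "allow search above LUN 7 for SCSI3 and greater devices");
#endif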
6017
6018 #ifdef CAM_NEW_TRAN_CODE
6019
6020 static void
6021 xpt_devise_transport(struct cam_path *path)
6022 {
6023         struct ccb_pathinq cpi;
6024         struct ccb_trans_settings cts;
6025         struct scsi_inquiry_data *inq_buf;
6026
6027         /* Get transport information from the SIM */
6028         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6029         cpi.ccb_h.func_code = XPT_PATH_INQ;
6030         xpt_action((union ccb *)&cpi);
6031
6032         inq_buf = NULL;
6033         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6034                 inq_buf = &path->device->inq_data;
6035         path->device->protocol = PROTO_SCSI;
6036         path->device->protocol_version =
6037             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6038         path->device->transport = cpi.transport;
6039         path->device->transport_version = cpi.transport_version;
6040
6041         /*
6042          * Any device not using SPI3 features should
6043          * be considered SPI2 or lower.
6044          */
6045         if (inq_buf != NULL) {
6046                 if (path->device->transport == XPORT_SPI
6047                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
6048                  && path->device->transport_version > 2)
6049                         path->device->transport_version = 2;
6050         } else {
6051                 struct cam_ed* otherdev;
6052
6053                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6054                      otherdev != NULL;
6055                      otherdev = TAILQ_NEXT(otherdev, links)) {
6056                         if (otherdev != path->device)
6057                                 break;
6058                 }
6059
6060                 if (otherdev != NULL) {
6061                         /*
6062                          * Initially assume the same versioning as
6063                          * prior luns for this target.
6064                          */
6065                         path->device->protocol_version =
6066                             otherdev->protocol_version;
6067                         path->device->transport_version =
6068                             otherdev->transport_version;
6069                 } else {
6070                         /* Until we know better, opt for safety */
6071                         path->device->protocol_version = 2;
6072                         if (path->device->transport == XPORT_SPI)
6073                                 path->device->transport_version = 2;
6074                         else
6075                                 path->device->transport_version = 0;
6076                 }
6077         }
6078
6079         /*
6080          * XXX
6081          * For a device compliant with SPC-2 we should be able
6082          * to determine the transport version supported by
6083          * scrutinizing the version descriptors in the
6084          * inquiry buffer.
6085          */
6086
6087         /* Tell the controller what we think */
6088         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6089         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6090         cts.type = CTS_TYPE_CURRENT_SETTINGS;
6091         cts.transport = path->device->transport;
6092         cts.transport_version = path->device->transport_version;
6093         cts.protocol = path->device->protocol;
6094         cts.protocol_version = path->device->protocol_version;
6095         cts.proto_specific.valid = 0;
6096         cts.xport_specific.valid = 0;
6097         xpt_action((union ccb *)&cts);
6098 }
6099
6100 static void
6101 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6102                           int async_update)
6103 {
6104         struct  ccb_pathinq cpi;
6105         struct  ccb_trans_settings cur_cts;
6106         struct  ccb_trans_settings_scsi *scsi;
6107         struct  ccb_trans_settings_scsi *cur_scsi;
6108         struct  cam_sim *sim;
6109         struct  scsi_inquiry_data *inq_data;
6110
6111         if (device == NULL) {
6112                 cts->ccb_h.status = CAM_PATH_INVALID;
6113                 xpt_done((union ccb *)cts);
6114                 return;
6115         }
6116
6117         if (cts->protocol == PROTO_UNKNOWN
6118          || cts->protocol == PROTO_UNSPECIFIED) {
6119                 cts->protocol = device->protocol;
6120                 cts->protocol_version = device->protocol_version;
6121         }
6122
6123         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6124          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6125                 cts->protocol_version = device->protocol_version;
6126
6127         if (cts->protocol != device->protocol) {
6128                 xpt_print_path(cts->ccb_h.path);
6129                 kprintf("Uninitialized Protocol %x:%x?\n",
6130                        cts->protocol, device->protocol);
6131                 cts->protocol = device->protocol;
6132         }
6133
6134         if (cts->protocol_version > device->protocol_version) {
6135                 if (bootverbose) {
6136                         xpt_print_path(cts->ccb_h.path);
6137                         kprintf("Down reving Protocol Version from %d to %d?\n",
6138                                cts->protocol_version, device->protocol_version);
6139                 }
6140                 cts->protocol_version = device->protocol_version;
6141         }
6142
6143         if (cts->transport == XPORT_UNKNOWN
6144          || cts->transport == XPORT_UNSPECIFIED) {
6145                 cts->transport = device->transport;
6146                 cts->transport_version = device->transport_version;
6147         }
6148
6149         if (cts->transport_version == XPORT_VERSION_UNKNOWN
6150          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6151                 cts->transport_version = device->transport_version;
6152
6153         if (cts->transport != device->transport) {
6154                 xpt_print_path(cts->ccb_h.path);
6155                 kprintf("Uninitialized Transport %x:%x?\n",
6156                        cts->transport, device->transport);
6157                 cts->transport = device->transport;
6158         }
6159
6160         if (cts->transport_version > device->transport_version) {
6161                 if (bootverbose) {
6162                         xpt_print_path(cts->ccb_h.path);
6163                         kprintf("Down reving Transport Version from %d to %d?\n",
6164                                cts->transport_version,
6165                                device->transport_version);
6166                 }
6167                 cts->transport_version = device->transport_version;
6168         }
6169
6170         sim = cts->ccb_h.path->bus->sim;
6171
6172         /*
6173          * Nothing more of interest to do unless
6174          * this is a device connected via the
6175          * SCSI protocol.
6176          */
6177         if (cts->protocol != PROTO_SCSI) {
6178                 if (async_update == FALSE)
6179                         (*(sim->sim_action))(sim, (union ccb *)cts);
6180                 return;
6181         }
6182
6183         inq_data = &device->inq_data;
6184         scsi = &cts->proto_specific.scsi;
6185         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6186         cpi.ccb_h.func_code = XPT_PATH_INQ;
6187         xpt_action((union ccb *)&cpi);
6188
6189         /* SCSI specific sanity checking */
6190         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6191          || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6192          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6193          || (device->quirk->mintags == 0)) {
6194                 /*
6195                  * Can't tag on hardware that doesn't support tags,
6196                  * doesn't have it enabled, or has broken tag support.
6197                  */
6198                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6199         }
6200
6201         if (async_update == FALSE) {
6202                 /*
6203                  * Perform sanity checking against what the
6204                  * controller and device can do.
6205                  */
6206                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6207                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6208                 cur_cts.type = cts->type;
6209                 xpt_action((union ccb *)&cur_cts);
6210
6211                 cur_scsi = &cur_cts.proto_specific.scsi;
6212                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6213                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6214                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6215                 }
6216                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6217                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6218         }
6219
6220         /* SPI specific sanity checking */
6221         if (cts->transport == XPORT_SPI && async_update == FALSE) {
6222                 u_int spi3caps;
6223                 struct ccb_trans_settings_spi *spi;
6224                 struct ccb_trans_settings_spi *cur_spi;
6225
6226                 spi = &cts->xport_specific.spi;
6227
6228                 cur_spi = &cur_cts.xport_specific.spi;
6229
6230                 /* Fill in any gaps in what the user gave us */
6231                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6232                         spi->sync_period = cur_spi->sync_period;
6233                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6234                         spi->sync_period = 0;
6235                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6236                         spi->sync_offset = cur_spi->sync_offset;
6237                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6238                         spi->sync_offset = 0;
6239                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6240                         spi->ppr_options = cur_spi->ppr_options;
6241                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6242                         spi->ppr_options = 0;
6243                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6244                         spi->bus_width = cur_spi->bus_width;
6245                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6246                         spi->bus_width = 0;
6247                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6248                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6249                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6250                 }
6251                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6252                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6253                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6254                   && (inq_data->flags & SID_Sync) == 0
6255                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6256                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6257                  || (cur_spi->sync_offset == 0)
6258                  || (cur_spi->sync_period == 0)) {
6259                         /* Force async */
6260                         spi->sync_period = 0;
6261                         spi->sync_offset = 0;
6262                 }
6263
6264                 switch (spi->bus_width) {
6265                 case MSG_EXT_WDTR_BUS_32_BIT:
6266                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6267                           || (inq_data->flags & SID_WBus32) != 0
6268                           || cts->type == CTS_TYPE_USER_SETTINGS)
6269                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6270                                 break;
6271                         /* Fall Through to 16-bit */
6272                 case MSG_EXT_WDTR_BUS_16_BIT:
6273                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6274                           || (inq_data->flags & SID_WBus16) != 0
6275                           || cts->type == CTS_TYPE_USER_SETTINGS)
6276                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6277                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6278                                 break;
6279                         }
6280                         /* Fall Through to 8-bit */
6281                 default: /* New bus width?? */
6282                 case MSG_EXT_WDTR_BUS_8_BIT:
6283                         /* All targets can do this */
6284                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6285                         break;
6286                 }
6287
6288                 spi3caps = cpi.xport_specific.spi.ppr_options;
6289                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6290                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6291                         spi3caps &= inq_data->spi3data;
6292
6293                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6294                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6295
6296                 if ((spi3caps & SID_SPI_IUS) == 0)
6297                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6298
6299                 if ((spi3caps & SID_SPI_QAS) == 0)
6300                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6301
6302                 /* No PPR options are allowed unless we are wide */
6303                 if (spi->bus_width == 0)
6304                         spi->ppr_options = 0;
6305
6306                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6307                         /*
6308                          * Can't tag queue without disconnection.
6309                          */
6310                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6311                         scsi->valid |= CTS_SCSI_VALID_TQ;
6312                 }
6313
6314                 /*
6315                  * If we are currently performing tagged transactions to
6316                  * this device and want to change its negotiation parameters,
6317                  * go non-tagged for a bit to give the controller a chance to
6318                  * negotiate unhampered by tag messages.
6319                  */
6320                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6321                  && (device->inq_flags & SID_CmdQue) != 0
6322                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6323                  && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
6324                                    CTS_SPI_VALID_SYNC_OFFSET|
6325                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
6326                         xpt_toggle_tags(cts->ccb_h.path);
6327         }
6328
6329         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6330          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6331                 int device_tagenb;
6332
6333                 /*
6334                  * If we are transitioning from tags to no-tags or
6335                  * vice-versa, we need to carefully freeze and restart
6336                  * the queue so that we don't overlap tagged and non-tagged
6337                  * commands.  We also temporarily stop tags if there is
6338                  * a change in transfer negotiation settings to allow
6339                  * "tag-less" negotiation.
6340                  */
6341                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6342                  || (device->inq_flags & SID_CmdQue) != 0)
6343                         device_tagenb = TRUE;
6344                 else
6345                         device_tagenb = FALSE;
6346
6347                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6348                   && device_tagenb == FALSE)
6349                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6350                   && device_tagenb == TRUE)) {
6351
6352                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6353                                 /*
6354                                  * Delay change to use tags until after a
6355                                  * few commands have gone to this device so
6356                                  * the controller has time to perform transfer
6357                                  * negotiations without tagged messages getting
6358                                  * in the way.
6359                                  */
6360                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6361                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6362                         } else {
6363                                 struct ccb_relsim crs;
6364
6365                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6366                                 device->inq_flags &= ~SID_CmdQue;
6367                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
6368                                                     sim->max_dev_openings);
6369                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6370                                 device->tag_delay_count = 0;
6371
6372                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6373                                               /*priority*/1);
6374                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
6375                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6376                                 crs.openings
6377                                     = crs.release_timeout
6378                                     = crs.qfrozen_cnt
6379                                     = 0;
6380                                 xpt_action((union ccb *)&crs);
6381                         }
6382                 }
6383         }
6384         if (async_update == FALSE)
6385                 (*(sim->sim_action))(sim, (union ccb *)cts);
6386 }
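
/*
 * An illustrative, non-compiled sketch (hypothetical caller) of driving
 * this routine via XPT_SET_TRAN_SETTINGS under CAM_NEW_TRAN_CODE.
 * Leaving protocol/transport unspecified lets the code above substitute
 * the device's values; the SPI fields force async, narrow transfers.
 */
#if 0
static void
example_force_async(struct cam_path *path)
{
        struct ccb_trans_settings cts;

        xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
        cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
        cts.type = CTS_TYPE_CURRENT_SETTINGS;
        cts.protocol = PROTO_UNSPECIFIED;
        cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
        cts.transport = XPORT_UNSPECIFIED;
        cts.transport_version = XPORT_VERSION_UNSPECIFIED;
        cts.proto_specific.valid = 0;
        cts.xport_specific.spi.sync_period = 0;
        cts.xport_specific.spi.sync_offset = 0;
        cts.xport_specific.spi.bus_width = MSG_EXT_WDTR_BUS_8_BIT;
        cts.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE
                                     | CTS_SPI_VALID_SYNC_OFFSET
                                     | CTS_SPI_VALID_BUS_WIDTH;
        xpt_action((union ccb *)&cts);
}
#endif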
6387
6388 #else /* CAM_NEW_TRAN_CODE */
6389
6390 static void
6391 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6392                           int async_update)
6393 {
6394         struct  cam_sim *sim;
6395         int     qfrozen;
6396
6397         sim = cts->ccb_h.path->bus->sim;
6398         if (async_update == FALSE) {
6399                 struct  scsi_inquiry_data *inq_data;
6400                 struct  ccb_pathinq cpi;
6401                 struct  ccb_trans_settings cur_cts;
6402
6403                 if (device == NULL) {
6404                         cts->ccb_h.status = CAM_PATH_INVALID;
6405                         xpt_done((union ccb *)cts);
6406                         return;
6407                 }
6408
6409                 /*
6410                  * Perform sanity checking against what the
6411                  * controller and device can do.
6412                  */
6413                 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6414                 cpi.ccb_h.func_code = XPT_PATH_INQ;
6415                 xpt_action((union ccb *)&cpi);
6416                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6417                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6418                 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6419                 xpt_action((union ccb *)&cur_cts);
6420                 inq_data = &device->inq_data;
6421
6422                 /* Fill in any gaps in what the user gave us */
6423                 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6424                         cts->sync_period = cur_cts.sync_period;
6425                 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6426                         cts->sync_offset = cur_cts.sync_offset;
6427                 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6428                         cts->bus_width = cur_cts.bus_width;
6429                 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6430                         cts->flags &= ~CCB_TRANS_DISC_ENB;
6431                         cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6432                 }
6433                 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6434                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6435                         cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6436                 }
6437
6438                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6439                   && (inq_data->flags & SID_Sync) == 0)
6440                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6441                  || (cts->sync_offset == 0)
6442                  || (cts->sync_period == 0)) {
6443                         /* Force async */
6444                         cts->sync_period = 0;
6445                         cts->sync_offset = 0;
6446                 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
6447
6448                         if ((inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6449                          && cts->sync_period <= 0x9) {
6450                                 /*
6451                                  * Don't allow DT transmission rates if the
6452                                  * device does not support it.
6453                                  */
6454                                 cts->sync_period = 0xa;
6455                         }
6456                         if ((inq_data->spi3data & SID_SPI_IUS) == 0
6457                          && cts->sync_period <= 0x8) {
6458                                 /*
6459                                  * Don't allow PACE transmission rates
6460                                  * if the device does not support packetized
6461                                  * transfers.
6462                                  */
6463                                 cts->sync_period = 0x9;
6464                         }
6465                 }
6466
6467                 switch (cts->bus_width) {
6468                 case MSG_EXT_WDTR_BUS_32_BIT:
6469                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6470                           || (inq_data->flags & SID_WBus32) != 0)
6471                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6472                                 break;
6473                         /* Fall Through to 16-bit */
6474                 case MSG_EXT_WDTR_BUS_16_BIT:
6475                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6476                           || (inq_data->flags & SID_WBus16) != 0)
6477                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6478                                 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6479                                 break;
6480                         }
6481                         /* Fall Through to 8-bit */
6482                 default: /* New bus width?? */
6483                 case MSG_EXT_WDTR_BUS_8_BIT:
6484                         /* All targets can do this */
6485                         cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6486                         break;
6487                 }
6488
6489                 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6490                         /*
6491                          * Can't tag queue without disconnection.
6492                          */
6493                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6494                         cts->valid |= CCB_TRANS_TQ_VALID;
6495                 }
6496
6497                 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6498                  || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6499                  || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6500                  || (device->quirk->mintags == 0)) {
6501                         /*
6502                          * Can't tag on hardware that doesn't support tags,
6503                          * doesn't have it enabled, or has broken tag support.
6504                          */
6505                         cts->flags &= ~CCB_TRANS_TAG_ENB;
6506                 }
6507         }
6508
6509         qfrozen = FALSE;
6510         if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6511                 int device_tagenb;
6512
6513                 /*
6514                  * If we are transitioning from tags to no-tags or
6515                  * vice-versa, we need to carefully freeze and restart
6516                  * the queue so that we don't overlap tagged and non-tagged
6517                  * commands.  We also temporarily stop tags if there is
6518                  * a change in transfer negotiation settings to allow
6519                  * "tag-less" negotiation.
6520                  */
6521                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6522                  || (device->inq_flags & SID_CmdQue) != 0)
6523                         device_tagenb = TRUE;
6524                 else
6525                         device_tagenb = FALSE;
6526
6527                 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6528                   && device_tagenb == FALSE)
6529                  || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6530                   && device_tagenb == TRUE)) {
6531
6532                         if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6533                                 /*
6534                                  * Delay change to use tags until after a
6535                                  * few commands have gone to this device so
6536                                  * the controller has time to perform transfer
6537                                  * negotiations without tagged messages getting
6538                                  * in the way.
6539                                  */
6540                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6541                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6542                         } else {
6543                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6544                                 qfrozen = TRUE;
6545                                 device->inq_flags &= ~SID_CmdQue;
6546                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
6547                                                     sim->max_dev_openings);
6548                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6549                                 device->tag_delay_count = 0;
6550                         }
6551                 }
6552         }
6553
6554         if (async_update == FALSE) {
6555                 /*
6556                  * If we are currently performing tagged transactions to
6557                  * this device and want to change its negotiation parameters,
6558                  * go non-tagged for a bit to give the controller a chance to
6559                  * negotiate unhampered by tag messages.
6560                  */
6561                 if ((device->inq_flags & SID_CmdQue) != 0
6562                  && (cts->valid & (CCB_TRANS_SYNC_RATE_VALID|
6563                                    CCB_TRANS_SYNC_OFFSET_VALID|
6564                                    CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6565                         xpt_toggle_tags(cts->ccb_h.path);
6566
6567                 (*(sim->sim_action))(sim, (union ccb *)cts);
6568         }
6569
        if (qfrozen) {
                struct ccb_relsim crs;

                xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
                              /*priority*/1);
                crs.ccb_h.func_code = XPT_REL_SIMQ;
                crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
                crs.openings
                    = crs.release_timeout
                    = crs.qfrozen_cnt
                    = 0;
                xpt_action((union ccb *)&crs);
        }
}

#endif /* CAM_NEW_TRAN_CODE */

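/*
 * Toggle tagged queuing off and then back on for a device.  Both updates
 * go through xpt_set_transfer_settings() with async_update set to TRUE,
 * so the SIM's action routine is not invoked here.  The "off" pass
 * freezes the device queue and shrinks it to untagged openings; the "on"
 * pass arms the tag enable command delay counter, after which camisr()
 * calls xpt_start_tags() to restore tagged operation.
 */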
static void
xpt_toggle_tags(struct cam_path *path)
{
        struct cam_ed *dev;

        /*
         * Give controllers a chance to renegotiate before starting tag
         * operations.  We "toggle" tagged queuing off and then back on,
         * which causes the tag enable command delay counter to take
         * effect.
         */
        dev = path->device;
        if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
         || ((dev->inq_flags & SID_CmdQue) != 0
          && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
                struct ccb_trans_settings cts;

                xpt_setup_ccb(&cts.ccb_h, path, 1);
#ifdef CAM_NEW_TRAN_CODE
                cts.protocol = PROTO_SCSI;
                cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
                cts.transport = XPORT_UNSPECIFIED;
                cts.transport_version = XPORT_VERSION_UNSPECIFIED;
                cts.proto_specific.scsi.flags = 0;
                cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
#else /* CAM_NEW_TRAN_CODE */
                cts.flags = 0;
                cts.valid = CCB_TRANS_TQ_VALID;
#endif /* CAM_NEW_TRAN_CODE */
                xpt_set_transfer_settings(&cts, path->device,
                                          /*async_update*/TRUE);
#ifdef CAM_NEW_TRAN_CODE
                cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
#else /* CAM_NEW_TRAN_CODE */
                cts.flags = CCB_TRANS_TAG_ENB;
#endif /* CAM_NEW_TRAN_CODE */
                xpt_set_transfer_settings(&cts, path->device,
                                          /*async_update*/TRUE);
        }
}

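/*
 * Re-enable tagged queuing on a device.  Called from camisr() once the
 * tag enable command delay counter armed by xpt_toggle_tags() reaches
 * zero.  The device queue is frozen while the CCB queue is resized to
 * the saved opening count (or the quirk/SIM limit if none was saved),
 * and the freeze is released once the untagged commands have drained.
 */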
static void
xpt_start_tags(struct cam_path *path)
{
        struct ccb_relsim crs;
        struct cam_ed *device;
        struct cam_sim *sim;
        int    newopenings;

        device = path->device;
        sim = path->bus->sim;
        device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
        xpt_freeze_devq(path, /*count*/1);
        device->inq_flags |= SID_CmdQue;
        if (device->tag_saved_openings != 0)
                newopenings = device->tag_saved_openings;
        else
                newopenings = min(device->quirk->maxtags,
                                  sim->max_tagged_dev_openings);
        xpt_dev_ccbq_resize(path, newopenings);
        xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
        crs.ccb_h.func_code = XPT_REL_SIMQ;
        crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
        crs.openings
            = crs.release_timeout
            = crs.qfrozen_cnt
            = 0;
        xpt_action((union ccb *)&crs);
}

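/*
 * Initial bus configuration is a two-pass walk over all registered
 * busses: xptconfigbuscountfunc() counts the busses to be configured
 * (and those that should be reset first), then xptconfigfunc() issues
 * the resets and kicks off the scans.  xpt_finishconfig() decrements
 * busses_to_config as each bus completes.
 */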
static int busses_to_config;
static int busses_to_reset;

static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
        if (bus->path_id != CAM_XPT_PATH_ID) {
                struct cam_path path;
                struct ccb_pathinq cpi;
                int can_negotiate;

                busses_to_config++;
                xpt_compile_path(&path, NULL, bus->path_id,
                                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
                xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
                cpi.ccb_h.func_code = XPT_PATH_INQ;
                xpt_action((union ccb *)&cpi);
                can_negotiate = cpi.hba_inquiry;
                can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
                if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
                 && can_negotiate)
                        busses_to_reset++;
                xpt_release_path(&path);
        }

        return(1);
}

static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
        struct  cam_path *path;
        union   ccb *work_ccb;

        if (bus->path_id != CAM_XPT_PATH_ID) {
                cam_status status;
                int can_negotiate;

                work_ccb = xpt_alloc_ccb();
                if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
                                              CAM_TARGET_WILDCARD,
                                              CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                        kprintf("xptconfigfunc: xpt_create_path failed with "
                               "status %#x for bus %d\n", status, bus->path_id);
                        kprintf("xptconfigfunc: halting bus configuration\n");
                        xpt_free_ccb(work_ccb);
                        busses_to_config--;
                        xpt_finishconfig(xpt_periph, NULL);
                        return(0);
                }
                xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
                work_ccb->ccb_h.func_code = XPT_PATH_INQ;
                xpt_action(work_ccb);
                if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
                        kprintf("xptconfigfunc: CPI failed on bus %d "
                               "with status %d\n", bus->path_id,
                               work_ccb->ccb_h.status);
                        xpt_finishconfig(xpt_periph, work_ccb);
                        return(1);
                }

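                /*
                 * Only reset the bus if the HBA can do bus resets and the
                 * bus is capable of negotiation (wide or sync), so devices
                 * renegotiate from a known state before the scan.
                 */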
                can_negotiate = work_ccb->cpi.hba_inquiry;
                can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
                if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
                 && (can_negotiate != 0)) {
                        xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
                        work_ccb->ccb_h.func_code = XPT_RESET_BUS;
                        work_ccb->ccb_h.cbfcnp = NULL;
                        CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
                                  ("Resetting Bus\n"));
                        xpt_action(work_ccb);
                        xpt_finishconfig(xpt_periph, work_ccb);
                } else {
                        /* Act as though we performed a successful BUS RESET */
                        work_ccb->ccb_h.func_code = XPT_RESET_BUS;
                        xpt_finishconfig(xpt_periph, work_ccb);
                }
        }

        return(1);
}

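/*
 * Kick off the initial bus scans.  This is established as a config
 * intr hook (xpt_config_hook, released in xpt_finishconfig() once all
 * busses are configured), so the boot holds until CAM has finished
 * probing.
 */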
static void
xpt_config(void *arg)
{
        /*
         * Now that interrupts are enabled, go find our devices.
         */

#ifdef CAMDEBUG
        /* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
        cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
        cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
        if (cam_dflags != CAM_DEBUG_NONE) {
                if (xpt_create_path(&cam_dpath, xpt_periph,
                                    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
                                    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
                        kprintf("xpt_config: xpt_create_path() failed for debug"
                               " target %d:%d:%d, debugging disabled\n",
                               CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
                        cam_dflags = CAM_DEBUG_NONE;
                }
        } else
                cam_dpath = NULL;
#else /* !CAM_DEBUG_BUS */
        cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

        /*
         * Scan all installed busses.
         */
        xpt_for_all_busses(xptconfigbuscountfunc, NULL);

        if (busses_to_config == 0) {
                /* Call manually because we don't have any busses */
                xpt_finishconfig(xpt_periph, NULL);
        } else {
                if (busses_to_reset > 0 && scsi_delay >= 2000) {
                        kprintf("Waiting %d seconds for SCSI "
                               "devices to settle\n", scsi_delay/1000);
                }
                xpt_for_all_busses(xptconfigfunc, NULL);
        }
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
        struct cam_periph *periph;
        int i;

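        /*
         * Count the peripherals attached to this device; the loop body
         * is intentionally empty.
         */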
        for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
             periph = SLIST_NEXT(periph, periph_links), i++)
                ;

        periph = SLIST_FIRST(&device->periphs);
        if ((i == 1)
         && (strncmp(periph->periph_name, "pass", 4) == 0))
                xpt_announce_periph(periph, NULL);

        return(1);
}

static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
        struct  periph_driver **p_drv;
        int     i;

        if (done_ccb != NULL) {
                CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
                          ("xpt_finishconfig\n"));
                switch(done_ccb->ccb_h.func_code) {
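                /*
                 * A successfully completed bus reset is recycled into a
                 * full bus scan: the same CCB is resubmitted with
                 * xpt_finishconfig() as its completion callback, so we
                 * land back here when the scan finishes.
                 */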
                case XPT_RESET_BUS:
                        if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
                                done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
                                done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
                                done_ccb->crcn.flags = 0;
                                xpt_action(done_ccb);
                                return;
                        }
                        /* FALLTHROUGH */
                case XPT_SCAN_BUS:
                default:
                        xpt_free_path(done_ccb->ccb_h.path);
                        busses_to_config--;
                        break;
                }
        }

        if (busses_to_config == 0) {
                /* Register all the peripheral drivers */
                /* XXX This will have to change when we have loadable modules */
                p_drv = periph_drivers;
                for (i = 0; p_drv[i] != NULL; i++) {
                        (*p_drv[i]->init)();
                }

                /*
                 * Check for devices with no "standard" peripheral driver
                 * attached.  For any devices like that, announce the
                 * passthrough driver so the user will see something.
                 */
                xpt_for_all_devices(xptpassannouncefunc, NULL);

                /* Release our hook so that the boot can continue. */
                config_intrhook_disestablish(xpt_config_hook);
                kfree(xpt_config_hook, M_TEMP);
                xpt_config_hook = NULL;
        }
        if (done_ccb != NULL)
                xpt_free_ccb(done_ccb);
}

static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
        CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

        switch (work_ccb->ccb_h.func_code) {
        /* Common cases first */
        case XPT_PATH_INQ:              /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi;

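                /*
                 * The xpt "bus" has no real HBA behind it, so report a
                 * minimal path inquiry: no targets, no LUNs, and no
                 * transport capabilities.
                 */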
                cpi = &work_ccb->cpi;
                cpi->version_num = 1; /* XXX??? */
                cpi->hba_inquiry = 0;
                cpi->target_sprt = 0;
                cpi->hba_misc = 0;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 0;
                cpi->max_lun = 0;
                cpi->initiator_id = 0;
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "", HBA_IDLEN);
                strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
                cpi->unit_number = sim->unit_number;
                cpi->bus_id = sim->bus_id;
                cpi->base_transfer_speed = 0;
#ifdef CAM_NEW_TRAN_CODE
                cpi->protocol = PROTO_UNSPECIFIED;
                cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
                cpi->transport = XPORT_UNSPECIFIED;
                cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
#endif /* CAM_NEW_TRAN_CODE */
                cpi->ccb_h.status = CAM_REQ_CMP;
                xpt_done(work_ccb);
                break;
        }
        default:
                work_ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(work_ccb);
                break;
        }
}

/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
 */
static void
xptpoll(struct cam_sim *sim)
{
}

/*
 * These should only be called by the machine interrupt dispatch routines,
 * so the prototypes are kept here instead of in the header.
 */

static void
swi_cambio(void *arg, void *frame)
{
        camisr(&cam_bioq);
}

static void
camisr(cam_isrq_t *queue)
{
        struct  ccb_hdr *ccb_h;

        crit_enter();
        while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
                int     runq;

                TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
                ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
                splz();

                CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
                          ("camisr\n"));

                runq = FALSE;

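                /*
                 * High powered commands (e.g. start unit) are limited to
                 * CAM_MAX_HIGHPOWER outstanding at a time.  This
                 * completion frees a slot; if another high powered CCB is
                 * waiting on the high power queue, release its frozen
                 * device queue so it can be dispatched.
                 */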
                if (ccb_h->flags & CAM_HIGH_POWER) {
                        struct highpowerlist    *hphead;
                        struct cam_ed           *device;
                        union ccb               *send_ccb;

                        hphead = &highpowerq;

                        send_ccb = (union ccb *)STAILQ_FIRST(hphead);

                        /*
                         * Increment the count since this command is done.
                         */
                        num_highpower++;

                        /*
                         * Any high powered commands queued up?
                         */
                        if (send_ccb != NULL) {
                                device = send_ccb->ccb_h.path->device;

                                STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

                                xpt_release_devq(send_ccb->ccb_h.path,
                                                 /*count*/1, /*runqueue*/TRUE);
                        }
                }
                if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
                        struct cam_ed *dev;

                        dev = ccb_h->path->device;

                        cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

                        if (!SIM_DEAD(ccb_h->path->bus->sim)) {
                                ccb_h->path->bus->sim->devq->send_active--;
                                ccb_h->path->bus->sim->devq->send_openings++;
                        }

                        if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
                          && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
                         || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
                          && (dev->ccbq.dev_active == 0))) {

                                xpt_release_devq(ccb_h->path, /*count*/1,
                                                 /*run_queue*/TRUE);
                        }

                        if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
                         && (--dev->tag_delay_count == 0))
                                xpt_start_tags(ccb_h->path);

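                        /*
                         * If the device still has work queued, is not
                         * frozen, and is not already on the SIM's send
                         * queue, schedule it to run.
                         */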
                        if ((dev->ccbq.queue.entries > 0)
                         && (dev->qfrozen_cnt == 0)
                         && (device_is_send_queued(dev) == 0)) {
                                runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
                                                              dev);
                        }
                }

                if (ccb_h->status & CAM_RELEASE_SIMQ) {
                        xpt_release_simq(ccb_h->path->bus->sim,
                                         /*run_queue*/TRUE);
                        ccb_h->status &= ~CAM_RELEASE_SIMQ;
                        runq = FALSE;
                }

                if ((ccb_h->flags & CAM_DEV_QFRZDIS)
                 && (ccb_h->status & CAM_DEV_QFRZN)) {
                        xpt_release_devq(ccb_h->path, /*count*/1,
                                         /*run_queue*/TRUE);
                        ccb_h->status &= ~CAM_DEV_QFRZN;
                } else if (runq) {
                        xpt_run_dev_sendq(ccb_h->path->bus);
                }

                /* Call the peripheral driver's callback */
                (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
        }
        crit_exit();
}

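/*
 * Action and poll routines used in place of a real SIM's once that SIM
 * has gone away (see SIM_DEAD): every CCB is failed immediately with
 * CAM_DEV_NOT_THERE.
 */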
static void
dead_sim_action(struct cam_sim *sim, union ccb *ccb)
{
        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
        xpt_done(ccb);
}

static void
dead_sim_poll(struct cam_sim *sim)
{
}