Change the peripheral driver list from a linker set to module driven
[dragonfly.git] / sys / bus / cam / cam_periph.c
1 /*
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/cam/cam_periph.c,v 1.24.2.3 2003/01/25 19:04:40 dillon Exp $
30  * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.24 2007/11/17 20:28:46 pavalos Exp $
31  */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/buf.h>
38 #include <sys/proc.h>
39 #include <sys/devicestat.h>
40 #include <sys/bus.h>
41 #include <vm/vm.h>
42 #include <vm/vm_extern.h>
43
44 #include <sys/thread2.h>
45
46 #include "cam.h"
47 #include "cam_ccb.h"
48 #include "cam_xpt_periph.h"
49 #include "cam_periph.h"
50 #include "cam_debug.h"
51
52 #include <bus/cam/scsi/scsi_all.h>
53 #include <bus/cam/scsi/scsi_message.h>
54 #include <bus/cam/scsi/scsi_pass.h>
55
56 static  u_int           camperiphnextunit(struct periph_driver *p_drv,
57                                           u_int newunit, int wired,
58                                           path_id_t pathid, target_id_t target,
59                                           lun_id_t lun);
60 static  u_int           camperiphunit(struct periph_driver *p_drv,
61                                       path_id_t pathid, target_id_t target,
62                                       lun_id_t lun); 
63 static  void            camperiphdone(struct cam_periph *periph, 
64                                         union ccb *done_ccb);
65 static  void            camperiphfree(struct cam_periph *periph);
66
67 static int nperiph_drivers;
68 struct periph_driver **periph_drivers;
69
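/*
 * Register a peripheral driver at run time.  This replaces the old
 * linker-set based driver list: the NULL-terminated periph_drivers array
 * is reallocated to make room for each new entry.  A minimal sketch of
 * how a peripheral module might call this from its module event handler
 * (hypothetical driver "foo"; any real declaration macro lives in
 * cam_periph.h):
 *
 *	static int
 *	foo_modevent(module_t mod, int type, void *data)
 *	{
 *		switch (type) {
 *		case MOD_LOAD:
 *			periphdriver_register(&foodriver);
 *			return (0);
 *		default:
 *			return (EOPNOTSUPP);
 *		}
 *	}
 */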
70 void
71 periphdriver_register(void *data)
72 {
73         struct periph_driver **newdrivers, **old;
74         int ndrivers;
75
76         ndrivers = nperiph_drivers + 2;
77         newdrivers = kmalloc(sizeof(*newdrivers) * ndrivers, M_TEMP, M_WAITOK);
78         if (periph_drivers)
79                 bcopy(periph_drivers, newdrivers,
80                       sizeof(*newdrivers) * nperiph_drivers);
81         newdrivers[nperiph_drivers] = (struct periph_driver *)data;
82         newdrivers[nperiph_drivers + 1] = NULL;
83         old = periph_drivers;
84         periph_drivers = newdrivers;
85         if (old)
86                 kfree(old, M_TEMP);
87         nperiph_drivers++;
88 }
89
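/*
 * Allocate, initialize and register a new peripheral instance for the
 * given path: pick a unit number (honoring any wiring hints), create a
 * private path, register with the transport layer, link the instance into
 * its driver's unit list, and finally invoke the driver's constructor.
 * Partial failures are unwound according to init_level.
 */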
90 cam_status
91 cam_periph_alloc(periph_ctor_t *periph_ctor,
92                  periph_oninv_t *periph_oninvalidate,
93                  periph_dtor_t *periph_dtor, periph_start_t *periph_start,
94                  char *name, cam_periph_type type, struct cam_path *path,
95                  ac_callback_t *ac_callback, ac_code code, void *arg)
96 {
97         struct          periph_driver **p_drv;
98         struct          cam_periph *periph;
99         struct          cam_periph *cur_periph;
100         path_id_t       path_id;
101         target_id_t     target_id;
102         lun_id_t        lun_id;
103         cam_status      status;
104         u_int           init_level;
105
106         init_level = 0;
107         /*
108          * Handle Hot-Plug scenarios.  If there is already a peripheral
109          * of our type assigned to this path, we are likely waiting for
110          * final close on an old, invalidated, peripheral.  If this is
111          * the case, queue up a deferred call to the peripheral's async
112          * handler.  If it looks like a mistaken re-allocation, complain.
113          */
114         if ((periph = cam_periph_find(path, name)) != NULL) {
115
116                 if ((periph->flags & CAM_PERIPH_INVALID) != 0
117                  && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
118                         periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
119                         periph->deferred_callback = ac_callback;
120                         periph->deferred_ac = code;
121                         return (CAM_REQ_INPROG);
122                 } else {
123                         kprintf("cam_periph_alloc: attempt to re-allocate "
124                                "valid device %s%d rejected\n",
125                                periph->periph_name, periph->unit_number);
126                 }
127                 return (CAM_REQ_INVALID);
128         }
129         
130         periph = kmalloc(sizeof(*periph), M_DEVBUF, M_INTWAIT | M_ZERO);
131         
132         init_level++;
133
134         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
135                 if (strcmp((*p_drv)->driver_name, name) == 0)
136                         break;
137         }
138         
139         path_id = xpt_path_path_id(path);
140         target_id = xpt_path_target_id(path);
141         lun_id = xpt_path_lun_id(path);
142         cam_init_pinfo(&periph->pinfo);
143         periph->periph_start = periph_start;
144         periph->periph_dtor = periph_dtor;
145         periph->periph_oninval = periph_oninvalidate;
146         periph->type = type;
147         periph->periph_name = name;
148         periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
149         periph->immediate_priority = CAM_PRIORITY_NONE;
150         periph->refcount = 0;
151         SLIST_INIT(&periph->ccb_list);
152         status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
153         if (status != CAM_REQ_CMP)
154                 goto failure;
155
156         periph->path = path;
157         init_level++;
158
159         status = xpt_add_periph(periph);
160
161         if (status != CAM_REQ_CMP)
162                 goto failure;
163
164         crit_enter();
165         cur_periph = TAILQ_FIRST(&(*p_drv)->units);
166         while (cur_periph != NULL
167             && cur_periph->unit_number < periph->unit_number)
168                 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
169
170         if (cur_periph != NULL)
171                 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
172         else {
173                 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
174                 (*p_drv)->generation++;
175         }
176
177         crit_exit();
178
179         init_level++;
180
181         status = periph_ctor(periph, arg);
182
183         if (status == CAM_REQ_CMP)
184                 init_level++;
185
186 failure:
187         switch (init_level) {
188         case 4:
189                 /* Initialized successfully */
190                 break;
191         case 3:
192                 crit_enter();
193                 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
194                 crit_exit();
195                 xpt_remove_periph(periph);
196         case 2:
197                 xpt_free_path(periph->path);
198         case 1:
199                 kfree(periph, M_DEVBUF);
200         case 0:
201                 /* No cleanup to perform. */
202                 break;
203         default:
204                 panic("cam_periph_alloc: Unknown init level");
205         }
206         return(status);
207 }
208
209 /*
210  * Find a peripheral structure with the specified path, target, lun, 
211  * and (optionally) driver name.  If the name is NULL, this function will
212  * return the first peripheral driver that matches the specified path.
213  */
214 struct cam_periph *
215 cam_periph_find(struct cam_path *path, char *name)
216 {
217         struct periph_driver **p_drv;
218         struct cam_periph *periph;
219
220         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
221                 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
222                         continue;
223
224                 crit_enter();
225                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
226                         if (xpt_path_comp(periph->path, path) == 0) {
227                                 crit_exit();
228                                 return(periph);
229                         }
230                 }
231                 crit_exit();
232                 if (name != NULL)
233                         return(NULL);
234         }
235         return(NULL);
236 }
237
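/*
 * Reference counting for peripheral instances.  cam_periph_acquire() takes
 * a reference; cam_periph_release() drops one and frees the peripheral when
 * it has been invalidated and the count reaches zero.  A typical pattern
 * (sketch only) brackets any work that must keep the instance alive:
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);
 *	... use periph ...
 *	cam_periph_release(periph);
 */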
238 cam_status
239 cam_periph_acquire(struct cam_periph *periph)
240 {
241         if (periph == NULL)
242                 return(CAM_REQ_CMP_ERR);
243
244         crit_enter();
245         periph->refcount++;
246         crit_exit();
247
248         return(CAM_REQ_CMP);
249 }
250
251 void
252 cam_periph_release(struct cam_periph *periph)
253 {
254         if (periph == NULL)
255                 return;
256
257         crit_enter();
258         if ((--periph->refcount == 0)
259          && (periph->flags & CAM_PERIPH_INVALID)) {
260                 camperiphfree(periph);
261         }
262         crit_exit();
263 }
264
265 /*
266  * Look for the next unit number that is not currently in use for this
267  * peripheral type starting at "newunit".  Also exclude unit numbers that
268  * are reserved for future "hardwiring" unless we already know that this
269  * is a potential wired device.  Only assume that the device is "wired" the
270  * first time through the loop since after that we'll be looking at unit
271  * numbers that did not match a wiring entry.
272  */
273 static u_int
274 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
275                   path_id_t pathid, target_id_t target, lun_id_t lun)
276 {
277         struct  cam_periph *periph;
278         char    *periph_name, *strval;
279         int     i, val, dunit;
280         const char *dname;
281
282         crit_enter();
283         periph_name = p_drv->driver_name;
284         for (;;newunit++) {
285
286                 for (periph = TAILQ_FIRST(&p_drv->units);
287                      periph != NULL && periph->unit_number != newunit;
288                      periph = TAILQ_NEXT(periph, unit_links))
289                         ;
290
291                 if (periph != NULL && periph->unit_number == newunit) {
292                         if (wired != 0) {
293                                 xpt_print_path(periph->path);
294                                 kprintf("Duplicate Wired Device entry!\n");
295                                 xpt_print_path(periph->path);
296                                 kprintf("Second device (%s device at scbus%d "
297                                        "target %d lun %d) will not be wired\n",
298                                        periph_name, pathid, target, lun);
299                                 wired = 0;
300                         }
301                         continue;
302                 }
303                 if (wired)
304                         break;
305
306                 /*
307                  * Don't match entries like "da 4" as a wired down
308                  * device, but do match entries like "da 4 target 5"
309                  * or even "da 4 scbus 1". 
310                  */
311                 i = -1;
312                 while ((i = resource_locate(i, periph_name)) != -1) {
313                         dname = resource_query_name(i);
314                         dunit = resource_query_unit(i);
315                         /* if no "target" and no specific scbus, skip */
316                         if (resource_int_value(dname, dunit, "target", &val) &&
317                             (resource_string_value(dname, dunit, "at",&strval)||
318                              strcmp(strval, "scbus") == 0))
319                                 continue;
320                         if (newunit == dunit)
321                                 break;
322                 }
323                 if (i == -1)
324                         break;
325         }
326         crit_exit();
327         return (newunit);
328 }
329
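/*
 * Pick a unit number for a new instance of this peripheral driver.  If a
 * wiring entry (e.g. something along the lines of "da 4 at scbus0 target 2
 * lun 0" -- syntax illustrative) matches the given bus/target/lun, start
 * from that unit; otherwise start from 0 and let camperiphnextunit() find
 * the first number that is neither in use nor reserved by another wiring
 * entry.
 */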
330 static u_int
331 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
332               target_id_t target, lun_id_t lun)
333 {
334         u_int   unit;
335         int     hit, i, val, dunit;
336         const char *dname;
337         char    pathbuf[32], *strval, *periph_name;
338
339         unit = 0;
340
341         periph_name = p_drv->driver_name;
342         ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
343         i = -1;
344         for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
345                 dname = resource_query_name(i);
346                 dunit = resource_query_unit(i);
347                 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
348                         if (strcmp(strval, pathbuf) != 0)
349                                 continue;
350                         hit++;
351                 }
352                 if (resource_int_value(dname, dunit, "target", &val) == 0) {
353                         if (val != target)
354                                 continue;
355                         hit++;
356                 }
357                 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
358                         if (val != lun)
359                                 continue;
360                         hit++;
361                 }
362                 if (hit != 0) {
363                         unit = dunit;
364                         break;
365                 }
366         }
367
368         /*
369          * Either start from 0 looking for the next unit or from
370          * the unit number given in the resource config.  This way,
371          * if we have wildcard matches, we don't return the same
372          * unit number twice.
373          */
374         unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
375                                  target, lun);
376
377         return (unit);
378 }
379
380 void
381 cam_periph_invalidate(struct cam_periph *periph)
382 {
383         /*
384          * The peripheral driver's oninvalidate() routine is only called
385          * the first time a peripheral is invalidated, and it is always
386          * called in a critical section.
387          */
388         crit_enter();
389         if (((periph->flags & CAM_PERIPH_INVALID) == 0)
390          && (periph->periph_oninval != NULL))
391                 periph->periph_oninval(periph);
392
393         periph->flags |= CAM_PERIPH_INVALID;
394         periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
395
396         if (periph->refcount == 0)
397                 camperiphfree(periph);
398         else if (periph->refcount < 0)
399                 kprintf("cam_periph_invalidate: refcount < 0!!\n");
400         crit_exit();
401 }
402
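/*
 * Final teardown of a peripheral.  Called once the peripheral has been
 * invalidated and its last reference is dropped: the driver's destructor
 * runs, the instance is removed from its driver's unit list, and any
 * deferred async callback recorded by cam_periph_alloc() for a hot-plug
 * replacement device is delivered before the path and memory are freed.
 */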
403 static void
404 camperiphfree(struct cam_periph *periph)
405 {
406         struct periph_driver **p_drv;
407
408         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
409                 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
410                         break;
411         }
412
413         if (*p_drv == NULL) {
414                 kprintf("camperiphfree: attempt to free "
415                         "non-existent periph: %s\n", periph->periph_name);
416                 return;
417         }
418         
419         if (periph->periph_dtor != NULL)
420                 periph->periph_dtor(periph);
421         
422         crit_enter();
423         TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
424         (*p_drv)->generation++;
425         crit_exit();
426
427         xpt_remove_periph(periph);
428
429         if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
430                 union ccb ccb;
431                 void *arg;
432
433                 switch (periph->deferred_ac) {
434                 case AC_FOUND_DEVICE:
435                         ccb.ccb_h.func_code = XPT_GDEV_TYPE;
436                         xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
437                         xpt_action(&ccb);
438                         arg = &ccb;
439                         break;
440                 case AC_PATH_REGISTERED:
441                         ccb.ccb_h.func_code = XPT_PATH_INQ;
442                         xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
443                         xpt_action(&ccb);
444                         arg = &ccb;
445                         break;
446                 default:
447                         arg = NULL;
448                         break;
449                 }
450                 periph->deferred_callback(NULL, periph->deferred_ac,
451                                           periph->path, arg);
452         }
453         xpt_free_path(periph->path);
454         kfree(periph, M_DEVBUF);
455 }
456
457 /*
458  * Wait interruptibly for an exclusive lock.
459  */
460 int
461 cam_periph_lock(struct cam_periph *periph, int flags)
462 {
463         int error;
464
465         while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
466                 periph->flags |= CAM_PERIPH_LOCK_WANTED;
467                 if ((error = tsleep(periph, flags, "caplck", 0)) != 0)
468                         return error;
469         }
470
471         if (cam_periph_acquire(periph) != CAM_REQ_CMP)
472                 return(ENXIO);
473
474         periph->flags |= CAM_PERIPH_LOCKED;
475         return 0;
476 }
477
478 /*
479  * Unlock and wake up any waiters.
480  */
481 void
482 cam_periph_unlock(struct cam_periph *periph)
483 {
484         periph->flags &= ~CAM_PERIPH_LOCKED;
485         if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
486                 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
487                 wakeup(periph);
488         }
489
490         cam_periph_release(periph);
491 }
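
/*
 * A sketch of the usual pattern around a peripheral operation (illustrative
 * only; error handling abbreviated):
 *
 *	if ((error = cam_periph_lock(periph, PCATCH)) != 0)
 *		return (error);
 *	... issue CCBs / touch per-instance state ...
 *	cam_periph_unlock(periph);
 */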
492
493 /*
494  * Map user virtual pointers into kernel virtual address space, so we can
495  * access the memory.  This won't work on physical pointers; for now it's
496  * up to the caller to check for that.  (XXX KDM -- should we do that here
497  * instead?)  This also only works for up to DFLTPHYS of memory, since we
498  * use buffers to map data in and out and are limited to the buffer size.
499  */
500 int
501 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
502 {
503         int numbufs, i, j;
504         buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
505         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
506         u_int32_t lengths[CAM_PERIPH_MAXMAPS];
507         u_int32_t dirs[CAM_PERIPH_MAXMAPS];
508
509         switch(ccb->ccb_h.func_code) {
510         case XPT_DEV_MATCH:
511                 if (ccb->cdm.match_buf_len == 0) {
512                         kprintf("cam_periph_mapmem: invalid match buffer "
513                                "length 0\n");
514                         return(EINVAL);
515                 }
516                 if (ccb->cdm.pattern_buf_len > 0) {
517                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
518                         lengths[0] = ccb->cdm.pattern_buf_len;
519                         dirs[0] = CAM_DIR_OUT;
520                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
521                         lengths[1] = ccb->cdm.match_buf_len;
522                         dirs[1] = CAM_DIR_IN;
523                         numbufs = 2;
524                 } else {
525                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
526                         lengths[0] = ccb->cdm.match_buf_len;
527                         dirs[0] = CAM_DIR_IN;
528                         numbufs = 1;
529                 }
530                 break;
531         case XPT_SCSI_IO:
532         case XPT_CONT_TARGET_IO:
533                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
534                         return(0);
535
536                 data_ptrs[0] = &ccb->csio.data_ptr;
537                 lengths[0] = ccb->csio.dxfer_len;
538                 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
539                 numbufs = 1;
540                 break;
541         default:
542                 return(EINVAL);
543                 break; /* NOTREACHED */
544         }
545
546         /*
547          * Check the transfer length and permissions first, so we don't
548          * have to unmap any previously mapped buffers.
549          */
550         for (i = 0; i < numbufs; i++) {
551                 /*
552                  * This is somewhat bogus; we really need an R+W command.
553                  * For now the buffer needs some sort of command, so use
554                  * BUF_CMD_WRITE to indicate a write and BUF_CMD_READ for R+W.
555                  */
556                 cmd[i] = BUF_CMD_WRITE;
557
558                 /*
559                  * The userland data pointer passed in may not be page
560                  * aligned.  vmapbuf() truncates the address to a page
561                  * boundary, so if the address isn't page aligned, we'll
562                  * need enough space for the given transfer length, plus
563                  * whatever extra space is necessary to make it to the page
564                  * boundary.
565                  */
566                 if ((lengths[i] +
567                     (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
568                         kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
569                                "which is greater than DFLTPHYS(%d)\n",
570                                (long)(lengths[i] +
571                                (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
572                                DFLTPHYS);
573                         return(E2BIG);
574                 }
575
576                 if (dirs[i] & CAM_DIR_OUT) {
577                         if (!useracc(*data_ptrs[i], lengths[i], 
578                                      VM_PROT_READ)) {
579                                 kprintf("cam_periph_mapmem: error, "
580                                         "address %p, length %lu isn't "
581                                         "user accessible for READ\n",
582                                         (void *)*data_ptrs[i],
583                                         (u_long)lengths[i]);
584                                 return(EACCES);
585                         }
586                 }
587
588                 if (dirs[i] & CAM_DIR_IN) {
589                         cmd[i] = BUF_CMD_READ;
590                         if (!useracc(*data_ptrs[i], lengths[i], 
591                                      VM_PROT_WRITE)) {
592                                 kprintf("cam_periph_mapmem: error, "
593                                         "address %p, length %lu isn't "
594                                         "user accessible for WRITE\n",
595                                         (void *)*data_ptrs[i],
596                                         (u_long)lengths[i]);
597
598                                 return(EACCES);
599                         }
600                 }
601
602         }
603
604         for (i = 0; i < numbufs; i++) {
605                 /*
606                  * Get the buffer.
607                  */
608                 mapinfo->bp[i] = getpbuf(NULL);
609
610                 /* save the original user pointer */
611                 mapinfo->saved_ptrs[i] = *data_ptrs[i];
612
613                 /* set the flags */
614                 mapinfo->bp[i]->b_cmd = cmd[i];
615
616                 /* map the user buffer into kernel memory */
617                 if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i]) < 0) {
618                         kprintf("cam_periph_mapmem: error, "
619                                 "address %p, length %lu isn't "
620                                 "user accessible any more\n",
621                                 (void *)*data_ptrs[i],
622                                 (u_long)lengths[i]);
623                         for (j = 0; j < i; ++j) {
624                                 *data_ptrs[j] = mapinfo->saved_ptrs[j];
625                                 vunmapbuf(mapinfo->bp[j]);
626                                 relpbuf(mapinfo->bp[j], NULL);
627                         }
628                         mapinfo->num_bufs_used -= i;
629                         return(EACCES);
630                 }
631
632                 /* set our pointer to the new mapped area */
633                 *data_ptrs[i] = mapinfo->bp[i]->b_data;
634
635                 mapinfo->num_bufs_used++;
636         }
637
638         return(0);
639 }
640
641 /*
642  * Unmap memory segments mapped into kernel virtual address space by
643  * cam_periph_mapmem().
644  */
645 void
646 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
647 {
648         int numbufs, i;
649         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
650
651         if (mapinfo->num_bufs_used <= 0) {
652                 /* allow ourselves to be swapped once again */
653                 return;
654         }
655
656         switch (ccb->ccb_h.func_code) {
657         case XPT_DEV_MATCH:
658                 numbufs = min(mapinfo->num_bufs_used, 2);
659
660                 if (numbufs == 1) {
661                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
662                 } else {
663                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
664                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
665                 }
666                 break;
667         case XPT_SCSI_IO:
668         case XPT_CONT_TARGET_IO:
669                 data_ptrs[0] = &ccb->csio.data_ptr;
670                 numbufs = min(mapinfo->num_bufs_used, 1);
671                 break;
672         default:
673                 /* allow ourselves to be swapped once again */
674                 return;
675                 break; /* NOTREACHED */ 
676         }
677
678         for (i = 0; i < numbufs; i++) {
679                 /* Set the user's pointer back to the original value */
680                 *data_ptrs[i] = mapinfo->saved_ptrs[i];
681
682                 /* unmap the buffer */
683                 vunmapbuf(mapinfo->bp[i]);
684
685                 /* release the buffer */
686                 relpbuf(mapinfo->bp[i], NULL);
687         }
688
689         /* allow ourselves to be swapped once again */
690 }
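
/*
 * Sketch of how the map/unmap pair is typically used when a CCB carries
 * user-space buffers, e.g. from a pass-through style ioctl path
 * (illustrative only; "errfunc" stands in for the caller's error routine):
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, errfunc, 0, 0, NULL);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */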
691
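/*
 * Allocate a CCB for this peripheral at the requested priority.  The
 * peripheral is scheduled with the transport layer and we sleep until a
 * CCB of the requested priority is delivered to our ccb_list.
 */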
692 union ccb *
693 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
694 {
695         struct ccb_hdr *ccb_h;
696
697         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cam_periph_getccb\n"));
698
699         crit_enter();
700         
701         while (SLIST_FIRST(&periph->ccb_list) == NULL) {
702                 if (periph->immediate_priority > priority)
703                         periph->immediate_priority = priority;
704                 xpt_schedule(periph, priority);
705                 if ((SLIST_FIRST(&periph->ccb_list) != NULL)
706                  && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
707                         break;
708                 tsleep(&periph->ccb_list, 0, "cgticb", 0);
709         }
710
711         ccb_h = SLIST_FIRST(&periph->ccb_list);
712         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
713         crit_exit();
714         return ((union ccb *)ccb_h);
715 }
716
717 void
718 cam_periph_ccbwait(union ccb *ccb)
719 {
720         crit_enter();
721         if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
722          || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
723                 tsleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0);
724         crit_exit();
725 }
726
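/*
 * Generic ioctl handler.  The only command handled here is CAMGETPASSTHRU,
 * which walks the device list on the peripheral's path looking for the
 * matching pass(4) instance and copies the resulting XPT_GDEVLIST CCB back
 * to the caller.
 */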
727 int
728 cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
729                  int (*error_routine)(union ccb *ccb, 
730                                       cam_flags camflags,
731                                       u_int32_t sense_flags))
732 {
733         union ccb            *ccb;
734         int                  error;
735         int                  found;
736
737         error = found = 0;
738
739         switch(cmd){
740         case CAMGETPASSTHRU:
741                 ccb = cam_periph_getccb(periph, /* priority */ 1);
742                 xpt_setup_ccb(&ccb->ccb_h,
743                               ccb->ccb_h.path,
744                               /*priority*/1);
745                 ccb->ccb_h.func_code = XPT_GDEVLIST;
746
747                 /*
748                  * Basically, the point of this is that we go through
749                  * getting the list of devices, until we find a passthrough
750                  * device.  In the current version of the CAM code, the
751                  * only way to determine what type of device we're dealing
752                  * with is by its name.
753                  */
754                 while (found == 0) {
755                         ccb->cgdl.index = 0;
756                         ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
757                         while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
758
759                                 /* we want the next device in the list */
760                                 xpt_action(ccb);
761                                 if (strncmp(ccb->cgdl.periph_name, 
762                                     "pass", 4) == 0){
763                                         found = 1;
764                                         break;
765                                 }
766                         }
767                         if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
768                             (found == 0)) {
769                                 ccb->cgdl.periph_name[0] = '\0';
770                                 ccb->cgdl.unit_number = 0;
771                                 break;
772                         }
773                 }
774
775                 /* copy the result back out */  
776                 bcopy(ccb, addr, sizeof(union ccb));
777
778                 /* and release the ccb */
779                 xpt_release_ccb(ccb);
780
781                 break;
782         default:
783                 error = ENOTTY;
784                 break;
785         }
786         return(error);
787 }
788
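/*
 * Run a CCB synchronously: dispatch it with xpt_action(), sleep until it
 * completes, and let error_routine decide whether to retry (ERESTART).
 * Any device queue freeze left behind is released, and devstat statistics
 * are recorded for XPT_SCSI_IO requests when a devstat structure is given.
 */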
789 int
790 cam_periph_runccb(union ccb *ccb,
791                   int (*error_routine)(union ccb *ccb,
792                                        cam_flags camflags,
793                                        u_int32_t sense_flags),
794                   cam_flags camflags, u_int32_t sense_flags,
795                   struct devstat *ds)
796 {
797         int error;
798  
799         error = 0;
800         
801         /*
802          * If the user has supplied a stats structure, and if we understand
803          * this particular type of ccb, record the transaction start.
804          */
805         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
806                 devstat_start_transaction(ds);
807
808         xpt_action(ccb);
809  
810         do {
811                 cam_periph_ccbwait(ccb);
812                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
813                         error = 0;
814                 else if (error_routine != NULL)
815                         error = (*error_routine)(ccb, camflags, sense_flags);
816                 else
817                         error = 0;
818
819         } while (error == ERESTART);
820           
821         if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 
822                 cam_release_devq(ccb->ccb_h.path,
823                                  /* relsim_flags */0,
824                                  /* openings */0,
825                                  /* timeout */0,
826                                  /* getcount_only */ FALSE);
827
828         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
829                 devstat_end_transaction(ds,
830                                         ccb->csio.dxfer_len,
831                                         ccb->csio.tag_action & 0xf,
832                                         ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
833                                         CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
834                                         (ccb->ccb_h.flags & CAM_DIR_OUT) ?
835                                         DEVSTAT_WRITE : 
836                                         DEVSTAT_READ);
837
838         return(error);
839 }
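
/*
 * A minimal sketch of the synchronous command sequence built from the
 * helpers above (illustrative only; "errfunc" is the caller's error
 * routine):
 *
 *	ccb = cam_periph_getccb(periph, 1);
 *	scsi_test_unit_ready(&ccb->csio, 0, NULL, MSG_SIMPLE_Q_TAG,
 *			     SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, errfunc, 0, SF_RETRY_UA, NULL);
 *	xpt_release_ccb(ccb);
 */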
840
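/*
 * Freeze a device queue by sending an XPT_NOOP CCB with CAM_DEV_QFREEZE
 * set; cam_release_devq() below thaws it, optionally with relsim flags, a
 * new opening count, or a release timeout.
 */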
841 void
842 cam_freeze_devq(struct cam_path *path)
843 {
844         struct ccb_hdr ccb_h;
845
846         xpt_setup_ccb(&ccb_h, path, /*priority*/1);
847         ccb_h.func_code = XPT_NOOP;
848         ccb_h.flags = CAM_DEV_QFREEZE;
849         xpt_action((union ccb *)&ccb_h);
850 }
851
852 u_int32_t
853 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
854                  u_int32_t openings, u_int32_t timeout,
855                  int getcount_only)
856 {
857         struct ccb_relsim crs;
858
859         xpt_setup_ccb(&crs.ccb_h, path,
860                       /*priority*/1);
861         crs.ccb_h.func_code = XPT_REL_SIMQ;
862         crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
863         crs.release_flags = relsim_flags;
864         crs.openings = openings;
865         crs.release_timeout = timeout;
866         xpt_action((union ccb *)&crs);
867         return (crs.qfrozen_cnt);
868 }
869
870 #define saved_ccb_ptr ppriv_ptr0
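/*
 * Completion handler for the recovery commands (TEST UNIT READY / START
 * UNIT) issued by cam_periph_error().  Once recovery finishes or runs out
 * of retries, the original CCB saved in saved_ccb_ptr is copied back over
 * this one and re-dispatched with xpt_action().
 */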
871 static void
872 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
873 {
874         cam_status      status;
875         int             frozen;
876         int             sense;
877         struct scsi_start_stop_unit *scsi_cmd;
878         u_int32_t       relsim_flags, timeout;
879         u_int32_t       qfrozen_cnt;
880
881         status = done_ccb->ccb_h.status;
882         frozen = (status & CAM_DEV_QFRZN) != 0;
883         sense  = (status & CAM_AUTOSNS_VALID) != 0;
884         status &= CAM_STATUS_MASK;
885
886         timeout = 0;
887         relsim_flags = 0;
888
889         /* 
890          * Unfreeze the queue once if it is already frozen.
891          */
892         if (frozen != 0) {
893                 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
894                                               /*relsim_flags*/0,
895                                               /*openings*/0,
896                                               /*timeout*/0,
897                                               /*getcount_only*/0);
898         }
899
900         switch (status) {
901
902         case CAM_REQ_CMP:
903
904                 /*
905                  * If we have successfully taken a device from the not
906                  * ready to ready state, re-scan the device and re-get the
907                  * inquiry information.  Many devices (mostly disks) don't
908                  * properly report their inquiry information unless they
909                  * are spun up.
910                  */
911                 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
912                         scsi_cmd = (struct scsi_start_stop_unit *)
913                                         &done_ccb->csio.cdb_io.cdb_bytes;
914
915                         if (scsi_cmd->opcode == START_STOP_UNIT)
916                                 xpt_async(AC_INQ_CHANGED,
917                                           done_ccb->ccb_h.path, NULL);
918                 }
919                 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
920                       sizeof(union ccb));
921
922                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
923
924                 xpt_action(done_ccb);
925
926                 break;
927         case CAM_SCSI_STATUS_ERROR:
928                 scsi_cmd = (struct scsi_start_stop_unit *)
929                                 &done_ccb->csio.cdb_io.cdb_bytes;
930                 if (sense != 0) {
931                         struct scsi_sense_data *sense;
932                         int    error_code, sense_key, asc, ascq;        
933
934                         sense = &done_ccb->csio.sense_data;
935                         scsi_extract_sense(sense, &error_code, 
936                                            &sense_key, &asc, &ascq);
937
938                         /*
939                          * If the error is "invalid field in CDB", 
940                          * and the load/eject flag is set, turn the 
941                          * flag off and try again.  This is just in 
942                          * case the drive in question barfs on the 
943                          * load eject flag.  The CAM code should set 
944                          * the load/eject flag by default for 
945                          * removable media.
946                          */
947
948                         /* XXX KDM 
949                          * Should we check to see what the specific
950                          * scsi status is??  Or does it not matter
951                          * since we already know that there was an
952                          * error, and we know what the specific
953                          * error code was, and we know what the
954                          * opcode is..
955                          */
956                         if ((scsi_cmd->opcode == START_STOP_UNIT) &&
957                             ((scsi_cmd->how & SSS_LOEJ) != 0) &&
958                              (asc == 0x24) && (ascq == 0x00) &&
959                              (done_ccb->ccb_h.retry_count > 0)) {
960
961                                 scsi_cmd->how &= ~SSS_LOEJ;
962
963                                 xpt_action(done_ccb);
964
965                         } else if (done_ccb->ccb_h.retry_count > 0) {
966                                 /*
967                                  * In this case, the error recovery
968                                  * command failed, but we've got 
969                                  * some retries left on it.  Give
970                                  * it another try.
971                                  */
972
973                                 /* set the timeout to .5 sec */
974                                 relsim_flags =
975                                         RELSIM_RELEASE_AFTER_TIMEOUT;
976                                 timeout = 500;
977
978                                 xpt_action(done_ccb);
979
980                                 break;
981
982                         } else {
983                                 /* 
984                                  * Copy the original CCB back and
985                                  * send it back to the caller.
986                                  */
987                                 bcopy(done_ccb->ccb_h.saved_ccb_ptr,            
988                                       done_ccb, sizeof(union ccb));
989
990                                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
991
992                                 xpt_action(done_ccb);
993                         }
994                 } else {
995                         /*
996                          * Eh??  The command failed, but we don't
997                          * have any sense.  What's up with that?
998                          * Fire the CCB again to return it to the
999                          * caller.
1000                          */
1001                         bcopy(done_ccb->ccb_h.saved_ccb_ptr,
1002                               done_ccb, sizeof(union ccb));
1003
1004                         periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1005
1006                         xpt_action(done_ccb);
1007
1008                 }
1009                 break;
1010         default:
1011                 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
1012                       sizeof(union ccb));
1013
1014                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1015
1016                 xpt_action(done_ccb);
1017
1018                 break;
1019         }
1020
1021         /* decrement the retry count */
1022         if (done_ccb->ccb_h.retry_count > 0)
1023                 done_ccb->ccb_h.retry_count--;
1024
1025         qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
1026                                       /*relsim_flags*/relsim_flags,
1027                                       /*openings*/0,
1028                                       /*timeout*/timeout,
1029                                       /*getcount_only*/0);
1030 }
1031
1032 /*
1033  * Generic Async Event handler.  Peripheral drivers usually
1034  * filter out the events that require personal attention,
1035  * and leave the rest to this function.
1036  */
1037 void
1038 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1039                  struct cam_path *path, void *arg)
1040 {
1041         switch (code) {
1042         case AC_LOST_DEVICE:
1043                 cam_periph_invalidate(periph);
1044                 break; 
1045         case AC_SENT_BDR:
1046         case AC_BUS_RESET:
1047         {
1048                 cam_periph_bus_settle(periph, SCSI_DELAY);
1049                 break;
1050         }
1051         default:
1052                 break;
1053         }
1054 }
1055
1056 void
1057 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1058 {
1059         struct ccb_getdevstats cgds;
1060
1061         xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
1062         cgds.ccb_h.func_code = XPT_GDEV_STATS;
1063         xpt_action((union ccb *)&cgds);
1064         cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1065 }
1066
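/*
 * If less than duration_ms has elapsed since event_time, freeze the device
 * queue and arrange for it to be released once the remainder of the settle
 * time has passed.
 */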
1067 void
1068 cam_periph_freeze_after_event(struct cam_periph *periph,
1069                               struct timeval* event_time, u_int duration_ms)
1070 {
1071         struct timeval delta;
1072         struct timeval duration_tv;
1073
1074         microuptime(&delta);
1075         timevalsub(&delta, event_time);
1076         duration_tv.tv_sec = duration_ms / 1000;
1077         duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1078         if (timevalcmp(&delta, &duration_tv, <)) {
1079                 timevalsub(&duration_tv, &delta);
1080
1081                 duration_ms = duration_tv.tv_sec * 1000;
1082                 duration_ms += duration_tv.tv_usec / 1000;
1083                 cam_freeze_devq(periph->path); 
1084                 cam_release_devq(periph->path,
1085                                 RELSIM_RELEASE_AFTER_TIMEOUT,
1086                                 /*reduction*/0,
1087                                 /*timeout*/duration_ms,
1088                                 /*getcount_only*/0);
1089         }
1090
1091 }
1092
1093 /*
1094  * Generic error handler.  Peripheral drivers usually filter
1095  * out the errors that they handle in a unique manner, then
1096  * call this function.
1097  */
1098 int
1099 cam_periph_error(union ccb *ccb, cam_flags camflags,
1100                  u_int32_t sense_flags, union ccb *save_ccb)
1101 {
1102         cam_status status;
1103         int        frozen;
1104         int        sense;
1105         int        error;
1106         int        openings;
1107         int        retry;
1108         u_int32_t  relsim_flags;
1109         u_int32_t  timeout;
1110         
1111         status = ccb->ccb_h.status;
1112         frozen = (status & CAM_DEV_QFRZN) != 0;
1113         sense  = (status & CAM_AUTOSNS_VALID) != 0;
1114         status &= CAM_STATUS_MASK;
1115         relsim_flags = 0;
1116
1117         switch (status) {
1118         case CAM_REQ_CMP:
1119                 /* decrement the number of retries */
1120                 retry = ccb->ccb_h.retry_count > 0;
1121                 if (retry)
1122                         ccb->ccb_h.retry_count--;
1123                 error = 0;
1124                 break;
1125         case CAM_AUTOSENSE_FAIL:
1126         case CAM_SCSI_STATUS_ERROR:
1127
1128                 switch (ccb->csio.scsi_status) {
1129                 case SCSI_STATUS_OK:
1130                 case SCSI_STATUS_COND_MET:
1131                 case SCSI_STATUS_INTERMED:
1132                 case SCSI_STATUS_INTERMED_COND_MET:
1133                         error = 0;
1134                         break;
1135                 case SCSI_STATUS_CMD_TERMINATED:
1136                 case SCSI_STATUS_CHECK_COND:
1137                         if (sense != 0) {
1138                                 struct scsi_sense_data *sense;
1139                                 int    error_code, sense_key, asc, ascq;
1140                                 struct cam_periph *periph;
1141                                 scsi_sense_action err_action;
1142                                 struct ccb_getdev cgd;
1143
1144                                 sense = &ccb->csio.sense_data;
1145                                 scsi_extract_sense(sense, &error_code,
1146                                                    &sense_key, &asc, &ascq);
1147                                 periph = xpt_path_periph(ccb->ccb_h.path);
1148
1149                                 /*
1150                                  * Grab the inquiry data for this device.
1151                                  */
1152                                 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path,
1153                                               /*priority*/ 1);
1154                                 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1155                                 xpt_action((union ccb *)&cgd);
1156
1157                                 err_action = scsi_error_action(asc, ascq, 
1158                                                                &cgd.inq_data);
1159
1160                                 /*
1161                                  * Send a Test Unit Ready to the device.
1162                                  * If the 'many' flag is set, we send 120
1163                                  * test unit ready commands, one every half 
1164                                  * second.  Otherwise, we just send one TUR.
1165                                  * We only want to do this if the retry 
1166                                  * count has not been exhausted.
1167                                  */
1168                                 if (((err_action & SS_MASK) == SS_TUR)
1169                                  && save_ccb != NULL 
1170                                  && ccb->ccb_h.retry_count > 0) {
1171
1172                                         /*
1173                                          * Since error recovery is already
1174                                          * in progress, don't attempt to
1175                                          * process this error.  It is probably
1176                                          * related to the error that caused
1177                                          * the currently active error recovery
1178                                          * action.  Also, we only have
1179                                          * space for one saved CCB, so if we
1180                                          * had two concurrent error recovery
1181                                          * actions, we would end up
1182                                          * over-writing one error recovery
1183                                          * CCB with another one.
1184                                          */
1185                                         if (periph->flags &
1186                                             CAM_PERIPH_RECOVERY_INPROG) {
1187                                                 error = ERESTART;
1188                                                 break;
1189                                         }
1190
1191                                         periph->flags |=
1192                                                 CAM_PERIPH_RECOVERY_INPROG;
1193
1194                                         /* decrement the number of retries */
1195                                         if ((err_action & 
1196                                              SSQ_DECREMENT_COUNT) != 0) {
1197                                                 retry = 1;
1198                                                 ccb->ccb_h.retry_count--;
1199                                         }
1200
1201                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1202
1203                                         /*
1204                                          * We retry this one every half
1205                                          * second for a minute.  If the
1206                                          * device hasn't become ready in a
1207                                          * minute's time, it's unlikely to
1208                                          * ever become ready.  If the table
1209                                          * doesn't specify SSQ_MANY, we can
1210                                          * only try this once.  Oh well.
1211                                          */
1212                                         if ((err_action & SSQ_MANY) != 0)
1213                                                 scsi_test_unit_ready(&ccb->csio,
1214                                                                /*retries*/120,
1215                                                                camperiphdone,
1216                                                                MSG_SIMPLE_Q_TAG,
1217                                                                SSD_FULL_SIZE,
1218                                                                /*timeout*/5000);
1219                                         else
1220                                                 scsi_test_unit_ready(&ccb->csio,
1221                                                                /*retries*/1,
1222                                                                camperiphdone,
1223                                                                MSG_SIMPLE_Q_TAG,
1224                                                                SSD_FULL_SIZE,
1225                                                                /*timeout*/5000);
1226
1227                                         /* release the queue after .5 sec.  */
1228                                         relsim_flags = 
1229                                                 RELSIM_RELEASE_AFTER_TIMEOUT;
1230                                         timeout = 500;
1231                                         /*
1232                                          * Drop the priority to 0 so that 
1233                                          * we are the first to execute.  Also 
1234                                          * freeze the queue after this command 
1235                                          * is sent so that we can restore the 
1236                                          * old csio and have it queued in the 
1237                                          * proper order before we let normal 
1238                                          * transactions go to the drive.
1239                                          */
1240                                         ccb->ccb_h.pinfo.priority = 0;
1241                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1242
1243                                         /*
1244                                          * Save a pointer to the original
1245                                          * CCB in the new CCB.
1246                                          */
1247                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1248
1249                                         error = ERESTART;
1250                                 }
1251                                 /*
1252                                  * Send a start unit command to the device,
1253                                  * and then retry the command.  We only 
1254                                  * want to do this if the retry count has 
1255                                  * not been exhausted.  If the user 
1256                                  * specified 0 retries, then we follow 
1257                                  * their request and do not retry.
1258                                  */
1259                                 else if (((err_action & SS_MASK) == SS_START)
1260                                       && save_ccb != NULL 
1261                                       && ccb->ccb_h.retry_count > 0) {
1262                                         int le;
1263
1264                                         /*
1265                                          * Only one error recovery action
1266                                          * at a time.  See above.
1267                                          */
1268                                         if (periph->flags &
1269                                             CAM_PERIPH_RECOVERY_INPROG) {
1270                                                 error = ERESTART;
1271                                                 break;
1272                                         }
1273
1274                                         periph->flags |=
1275                                                 CAM_PERIPH_RECOVERY_INPROG;
1276
1277                                         /* decrement the number of retries */
1278                                         retry = 1;
1279                                         ccb->ccb_h.retry_count--;
1280
1281                                         /*
1282                                          * Check for removable media and
1283                                          * set load/eject flag
1284                                          * appropriately.
1285                                          */
1286                                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1287                                                 le = TRUE;
1288                                         else
1289                                                 le = FALSE;
1290
1291                                         /*
1292                                          * Attempt to start the drive up.
1293                                          *
1294                                          * Save the current ccb so it can 
1295                                          * be restored and retried once the 
1296                                          * drive is started up.
1297                                          */
1298                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1299
1300                                         scsi_start_stop(&ccb->csio,
1301                                                         /*retries*/1,
1302                                                         camperiphdone,
1303                                                         MSG_SIMPLE_Q_TAG,
1304                                                         /*start*/TRUE,
1305                                                         /*load/eject*/le,
1306                                                         /*immediate*/FALSE,
1307                                                         SSD_FULL_SIZE,
1308                                                         /*timeout*/50000);
1309                                         /*
1310                                          * Drop the priority to 0 so that 
1311                                          * we are the first to execute.  Also 
1312                                          * freeze the queue after this command 
1313                                          * is sent so that we can restore the 
1314                                          * old csio and have it queued in the 
1315                                          * proper order before we let normal 
1316                                          * transactions go to the drive.
1317                                          */
1318                                         ccb->ccb_h.pinfo.priority = 0;
1319                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1320
1321                                         /*
1322                                          * Save a pointer to the original
1323                                          * CCB in the new CCB.
1324                                          */
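                                             /*
                                              * camperiphdone() uses this
                                              * pointer to restore and re-issue
                                              * the original request once the
                                              * recovery command completes.
                                              */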
1325                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1326
1327                                         error = ERESTART;
1328                                 } else if ((sense_flags & SF_RETRY_UA) != 0) {
1329                                         /*
1330                                          * XXX KDM this is a *horrible*
1331                                          * hack.  
1332                                          */
1333                                         error = scsi_interpret_sense(ccb,
1334                                                                   sense_flags,
1335                                                                   &relsim_flags,
1336                                                                   &openings,
1337                                                                   &timeout,
1338                                                                   err_action);
1339                                 } 
1340
1341                                 /*
1342                                  * Theoretically, this code should send a
1343                                  * test unit ready to the given device, and 
1344                                  * if it returns an error, send a start 
1345                                  * unit command.  Since we don't yet have
1346                                  * the capability to do two-command error
1347                                  * recovery, just send a start unit.
1348                                  * XXX KDM fix this!
1349                                  */
1350                                 else if (((err_action & SS_MASK) == SS_TURSTART)
1351                                       && save_ccb != NULL
1352                                       && ccb->ccb_h.retry_count > 0) {
1353                                         int le;
1354
1355                                         /*
1356                                          * Only one error recovery action
1357                                          * at a time.  See above.
1358                                          */
1359                                         if (periph->flags &
1360                                             CAM_PERIPH_RECOVERY_INPROG) {
1361                                                 error = ERESTART;
1362                                                 break;
1363                                         }
1364
1365                                         periph->flags |=
1366                                                 CAM_PERIPH_RECOVERY_INPROG;
1367
1368                                         /* decrement the number of retries */
1369                                         retry = 1;
1370                                         ccb->ccb_h.retry_count--;
1371
1372                                         /*
1373                                          * Check for removable media and
1374                                          * set load/eject flag
1375                                          * appropriately.
1376                                          */
1377                                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1378                                                 le = TRUE;
1379                                         else
1380                                                 le = FALSE;
1381
1382                                         /*
1383                                          * Attempt to start the drive up.
1384                                          *
1385                                          * Save the current ccb so it can 
1386                                          * be restored and retried once the 
1387                                          * drive is started up.
1388                                          */
1389                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1390
1391                                         scsi_start_stop(&ccb->csio,
1392                                                         /*retries*/1,
1393                                                         camperiphdone,
1394                                                         MSG_SIMPLE_Q_TAG,
1395                                                         /*start*/TRUE,
1396                                                         /*load/eject*/le,
1397                                                         /*immediate*/FALSE,
1398                                                         SSD_FULL_SIZE,
1399                                                         /*timeout*/50000);
1400
1401                                         /* release the queue after .5 sec.  */
1402                                         relsim_flags = 
1403                                                 RELSIM_RELEASE_AFTER_TIMEOUT;
1404                                         timeout = 500;
1405                                         /*
1406                                          * Drop the priority to 0 so that 
1407                                          * we are the first to execute.  Also 
1408                                          * freeze the queue after this command 
1409                                          * is sent so that we can restore the 
1410                                          * old csio and have it queued in the 
1411                                          * proper order before we let normal 
1412                                          * transactions go to the drive.
1413                                          */
1414                                         ccb->ccb_h.pinfo.priority = 0;
1415                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1416
1417                                         /*
1418                                          * Save a pointer to the original
1419                                          * CCB in the new CCB.
1420                                          */
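                                             /*
                                              * As in the SS_START case above,
                                              * camperiphdone() restores the
                                              * original CCB from this pointer
                                              * when the START UNIT finishes.
                                              */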
1421                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1422
1423                                         error = ERESTART;
1424                                 } else {
1425                                         error = scsi_interpret_sense(ccb,
1426                                                                   sense_flags,
1427                                                                   &relsim_flags,
1428                                                                   &openings,
1429                                                                   &timeout,
1430                                                                   err_action);
1431                                 }
1432                         } else if (ccb->csio.scsi_status == 
1433                                    SCSI_STATUS_CHECK_COND
1434                                 && status != CAM_AUTOSENSE_FAIL) {
1435                                 /* no point in decrementing the retry count */
1436                                 panic("cam_periph_error: scsi status of "
1437                                       "CHECK COND returned but no sense "
1438                                       "information is available.  "
1439                                       "Controller should have returned "
1440                                       "CAM_AUTOSENSE_FAIL");
1441                                 /* NOTREACHED */
1442                                 error = EIO;
1443                         } else if (ccb->ccb_h.retry_count == 0) {
1444                                 /*
1445                                  * XXX KDM shouldn't there be a better
1446                                  * argument to return??
1447                                  */
1448                                 error = EIO;
1449                         } else {
1450                                 /* decrement the number of retries */
1451                                 retry = ccb->ccb_h.retry_count > 0;
1452                                 if (retry)
1453                                         ccb->ccb_h.retry_count--;
1454                                 /*
1455                                  * If it was aborted with no
1456                                  * clue as to the reason, just
1457                                  * retry it again.
1458                                  */
1459                                 error = ERESTART;
1460                         }
1461                         break;
1462                 case SCSI_STATUS_QUEUE_FULL:
1463                 {
1464                         /* no decrement */
1465                         struct ccb_getdevstats cgds;
1466
1467                         /*
1468                          * First off, find out what the current
1469                          * transaction counts are.
1470                          */
1471                         xpt_setup_ccb(&cgds.ccb_h,
1472                                       ccb->ccb_h.path,
1473                                       /*priority*/1);
1474                         cgds.ccb_h.func_code = XPT_GDEV_STATS;
1475                         xpt_action((union ccb *)&cgds);
1476
1477                         /*
1478                          * If we were the only transaction active, treat
1479                          * the QUEUE FULL as if it were a BUSY condition.
1480                          */
1481                         if (cgds.dev_active != 0) {
1482                                 int total_openings;
1483
1484                                 /*
1485                                  * Reduce the number of openings to
1486                                  * be 1 less than the amount it took
1487                                  * to get a queue full, bounded by the
1488                                  * minimum allowed tag count for this
1489                                  * device.
1490                                  */
1491                                 total_openings =
1492                                     cgds.dev_active+cgds.dev_openings;
1493                                 openings = cgds.dev_active;
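                                     /*
                                      * For example, with dev_active == 3 and
                                      * dev_openings == 4, total_openings is 7
                                      * and openings is cut back to 3 (assuming
                                      * mintags is <= 3), so the tag count is
                                      * adjusted below via RELSIM_ADJUST_OPENINGS.
                                      */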
1494                                 if (openings < cgds.mintags)
1495                                         openings = cgds.mintags;
1496                                 if (openings < total_openings)
1497                                         relsim_flags = RELSIM_ADJUST_OPENINGS;
1498                                 else {
1499                                         /*
1500                                          * Some devices report queue full for
1501                                          * temporary resource shortages.  For
1502                                          * this reason, we allow a minimum
1503                                          * tag count to be entered via a
1504                                          * quirk entry to prevent the queue
1505                                          * count on these devices from falling
1506                                          * to a pessimistically low value.  We
1507                                          * still wait for the next successful
1508                                          * completion, however, before queueing
1509                                          * more transactions to the device.
1510                                          */
1511                                         relsim_flags =
1512                                             RELSIM_RELEASE_AFTER_CMDCMPLT;
1513                                 }
1514                                 timeout = 0;
1515                                 error = ERESTART;
1516                                 break;
1517                         }
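                             /*
                              * dev_active was 0, so ours was the only
                              * outstanding command; fall through and treat
                              * the QUEUE FULL like a BUSY status.
                              */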
1518                         /* FALLTHROUGH */
1519                 }
1520                 case SCSI_STATUS_BUSY:
1521                         /*
1522                          * Restart the queue after either another
1523                          * command completes or a 1 second timeout.
1524                          * If we have any retries left, that is.
1525                          */
1526                         retry = ccb->ccb_h.retry_count > 0;
1527                         if (retry) {
1528                                 ccb->ccb_h.retry_count--;
1529                                 error = ERESTART;
1530                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1531                                              | RELSIM_RELEASE_AFTER_CMDCMPLT;
1532                                 timeout = 1000;
1533                         } else {
1534                                 error = EIO;
1535                         }
1536                         break;
1537                 case SCSI_STATUS_RESERV_CONFLICT:
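                             /*
                              * A reservation conflict is not retried here;
                              * fail the request.
                              */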
1538                         error = EIO;
1539                         break;
1540                 default:
1541                         error = EIO;
1542                         break;
1543                 }
1544                 break;
1545         case CAM_REQ_CMP_ERR:
1546         case CAM_CMD_TIMEOUT:
1547         case CAM_UNEXP_BUSFREE:
1548         case CAM_UNCOR_PARITY:
1549         case CAM_DATA_RUN_ERR:
1550                 /* decrement the number of retries */
1551                 retry = ccb->ccb_h.retry_count > 0;
1552                 if (retry) {
1553                         ccb->ccb_h.retry_count--;
1554                         error = ERESTART;
1555                 } else {
1556                         error = EIO;
1557                 }
1558                 break;
1559         case CAM_UA_ABORT:
1560         case CAM_UA_TERMIO:
1561         case CAM_MSG_REJECT_REC:
1562                 /* XXX Don't know that these are correct */
1563                 error = EIO;
1564                 break;
1565         case CAM_SEL_TIMEOUT:
1566         {
1567                 /*
1568                  * XXX
1569                  * A single selection timeout should not be enough
1570                  * to invalidate a device.  We should retry for multiple
1571                  * seconds assuming this isn't a probe.  We'll probably
1572                  * need a special flag for that.
1573                  */
1574 #if 0
1575                 struct cam_path *newpath;
1576
1577                 /* Should we do more if we can't create the path?? */
1578                 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1579                                     xpt_path_path_id(ccb->ccb_h.path),
1580                                     xpt_path_target_id(ccb->ccb_h.path),
1581                                     CAM_LUN_WILDCARD) != CAM_REQ_CMP) 
1582                         break;
1583                 /*
1584                  * Let peripheral drivers know that this device has gone
1585                  * away.
1586                  */
1587                 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1588                 xpt_free_path(newpath);
1589 #endif
1590                 if ((sense_flags & SF_RETRY_SELTO) != 0) {
1591                         retry = ccb->ccb_h.retry_count > 0;
1592                         if (retry) {
1593                                 ccb->ccb_h.retry_count--;
1594                                 error = ERESTART;
1595                                 /*
1596                                  * Wait half a second to give the device
1597                                  * time to recover before we try again.
1598                                  */
1599                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1600                                 timeout = 500;
1601                         } else {
1602                                 error = ENXIO;
1603                         }
1604                 } else {
1605                         error = ENXIO;
1606                 }
1607                 break;
1608         }
1609         case CAM_REQ_INVALID:
1610         case CAM_PATH_INVALID:
1611         case CAM_DEV_NOT_THERE:
1612         case CAM_NO_HBA:
1613         case CAM_PROVIDE_FAIL:
1614         case CAM_REQ_TOO_BIG:           
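                     /*
                      * These statuses indicate a problem with the request
                      * itself (or the path it was issued on); retrying will
                      * not help.
                      */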
1615                 error = EINVAL;
1616                 break;
1617         case CAM_SCSI_BUS_RESET:
1618         case CAM_BDR_SENT:              
1619         case CAM_REQUEUE_REQ:
1620                 /* Unconditional requeue, dammit */
1621                 error = ERESTART;
1622                 break;
1623         case CAM_RESRC_UNAVAIL:
1624         case CAM_BUSY:
1625                 /* timeout??? */
1626         default:
1627                 /* decrement the number of retries */
1628                 retry = ccb->ccb_h.retry_count > 0;
1629                 if (retry) {
1630                         ccb->ccb_h.retry_count--;
1631                         error = ERESTART;
1632                 } else {
1633                         /* Check the sense codes */
1634                         error = EIO;
1635                 }
1636                 break;
1637         }
1638
1639         /* Attempt a retry */
1640         if (error == ERESTART || error == 0) {  
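                     /*
                      * Clear the frozen indication on the CCB before
                      * re-issuing it; the device queue is released explicitly
                      * below using the relsim flags, openings, and timeout
                      * gathered above.
                      */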
1641                 if (frozen != 0)
1642                         ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1643
1644                 if (error == ERESTART)
1645                         xpt_action(ccb);
1646                 
1647                 if (frozen != 0) {
1648                         cam_release_devq(ccb->ccb_h.path,
1649                                          relsim_flags,
1650                                          openings,
1651                                          timeout,
1652                                          /*getcount_only*/0);
1653                 }
1654         }
1655
1656
1657         return (error);
1658 }