1 /*
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/cam/cam_periph.c,v 1.24.2.3 2003/01/25 19:04:40 dillon Exp $
30  * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.22 2007/11/13 00:28:27 pavalos Exp $
31  */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/linker_set.h>
38 #include <sys/buf.h>
39 #include <sys/proc.h>
40 #include <sys/devicestat.h>
41 #include <sys/bus.h>
42 #include <vm/vm.h>
43 #include <vm/vm_extern.h>
44
45 #include <sys/thread2.h>
46
47 #include "cam.h"
48 #include "cam_ccb.h"
49 #include "cam_xpt_periph.h"
50 #include "cam_periph.h"
51 #include "cam_debug.h"
52
53 #include <bus/cam/scsi/scsi_all.h>
54 #include <bus/cam/scsi/scsi_message.h>
55 #include <bus/cam/scsi/scsi_pass.h>
56
57 static  u_int           camperiphnextunit(struct periph_driver *p_drv,
58                                           u_int newunit, int wired,
59                                           path_id_t pathid, target_id_t target,
60                                           lun_id_t lun);
61 static  u_int           camperiphunit(struct periph_driver *p_drv,
62                                       path_id_t pathid, target_id_t target,
63                                       lun_id_t lun); 
64 static  void            camperiphdone(struct cam_periph *periph, 
65                                         union ccb *done_ccb);
66 static  void            camperiphfree(struct cam_periph *periph);
67
68 cam_status
69 cam_periph_alloc(periph_ctor_t *periph_ctor,
70                  periph_oninv_t *periph_oninvalidate,
71                  periph_dtor_t *periph_dtor, periph_start_t *periph_start,
72                  char *name, cam_periph_type type, struct cam_path *path,
73                  ac_callback_t *ac_callback, ac_code code, void *arg)
74 {
75         struct          periph_driver **p_drv;
76         struct          cam_periph *periph;
77         struct          cam_periph *cur_periph;
78         path_id_t       path_id;
79         target_id_t     target_id;
80         lun_id_t        lun_id;
81         cam_status      status;
82         u_int           init_level;
83
84         init_level = 0;
85         /*
86          * Handle Hot-Plug scenarios.  If there is already a peripheral
87          * of our type assigned to this path, we are likely waiting for
88          * final close on an old, invalidated peripheral.  If this is
89          * the case, queue up a deferred call to the peripheral's async
90          * handler.  If it looks like a mistaken re-allocation, complain.
91          */
92         if ((periph = cam_periph_find(path, name)) != NULL) {
93
94                 if ((periph->flags & CAM_PERIPH_INVALID) != 0
95                  && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
96                         periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
97                         periph->deferred_callback = ac_callback;
98                         periph->deferred_ac = code;
99                         return (CAM_REQ_INPROG);
100                 } else {
101                         kprintf("cam_periph_alloc: attempt to re-allocate "
102                                "valid device %s%d rejected\n",
103                                periph->periph_name, periph->unit_number);
104                 }
105                 return (CAM_REQ_INVALID);
106         }
107         
108         periph = kmalloc(sizeof(*periph), M_DEVBUF, M_INTWAIT | M_ZERO);
109         
110         init_level++;
111
112         SET_FOREACH(p_drv, periphdriver_set) {
113                 if (strcmp((*p_drv)->driver_name, name) == 0)
114                         break;
115         }
116         
117         path_id = xpt_path_path_id(path);
118         target_id = xpt_path_target_id(path);
119         lun_id = xpt_path_lun_id(path);
120         cam_init_pinfo(&periph->pinfo);
121         periph->periph_start = periph_start;
122         periph->periph_dtor = periph_dtor;
123         periph->periph_oninval = periph_oninvalidate;
124         periph->type = type;
125         periph->periph_name = name;
126         periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
127         periph->immediate_priority = CAM_PRIORITY_NONE;
128         periph->refcount = 0;
129         SLIST_INIT(&periph->ccb_list);
130         status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
131         if (status != CAM_REQ_CMP)
132                 goto failure;
133
134         periph->path = path;
135         init_level++;
136
137         status = xpt_add_periph(periph);
138
139         if (status != CAM_REQ_CMP)
140                 goto failure;
141
142         crit_enter();
143         cur_periph = TAILQ_FIRST(&(*p_drv)->units);
144         while (cur_periph != NULL
145             && cur_periph->unit_number < periph->unit_number)
146                 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
147
148         if (cur_periph != NULL)
149                 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
150         else {
151                 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
152                 (*p_drv)->generation++;
153         }
154
155         crit_exit();
156
157         init_level++;
158
159         status = periph_ctor(periph, arg);
160
161         if (status == CAM_REQ_CMP)
162                 init_level++;
163
164 failure:
165         switch (init_level) {
166         case 4:
167                 /* Initialized successfully */
168                 break;
169         case 3:
170                 crit_enter();
171                 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
172                 crit_exit();
173                 xpt_remove_periph(periph);
174         case 2:
175                 xpt_free_path(periph->path);
176         case 1:
177                 kfree(periph, M_DEVBUF);
178         case 0:
179                 /* No cleanup to perform. */
180                 break;
181         default:
182                 panic("cam_periph_alloc: Unknown init level");
183         }
184         return(status);
185 }
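
/*
 * For reference, a sketch (not taken verbatim from any one driver) of how a
 * peripheral driver typically calls cam_periph_alloc() from its
 * AC_FOUND_DEVICE async handler.  The xx* names are hypothetical; the real
 * periph drivers follow the same shape.
 */
#if 0
static void
xxasync_found(void *callback_arg, u_int32_t code,
              struct cam_path *path, void *arg)
{
        struct ccb_getdev *cgd = (struct ccb_getdev *)arg;
        cam_status status;

        if (cgd == NULL)
                return;

        /*
         * xxregister/xxoninvalidate/xxcleanup/xxstart are the driver's
         * constructor, invalidation hook, destructor and start routine.
         */
        status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
                                  xxstart, "xx", CAM_PERIPH_BIO,
                                  cgd->ccb_h.path, xxasync_found,
                                  AC_FOUND_DEVICE, cgd);
        if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
                kprintf("xxasync_found: periph alloc failed, status 0x%x\n",
                        status);
}
#endif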
186
187 /*
188  * Find a peripheral structure with the specified path, target, lun, 
189  * and (optionally) type.  If the name is NULL, this function will return
190  * the first peripheral driver that matches the specified path.
191  */
192 struct cam_periph *
193 cam_periph_find(struct cam_path *path, char *name)
194 {
195         struct periph_driver **p_drv;
196         struct cam_periph *periph;
197
198         SET_FOREACH(p_drv, periphdriver_set) {
199                 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
200                         continue;
201
202                 crit_enter();
203                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
204                      periph = TAILQ_NEXT(periph, unit_links)) {
205                         if (xpt_path_comp(periph->path, path) == 0) {
206                                 crit_exit();
207                                 return(periph);
208                         }
209                 }
210                 crit_exit();
211                 if (name != NULL)
212                         return(NULL);
213         }
214         return(NULL);
215 }
216
217 cam_status
218 cam_periph_acquire(struct cam_periph *periph)
219 {
220         if (periph == NULL)
221                 return(CAM_REQ_CMP_ERR);
222
223         crit_enter();
224         periph->refcount++;
225         crit_exit();
226
227         return(CAM_REQ_CMP);
228 }
229
230 void
231 cam_periph_release(struct cam_periph *periph)
232 {
233         if (periph == NULL)
234                 return;
235
236         crit_enter();
237         if ((--periph->refcount == 0)
238          && (periph->flags & CAM_PERIPH_INVALID)) {
239                 camperiphfree(periph);
240         }
241         crit_exit();
242 }
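
/*
 * Reference-count sketch, assuming "periph" was obtained from
 * cam_periph_find() or xpt_path_periph() (xxuse_periph is hypothetical):
 */
#if 0
static int
xxuse_periph(struct cam_periph *periph)
{
        if (cam_periph_acquire(periph) != CAM_REQ_CMP)
                return (ENXIO);
        /* ... the periph cannot be freed out from under us here ... */
        cam_periph_release(periph);     /* may free an invalidated periph */
        return (0);
}
#endif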
243
244 /*
245  * Look for the next unit number that is not currently in use for this
246  * peripheral type starting at "newunit".  Also exclude unit numbers that
247  * are reserved for future "hardwiring" unless we already know that this
248  * is a potential wired device.  Only assume that the device is "wired" the
249  * first time through the loop since after that we'll be looking at unit
250  * numbers that did not match a wiring entry.
251  */
252 static u_int
253 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
254                   path_id_t pathid, target_id_t target, lun_id_t lun)
255 {
256         struct  cam_periph *periph;
257         char    *periph_name, *strval;
258         int     i, val, dunit;
259         const char *dname;
260
261         crit_enter();
262         periph_name = p_drv->driver_name;
263         for (;;newunit++) {
264
265                 for (periph = TAILQ_FIRST(&p_drv->units);
266                      periph != NULL && periph->unit_number != newunit;
267                      periph = TAILQ_NEXT(periph, unit_links))
268                         ;
269
270                 if (periph != NULL && periph->unit_number == newunit) {
271                         if (wired != 0) {
272                                 xpt_print_path(periph->path);
273                                 kprintf("Duplicate Wired Device entry!\n");
274                                 xpt_print_path(periph->path);
275                                 kprintf("Second device (%s device at scbus%d "
276                                        "target %d lun %d) will not be wired\n",
277                                        periph_name, pathid, target, lun);
278                                 wired = 0;
279                         }
280                         continue;
281                 }
282                 if (wired)
283                         break;
284
285                 /*
286                  * Don't match entries like "da 4" as a wired down
287                  * device, but do match entries like "da 4 target 5"
288                  * or even "da 4 scbus 1". 
289                  */
290                 i = -1;
291                 while ((i = resource_locate(i, periph_name)) != -1) {
292                         dname = resource_query_name(i);
293                         dunit = resource_query_unit(i);
294                         /* if no "target" and no specific scbus, skip */
295                         if (resource_int_value(dname, dunit, "target", &val) &&
296                             (resource_string_value(dname, dunit, "at",&strval)||
297                              strcmp(strval, "scbus") == 0))
298                                 continue;
299                         if (newunit == dunit)
300                                 break;
301                 }
302                 if (i == -1)
303                         break;
304         }
305         crit_exit();
306         return (newunit);
307 }
308
309 static u_int
310 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
311               target_id_t target, lun_id_t lun)
312 {
313         u_int   unit;
314         int     hit, i, val, dunit;
315         const char *dname;
316         char    pathbuf[32], *strval, *periph_name;
317
318         unit = 0;
319
320         periph_name = p_drv->driver_name;
321         ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
322         i = -1;
323         for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
324                 dname = resource_query_name(i);
325                 dunit = resource_query_unit(i);
326                 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
327                         if (strcmp(strval, pathbuf) != 0)
328                                 continue;
329                         hit++;
330                 }
331                 if (resource_int_value(dname, dunit, "target", &val) == 0) {
332                         if (val != target)
333                                 continue;
334                         hit++;
335                 }
336                 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
337                         if (val != lun)
338                                 continue;
339                         hit++;
340                 }
341                 if (hit != 0) {
342                         unit = dunit;
343                         break;
344                 }
345         }
346
347         /*
348          * Either start from 0 looking for the next unit or from
349          * the unit number given in the resource config.  This way,
350          * if we have wildcard matches, we don't return the same
351          * unit number twice.
352          */
353         unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
354                                  target, lun);
355
356         return (unit);
357 }
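
/*
 * For reference, the wiring entries consulted above come from the kernel
 * configuration / resource hints.  A hypothetical entry equivalent to
 *
 *      da4 at scbus0 target 2 lun 0
 *
 * reserves unit 4 for the "da" driver at that nexus, so camperiphnextunit()
 * skips unit 4 when numbering other da instances.  A bare "da4" with no
 * "at", "target" or "lun" qualifier is deliberately not treated as wired.
 */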
358
359 void
360 cam_periph_invalidate(struct cam_periph *periph)
361 {
362         /*
363          * We only call this routine the first time a peripheral is
364          * invalidated.  The oninvalidate() routine is always called in
365          * a critical section.
366          */
367         crit_enter();
368         if (((periph->flags & CAM_PERIPH_INVALID) == 0)
369          && (periph->periph_oninval != NULL))
370                 periph->periph_oninval(periph);
371
372         periph->flags |= CAM_PERIPH_INVALID;
373         periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
374
375         if (periph->refcount == 0)
376                 camperiphfree(periph);
377         else if (periph->refcount < 0)
378                 kprintf("cam_periph_invalidate: refcount < 0!!\n");
379         crit_exit();
380 }
381
382 static void
383 camperiphfree(struct cam_periph *periph)
384 {
385         struct periph_driver **p_drv;
386
387         SET_FOREACH(p_drv, periphdriver_set) {
388                 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
389                         break;
390         }
391
392         if (*p_drv == NULL) {
393                 kprintf("camperiphfree: attempt to free "
394                         "non-existent periph: %s\n", periph->periph_name);
395                 return;
396         }
397         
398         if (periph->periph_dtor != NULL)
399                 periph->periph_dtor(periph);
400         
401         crit_enter();
402         TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
403         (*p_drv)->generation++;
404         crit_exit();
405
406         xpt_remove_periph(periph);
407
408         if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
409                 union ccb ccb;
410                 void *arg;
411
412                 switch (periph->deferred_ac) {
413                 case AC_FOUND_DEVICE:
414                         ccb.ccb_h.func_code = XPT_GDEV_TYPE;
415                         xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
416                         xpt_action(&ccb);
417                         arg = &ccb;
418                         break;
419                 case AC_PATH_REGISTERED:
420                         ccb.ccb_h.func_code = XPT_PATH_INQ;
421                         xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
422                         xpt_action(&ccb);
423                         arg = &ccb;
424                         break;
425                 default:
426                         arg = NULL;
427                         break;
428                 }
429                 periph->deferred_callback(NULL, periph->deferred_ac,
430                                           periph->path, arg);
431         }
432         xpt_free_path(periph->path);
433         kfree(periph, M_DEVBUF);
434 }
435
436 /*
437  * Wait interruptibly for an exclusive lock.
438  */
439 int
440 cam_periph_lock(struct cam_periph *periph, int flags)
441 {
442         int error;
443
444         while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
445                 periph->flags |= CAM_PERIPH_LOCK_WANTED;
446                 if ((error = tsleep(periph, flags, "caplck", 0)) != 0)
447                         return error;
448         }
449
450         if (cam_periph_acquire(periph) != CAM_REQ_CMP)
451                 return(ENXIO);
452
453         periph->flags |= CAM_PERIPH_LOCKED;
454         return 0;
455 }
456
457 /*
458  * Unlock and wake up any waiters.
459  */
460 void
461 cam_periph_unlock(struct cam_periph *periph)
462 {
463         periph->flags &= ~CAM_PERIPH_LOCKED;
464         if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
465                 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
466                 wakeup(periph);
467         }
468
469         cam_periph_release(periph);
470 }
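
/*
 * Minimal usage sketch for the lock helpers above, in the style of a driver
 * open routine (xxopen_locked is hypothetical).  PCATCH makes the sleep in
 * cam_periph_lock() interruptible by signals.
 */
#if 0
static int
xxopen_locked(struct cam_periph *periph)
{
        int error;

        error = cam_periph_lock(periph, PCATCH);
        if (error != 0)
                return (error); /* interrupted, or periph went away (ENXIO) */

        /* ... exclusive access to the unit's state and CCBs ... */

        cam_periph_unlock(periph);
        return (0);
}
#endif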
471
472 /*
473  * Map user virtual pointers into kernel virtual address space, so we can
474  * access the memory.  This won't work on physical pointers, for now it's
475  * up to the caller to check for that.  (XXX KDM -- should we do that here
476  * instead?)  This also only works for up to DFLTPHYS memory.  Since we use
477  * buffers to map stuff in and out, we're limited to the buffer size.
478  */
479 int
480 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
481 {
482         int numbufs, i, j;
483         buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
484         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
485         u_int32_t lengths[CAM_PERIPH_MAXMAPS];
486         u_int32_t dirs[CAM_PERIPH_MAXMAPS];
487
488         switch(ccb->ccb_h.func_code) {
489         case XPT_DEV_MATCH:
490                 if (ccb->cdm.match_buf_len == 0) {
491                         kprintf("cam_periph_mapmem: invalid match buffer "
492                                "length 0\n");
493                         return(EINVAL);
494                 }
495                 if (ccb->cdm.pattern_buf_len > 0) {
496                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
497                         lengths[0] = ccb->cdm.pattern_buf_len;
498                         dirs[0] = CAM_DIR_OUT;
499                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
500                         lengths[1] = ccb->cdm.match_buf_len;
501                         dirs[1] = CAM_DIR_IN;
502                         numbufs = 2;
503                 } else {
504                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
505                         lengths[0] = ccb->cdm.match_buf_len;
506                         dirs[0] = CAM_DIR_IN;
507                         numbufs = 1;
508                 }
509                 break;
510         case XPT_SCSI_IO:
511         case XPT_CONT_TARGET_IO:
512                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
513                         return(0);
514
515                 data_ptrs[0] = &ccb->csio.data_ptr;
516                 lengths[0] = ccb->csio.dxfer_len;
517                 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
518                 numbufs = 1;
519                 break;
520         default:
521                 return(EINVAL);
522                 break; /* NOTREACHED */
523         }
524
525         /*
526          * Check the transfer length and permissions first, so we don't
527          * have to unmap any previously mapped buffers.
528          */
529         for (i = 0; i < numbufs; i++) {
530                 /*
531                  * It's kinda bogus, we need an R+W command.  For now the
532                  * buffer needs some sort of command.  Use BUF_CMD_WRITE
533                  * to indicate a write and BUF_CMD_READ to indicate R+W.
534                  */
535                 cmd[i] = BUF_CMD_WRITE;
536
537                 /*
538                  * The userland data pointer passed in may not be page
539                  * aligned.  vmapbuf() truncates the address to a page
540                  * boundary, so if the address isn't page aligned, we'll
541                  * need enough space for the given transfer length, plus
542                  * whatever extra space is necessary to make it to the page
543                  * boundary.
544                  */
545                 if ((lengths[i] +
546                     (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
547                         kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
548                                "which is greater than DFLTPHYS(%d)\n",
549                                (long)(lengths[i] +
550                                (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
551                                DFLTPHYS);
552                         return(E2BIG);
553                 }
554
555                 if (dirs[i] & CAM_DIR_OUT) {
556                         if (!useracc(*data_ptrs[i], lengths[i], 
557                                      VM_PROT_READ)) {
558                                 kprintf("cam_periph_mapmem: error, "
559                                         "address %p, length %lu isn't "
560                                         "user accessible for READ\n",
561                                         (void *)*data_ptrs[i],
562                                         (u_long)lengths[i]);
563                                 return(EACCES);
564                         }
565                 }
566
567                 if (dirs[i] & CAM_DIR_IN) {
568                         cmd[i] = BUF_CMD_READ;
569                         if (!useracc(*data_ptrs[i], lengths[i], 
570                                      VM_PROT_WRITE)) {
571                                 kprintf("cam_periph_mapmem: error, "
572                                         "address %p, length %lu isn't "
573                                         "user accessible for WRITE\n",
574                                         (void *)*data_ptrs[i],
575                                         (u_long)lengths[i]);
576
577                                 return(EACCES);
578                         }
579                 }
580
581         }
582
583         for (i = 0; i < numbufs; i++) {
584                 /*
585                  * Get the buffer.
586                  */
587                 mapinfo->bp[i] = getpbuf(NULL);
588
589                 /* save the original user pointer */
590                 mapinfo->saved_ptrs[i] = *data_ptrs[i];
591
592                 /* set the flags */
593                 mapinfo->bp[i]->b_cmd = cmd[i];
594
595                 /* map the user buffer into kernel memory */
596                 if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i]) < 0) {
597                         kprintf("cam_periph_mapmem: error, "
598                                 "address %p, length %lu isn't "
599                                 "user accessible any more\n",
600                                 (void *)*data_ptrs[i],
601                                 (u_long)lengths[i]);
602                         for (j = 0; j < i; ++j) {
603                                 *data_ptrs[j] = mapinfo->saved_ptrs[j];
604                                 vunmapbuf(mapinfo->bp[j]);
605                                 relpbuf(mapinfo->bp[j], NULL);
606                         }
607                         mapinfo->num_bufs_used -= i;
608                         return(EACCES);
609                 }
610
611                 /* set our pointer to the new mapped area */
612                 *data_ptrs[i] = mapinfo->bp[i]->b_data;
613
614                 mapinfo->num_bufs_used++;
615         }
616
617         return(0);
618 }
619
620 /*
621  * Unmap memory segments mapped into kernel virtual address space by
622  * cam_periph_mapmem().
623  */
624 void
625 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
626 {
627         int numbufs, i;
628         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
629
630         if (mapinfo->num_bufs_used <= 0) {
631                 /* allow ourselves to be swapped once again */
632                 return;
633         }
634
635         switch (ccb->ccb_h.func_code) {
636         case XPT_DEV_MATCH:
637                 numbufs = min(mapinfo->num_bufs_used, 2);
638
639                 if (numbufs == 1) {
640                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
641                 } else {
642                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
643                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
644                 }
645                 break;
646         case XPT_SCSI_IO:
647         case XPT_CONT_TARGET_IO:
648                 data_ptrs[0] = &ccb->csio.data_ptr;
649                 numbufs = min(mapinfo->num_bufs_used, 1);
650                 break;
651         default:
652                 /* allow ourselves to be swapped once again */
653                 return;
654                 break; /* NOTREACHED */ 
655         }
656
657         for (i = 0; i < numbufs; i++) {
658                 /* Set the user's pointer back to the original value */
659                 *data_ptrs[i] = mapinfo->saved_ptrs[i];
660
661                 /* unmap the buffer */
662                 vunmapbuf(mapinfo->bp[i]);
663
664                 /* release the buffer */
665                 relpbuf(mapinfo->bp[i], NULL);
666         }
667
668         /* allow ourselves to be swapped once again */
669 }
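
/*
 * Sketch of the map/unmap pairing, in the style of a pass-through ioctl
 * handler.  Submission of the CCB is elided; xxmap_and_run is hypothetical
 * and assumes the CCB's data pointers reference user-space buffers.
 */
#if 0
static int
xxmap_and_run(union ccb *ccb)
{
        struct cam_periph_map_info mapinfo;
        int error;

        bzero(&mapinfo, sizeof(mapinfo));  /* mapmem expects a zeroed map */
        error = cam_periph_mapmem(ccb, &mapinfo);
        if (error != 0)
                return (error);

        /* ... run the CCB; its data pointers now reference kernel VA ... */

        /* Restore the original user pointers and release the pbufs. */
        cam_periph_unmapmem(ccb, &mapinfo);
        return (0);
}
#endif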
670
671 union ccb *
672 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
673 {
674         struct ccb_hdr *ccb_h;
675
676         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cam_periph_getccb\n"));
677
678         crit_enter();
679         
680         while (periph->ccb_list.slh_first == NULL) {
681                 if (periph->immediate_priority > priority)
682                         periph->immediate_priority = priority;
683                 xpt_schedule(periph, priority);
684                 if ((periph->ccb_list.slh_first != NULL)
685                  && (periph->ccb_list.slh_first->pinfo.priority == priority))
686                         break;
687                 tsleep(&periph->ccb_list, 0, "cgticb", 0);
688         }
689
690         ccb_h = periph->ccb_list.slh_first;
691         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
692         crit_exit();
693         return ((union ccb *)ccb_h);
694 }
695
696 void
697 cam_periph_ccbwait(union ccb *ccb)
698 {
699         crit_enter();
700         if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
701          || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
702                 tsleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0);
703         crit_exit();
704 }
705
706 int
707 cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
708                  int (*error_routine)(union ccb *ccb, 
709                                       cam_flags camflags,
710                                       u_int32_t sense_flags))
711 {
712         union ccb            *ccb;
713         int                  error;
714         int                  found;
715
716         error = found = 0;
717
718         switch(cmd){
719         case CAMGETPASSTHRU:
720                 ccb = cam_periph_getccb(periph, /* priority */ 1);
721                 xpt_setup_ccb(&ccb->ccb_h,
722                               ccb->ccb_h.path,
723                               /*priority*/1);
724                 ccb->ccb_h.func_code = XPT_GDEVLIST;
725
726                 /*
727                  * Basically, the point of this is that we go through
728                  * getting the list of devices, until we find a passthrough
729                  * device.  In the current version of the CAM code, the
730                  * only way to determine what type of device we're dealing
731                  * with is by its name.
732                  */
733                 while (found == 0) {
734                         ccb->cgdl.index = 0;
735                         ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
736                         while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
737
738                                 /* we want the next device in the list */
739                                 xpt_action(ccb);
740                                 if (strncmp(ccb->cgdl.periph_name, 
741                                     "pass", 4) == 0){
742                                         found = 1;
743                                         break;
744                                 }
745                         }
746                         if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
747                             (found == 0)) {
748                                 ccb->cgdl.periph_name[0] = '\0';
749                                 ccb->cgdl.unit_number = 0;
750                                 break;
751                         }
752                 }
753
754                 /* copy the result back out */  
755                 bcopy(ccb, addr, sizeof(union ccb));
756
757                 /* and release the ccb */
758                 xpt_release_ccb(ccb);
759
760                 break;
761         default:
762                 error = ENOTTY;
763                 break;
764         }
765         return(error);
766 }
767
768 int
769 cam_periph_runccb(union ccb *ccb,
770                   int (*error_routine)(union ccb *ccb,
771                                        cam_flags camflags,
772                                        u_int32_t sense_flags),
773                   cam_flags camflags, u_int32_t sense_flags,
774                   struct devstat *ds)
775 {
776         int error;
777  
778         error = 0;
779         
780         /*
781          * If the user has supplied a stats structure, and if we understand
782          * this particular type of ccb, record the transaction start.
783          */
784         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
785                 devstat_start_transaction(ds);
786
787         xpt_action(ccb);
788  
789         do {
790                 cam_periph_ccbwait(ccb);
791                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
792                         error = 0;
793                 else if (error_routine != NULL)
794                         error = (*error_routine)(ccb, camflags, sense_flags);
795                 else
796                         error = 0;
797
798         } while (error == ERESTART);
799           
800         if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 
801                 cam_release_devq(ccb->ccb_h.path,
802                                  /* relsim_flags */0,
803                                  /* openings */0,
804                                  /* timeout */0,
805                                  /* getcount_only */ FALSE);
806
807         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
808                 devstat_end_transaction(ds,
809                                         ccb->csio.dxfer_len,
810                                         ccb->csio.tag_action & 0xf,
811                                         ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
812                                         CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
813                                         (ccb->ccb_h.flags & CAM_DIR_OUT) ?
814                                         DEVSTAT_WRITE : 
815                                         DEVSTAT_READ);
816
817         return(error);
818 }
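
/*
 * Sketch of the synchronous command pattern built from cam_periph_getccb()
 * and cam_periph_runccb().  The hypothetical xxdone() completion routine is
 * assumed to wakeup(&done_ccb->ccb_h.cbfcnp) for this CCB, which is what
 * cam_periph_ccbwait() sleeps on, much as the existing periph drivers do for
 * their "waiting" CCBs.
 */
#if 0
static int
xxtur(struct cam_periph *periph, struct devstat *stats)
{
        union ccb *ccb;
        int error;

        ccb = cam_periph_getccb(periph, /*priority*/1);
        scsi_test_unit_ready(&ccb->csio,
                             /*retries*/4,
                             /*cbfcnp*/xxdone,
                             MSG_SIMPLE_Q_TAG,
                             SSD_FULL_SIZE,
                             /*timeout*/5000);
        error = cam_periph_runccb(ccb, /*error_routine*/NULL,
                                  /*camflags*/0, /*sense_flags*/0, stats);
        xpt_release_ccb(ccb);
        return (error);
}
#endif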
819
820 void
821 cam_freeze_devq(struct cam_path *path)
822 {
823         struct ccb_hdr ccb_h;
824
825         xpt_setup_ccb(&ccb_h, path, /*priority*/1);
826         ccb_h.func_code = XPT_NOOP;
827         ccb_h.flags = CAM_DEV_QFREEZE;
828         xpt_action((union ccb *)&ccb_h);
829 }
830
831 u_int32_t
832 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
833                  u_int32_t openings, u_int32_t timeout,
834                  int getcount_only)
835 {
836         struct ccb_relsim crs;
837
838         xpt_setup_ccb(&crs.ccb_h, path,
839                       /*priority*/1);
840         crs.ccb_h.func_code = XPT_REL_SIMQ;
841         crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
842         crs.release_flags = relsim_flags;
843         crs.openings = openings;
844         crs.release_timeout = timeout;
845         xpt_action((union ccb *)&crs);
846         return (crs.qfrozen_cnt);
847 }
848
849 #define saved_ccb_ptr ppriv_ptr0
850 static void
851 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
852 {
853         cam_status      status;
854         int             frozen;
855         int             sense;
856         struct scsi_start_stop_unit *scsi_cmd;
857         u_int32_t       relsim_flags, timeout;
858         u_int32_t       qfrozen_cnt;
859
860         status = done_ccb->ccb_h.status;
861         frozen = (status & CAM_DEV_QFRZN) != 0;
862         sense  = (status & CAM_AUTOSNS_VALID) != 0;
863         status &= CAM_STATUS_MASK;
864
865         timeout = 0;
866         relsim_flags = 0;
867
868         /* 
869          * Unfreeze the queue once if it is already frozen.
870          */
871         if (frozen != 0) {
872                 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
873                                               /*relsim_flags*/0,
874                                               /*openings*/0,
875                                               /*timeout*/0,
876                                               /*getcount_only*/0);
877         }
878
879         switch (status) {
880
881         case CAM_REQ_CMP:
882
883                 /*
884                  * If we have successfully taken a device from the not
885                  * ready to ready state, re-scan the device and re-get the
886                  * inquiry information.  Many devices (mostly disks) don't
887                  * properly report their inquiry information unless they
888                  * are spun up.
889                  */
890                 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
891                         scsi_cmd = (struct scsi_start_stop_unit *)
892                                         &done_ccb->csio.cdb_io.cdb_bytes;
893
894                         if (scsi_cmd->opcode == START_STOP_UNIT)
895                                 xpt_async(AC_INQ_CHANGED,
896                                           done_ccb->ccb_h.path, NULL);
897                 }
898                 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
899                       sizeof(union ccb));
900
901                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
902
903                 xpt_action(done_ccb);
904
905                 break;
906         case CAM_SCSI_STATUS_ERROR:
907                 scsi_cmd = (struct scsi_start_stop_unit *)
908                                 &done_ccb->csio.cdb_io.cdb_bytes;
909                 if (sense != 0) {
910                         struct scsi_sense_data *sense;
911                         int    error_code, sense_key, asc, ascq;        
912
913                         sense = &done_ccb->csio.sense_data;
914                         scsi_extract_sense(sense, &error_code, 
915                                            &sense_key, &asc, &ascq);
916
917                         /*
918                          * If the error is "invalid field in CDB", 
919                          * and the load/eject flag is set, turn the 
920                          * flag off and try again.  This is just in 
921                          * case the drive in question barfs on the 
922                          * load eject flag.  The CAM code should set 
923                          * the load/eject flag by default for 
924                          * removable media.
925                          */
926
927                         /* XXX KDM 
928                          * Should we check to see what the specific
929                          * scsi status is??  Or does it not matter
930                          * since we already know that there was an
931                          * error, and we know what the specific
932                          * error code was, and we know what the
933                          * opcode is..
934                          */
935                         if ((scsi_cmd->opcode == START_STOP_UNIT) &&
936                             ((scsi_cmd->how & SSS_LOEJ) != 0) &&
937                              (asc == 0x24) && (ascq == 0x00) &&
938                              (done_ccb->ccb_h.retry_count > 0)) {
939
940                                 scsi_cmd->how &= ~SSS_LOEJ;
941
942                                 xpt_action(done_ccb);
943
944                         } else if (done_ccb->ccb_h.retry_count > 0) {
945                                 /*
946                                  * In this case, the error recovery
947                                  * command failed, but we've got 
948                                  * some retries left on it.  Give
949                                  * it another try.
950                                  */
951
952                                 /* set the timeout to .5 sec */
953                                 relsim_flags =
954                                         RELSIM_RELEASE_AFTER_TIMEOUT;
955                                 timeout = 500;
956
957                                 xpt_action(done_ccb);
958
959                                 break;
960
961                         } else {
962                                 /* 
963                                  * Copy the original CCB back and
964                                  * send it back to the caller.
965                                  */
966                                 bcopy(done_ccb->ccb_h.saved_ccb_ptr,            
967                                       done_ccb, sizeof(union ccb));
968
969                                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
970
971                                 xpt_action(done_ccb);
972                         }
973                 } else {
974                         /*
975                          * Eh??  The command failed, but we don't
976                          * have any sense.  What's up with that?
977                          * Fire the CCB again to return it to the
978                          * caller.
979                          */
980                         bcopy(done_ccb->ccb_h.saved_ccb_ptr,
981                               done_ccb, sizeof(union ccb));
982
983                         periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
984
985                         xpt_action(done_ccb);
986
987                 }
988                 break;
989         default:
990                 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
991                       sizeof(union ccb));
992
993                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
994
995                 xpt_action(done_ccb);
996
997                 break;
998         }
999
1000         /* decrement the retry count */
1001         if (done_ccb->ccb_h.retry_count > 0)
1002                 done_ccb->ccb_h.retry_count--;
1003
1004         qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
1005                                       /*relsim_flags*/relsim_flags,
1006                                       /*openings*/0,
1007                                       /*timeout*/timeout,
1008                                       /*getcount_only*/0);
1009 }
1010
1011 /*
1012  * Generic Async Event handler.  Peripheral drivers usually
1013  * filter out the events that require personal attention,
1014  * and leave the rest to this function.
1015  */
1016 void
1017 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1018                  struct cam_path *path, void *arg)
1019 {
1020         switch (code) {
1021         case AC_LOST_DEVICE:
1022                 cam_periph_invalidate(periph);
1023                 break; 
1024         case AC_SENT_BDR:
1025         case AC_BUS_RESET:
1026         {
1027                 cam_periph_bus_settle(periph, SCSI_DELAY);
1028                 break;
1029         }
1030         default:
1031                 break;
1032         }
1033 }
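
/*
 * Typical use (sketch): a driver's registered async callback handles the
 * codes it cares about itself and hands everything else to
 * cam_periph_async() for the default treatment above.  xxasync and the
 * driver-specific work are hypothetical.
 */
#if 0
static void
xxasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
        struct cam_periph *periph = (struct cam_periph *)callback_arg;

        switch (code) {
        case AC_FOUND_DEVICE:
                /* driver-specific attach handling would go here */
                break;
        default:
                cam_periph_async(periph, code, path, arg);
                break;
        }
}
#endif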
1034
1035 void
1036 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1037 {
1038         struct ccb_getdevstats cgds;
1039
1040         xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
1041         cgds.ccb_h.func_code = XPT_GDEV_STATS;
1042         xpt_action((union ccb *)&cgds);
1043         cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1044 }
1045
1046 void
1047 cam_periph_freeze_after_event(struct cam_periph *periph,
1048                               struct timeval* event_time, u_int duration_ms)
1049 {
1050         struct timeval delta;
1051         struct timeval duration_tv;
1052
1053         microuptime(&delta);
1054         timevalsub(&delta, event_time);
1055         duration_tv.tv_sec = duration_ms / 1000;
1056         duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1057         if (timevalcmp(&delta, &duration_tv, <)) {
1058                 timevalsub(&duration_tv, &delta);
1059
1060                 duration_ms = duration_tv.tv_sec * 1000;
1061                 duration_ms += duration_tv.tv_usec / 1000;
1062                 cam_freeze_devq(periph->path); 
1063                 cam_release_devq(periph->path,
1064                                 RELSIM_RELEASE_AFTER_TIMEOUT,
1065                                 /*reduction*/0,
1066                                 /*timeout*/duration_ms,
1067                                 /*getcount_only*/0);
1068         }
1069
1070 }
1071
1072 /*
1073  * Generic error handler.  Peripheral drivers usually filter
1074  * out the errors that they handle in a unique manner, then
1075  * call this function.
1076  */
1077 int
1078 cam_periph_error(union ccb *ccb, cam_flags camflags,
1079                  u_int32_t sense_flags, union ccb *save_ccb)
1080 {
1081         cam_status status;
1082         int        frozen;
1083         int        sense;
1084         int        error;
1085         int        openings;
1086         int        retry;
1087         u_int32_t  relsim_flags;
1088         u_int32_t  timeout;
1089         
1090         status = ccb->ccb_h.status;
1091         frozen = (status & CAM_DEV_QFRZN) != 0;
1092         sense  = (status & CAM_AUTOSNS_VALID) != 0;
1093         status &= CAM_STATUS_MASK;
1094         relsim_flags = 0;
1095
1096         switch (status) {
1097         case CAM_REQ_CMP:
1098                 /* decrement the number of retries */
1099                 retry = ccb->ccb_h.retry_count > 0;
1100                 if (retry)
1101                         ccb->ccb_h.retry_count--;
1102                 error = 0;
1103                 break;
1104         case CAM_AUTOSENSE_FAIL:
1105         case CAM_SCSI_STATUS_ERROR:
1106
1107                 switch (ccb->csio.scsi_status) {
1108                 case SCSI_STATUS_OK:
1109                 case SCSI_STATUS_COND_MET:
1110                 case SCSI_STATUS_INTERMED:
1111                 case SCSI_STATUS_INTERMED_COND_MET:
1112                         error = 0;
1113                         break;
1114                 case SCSI_STATUS_CMD_TERMINATED:
1115                 case SCSI_STATUS_CHECK_COND:
1116                         if (sense != 0) {
1117                                 struct scsi_sense_data *sense;
1118                                 int    error_code, sense_key, asc, ascq;
1119                                 struct cam_periph *periph;
1120                                 scsi_sense_action err_action;
1121                                 struct ccb_getdev cgd;
1122
1123                                 sense = &ccb->csio.sense_data;
1124                                 scsi_extract_sense(sense, &error_code,
1125                                                    &sense_key, &asc, &ascq);
1126                                 periph = xpt_path_periph(ccb->ccb_h.path);
1127
1128                                 /*
1129                                  * Grab the inquiry data for this device.
1130                                  */
1131                                 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path,
1132                                               /*priority*/ 1);
1133                                 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1134                                 xpt_action((union ccb *)&cgd);
1135
1136                                 err_action = scsi_error_action(asc, ascq, 
1137                                                                &cgd.inq_data);
1138
1139                                 /*
1140                                  * Send a Test Unit Ready to the device.
1141                                  * If the 'many' flag is set, we send 120
1142                                  * test unit ready commands, one every half 
1143                                  * second.  Otherwise, we just send one TUR.
1144                                  * We only want to do this if the retry 
1145                                  * count has not been exhausted.
1146                                  */
1147                                 if (((err_action & SS_MASK) == SS_TUR)
1148                                  && save_ccb != NULL 
1149                                  && ccb->ccb_h.retry_count > 0) {
1150
1151                                         /*
1152                                          * Since error recovery is already
1153                                          * in progress, don't attempt to
1154                                          * process this error.  It is probably
1155                                          * related to the error that caused
1156                                          * the currently active error recovery
1157                                          * action.  Also, we only have
1158                                          * space for one saved CCB, so if we
1159                                          * had two concurrent error recovery
1160                                          * actions, we would end up
1161                                          * over-writing one error recovery
1162                                          * CCB with another one.
1163                                          */
1164                                         if (periph->flags &
1165                                             CAM_PERIPH_RECOVERY_INPROG) {
1166                                                 error = ERESTART;
1167                                                 break;
1168                                         }
1169
1170                                         periph->flags |=
1171                                                 CAM_PERIPH_RECOVERY_INPROG;
1172
1173                                         /* decrement the number of retries */
1174                                         if ((err_action & 
1175                                              SSQ_DECREMENT_COUNT) != 0) {
1176                                                 retry = 1;
1177                                                 ccb->ccb_h.retry_count--;
1178                                         }
1179
1180                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1181
1182                                         /*
1183                                          * We retry this one every half
1184                                          * second for a minute.  If the
1185                                          * device hasn't become ready in a
1186                                          * minute's time, it's unlikely to
1187                                          * ever become ready.  If the table
1188                                          * doesn't specify SSQ_MANY, we can
1189                                          * only try this once.  Oh well.
1190                                          */
1191                                         if ((err_action & SSQ_MANY) != 0)
1192                                                 scsi_test_unit_ready(&ccb->csio,
1193                                                                /*retries*/120,
1194                                                                camperiphdone,
1195                                                                MSG_SIMPLE_Q_TAG,
1196                                                                SSD_FULL_SIZE,
1197                                                                /*timeout*/5000);
1198                                         else
1199                                                 scsi_test_unit_ready(&ccb->csio,
1200                                                                /*retries*/1,
1201                                                                camperiphdone,
1202                                                                MSG_SIMPLE_Q_TAG,
1203                                                                SSD_FULL_SIZE,
1204                                                                /*timeout*/5000);
1205
1206                                         /* release the queue after .5 sec.  */
1207                                         relsim_flags = 
1208                                                 RELSIM_RELEASE_AFTER_TIMEOUT;
1209                                         timeout = 500;
1210                                         /*
1211                                          * Drop the priority to 0 so that 
1212                                          * we are the first to execute.  Also 
1213                                          * freeze the queue after this command 
1214                                          * is sent so that we can restore the 
1215                                          * old csio and have it queued in the 
1216                                          * proper order before we let normal 
1217                                          * transactions go to the drive.
1218                                          */
1219                                         ccb->ccb_h.pinfo.priority = 0;
1220                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1221
1222                                         /*
1223                                          * Save a pointer to the original
1224                                          * CCB in the new CCB.
1225                                          */
1226                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1227
1228                                         error = ERESTART;
1229                                 }
1230                                 /*
1231                                  * Send a start unit command to the device,
1232                                  * and then retry the command.  We only 
1233                                  * want to do this if the retry count has 
1234                                  * not been exhausted.  If the user 
1235                                  * specified 0 retries, then we follow 
1236                                  * their request and do not retry.
1237                                  */
1238                                 else if (((err_action & SS_MASK) == SS_START)
1239                                       && save_ccb != NULL 
1240                                       && ccb->ccb_h.retry_count > 0) {
1241                                         int le;
1242
1243                                         /*
1244                                          * Only one error recovery action
1245                                          * at a time.  See above.
1246                                          */
1247                                         if (periph->flags &
1248                                             CAM_PERIPH_RECOVERY_INPROG) {
1249                                                 error = ERESTART;
1250                                                 break;
1251                                         }
1252
1253                                         periph->flags |=
1254                                                 CAM_PERIPH_RECOVERY_INPROG;
1255
1256                                         /* decrement the number of retries */
1257                                         retry = 1;
1258                                         ccb->ccb_h.retry_count--;
1259
1260                                         /*
1261                                          * Check for removable media and
1262                                          * set load/eject flag
1263                                          * appropriately.
1264                                          */
1265                                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1266                                                 le = TRUE;
1267                                         else
1268                                                 le = FALSE;
1269
1270                                         /*
1271                                          * Attempt to start the drive up.
1272                                          *
1273                                          * Save the current ccb so it can 
1274                                          * be restored and retried once the 
1275                                          * drive is started up.
1276                                          */
1277                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1278
1279                                         scsi_start_stop(&ccb->csio,
1280                                                         /*retries*/1,
1281                                                         camperiphdone,
1282                                                         MSG_SIMPLE_Q_TAG,
1283                                                         /*start*/TRUE,
1284                                                         /*load/eject*/le,
1285                                                         /*immediate*/FALSE,
1286                                                         SSD_FULL_SIZE,
1287                                                         /*timeout*/50000);
1288                                         /*
1289                                          * Drop the priority to 0 so that 
1290                                          * we are the first to execute.  Also 
1291                                          * freeze the queue after this command 
1292                                          * is sent so that we can restore the 
1293                                          * old csio and have it queued in the 
1294                                          * proper order before we let normal 
1295                                          * transactions go to the drive.
1296                                          */
1297                                         ccb->ccb_h.pinfo.priority = 0;
1298                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1299
1300                                         /*
1301                                          * Save a pointer to the original
1302                                          * CCB in the new CCB.
1303                                          */
1304                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1305
1306                                         error = ERESTART;
1307                                 } else if ((sense_flags & SF_RETRY_UA) != 0) {
1308                                         /*
1309                                          * XXX KDM this is a *horrible*
1310                                          * hack.  
1311                                          */
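                                        /*
                                         * Fall back to generic sense
                                         * interpretation: scsi_interpret_sense()
                                         * fills in relsim_flags, openings and
                                         * timeout for the queue release below
                                         * and returns an errno or ERESTART.
                                         */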
1312                                         error = scsi_interpret_sense(ccb,
1313                                                                   sense_flags,
1314                                                                   &relsim_flags,
1315                                                                   &openings,
1316                                                                   &timeout,
1317                                                                   err_action);
1318                                 } 
1319
1320                                 /*
1321                                  * Theoretically, this code should send a
1322                                  * test unit ready to the given device, and
1323                                  * if it returns an error, send a start
1324                                  * unit command.  Since we don't yet have
1325                                  * the capability to do two-command error
1326                                  * recovery, just send a start unit.
1327                                  * XXX KDM fix this!
1328                                  */
1329                                 else if (((err_action & SS_MASK) == SS_TURSTART)
1330                                       && save_ccb != NULL
1331                                       && ccb->ccb_h.retry_count > 0) {
1332                                         int le;
1333
1334                                         /*
1335                                          * Only one error recovery action
1336                                          * at a time.  See above.
1337                                          */
1338                                         if (periph->flags &
1339                                             CAM_PERIPH_RECOVERY_INPROG) {
1340                                                 error = ERESTART;
1341                                                 break;
1342                                         }
1343
1344                                         periph->flags |=
1345                                                 CAM_PERIPH_RECOVERY_INPROG;
1346
1347                                         /* decrement the number of retries */
1348                                         retry = 1;
1349                                         ccb->ccb_h.retry_count--;
1350
1351                                         /*
1352                                          * Check for removable media and
1353                                          * set load/eject flag
1354                                          * appropriately.
1355                                          */
1356                                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1357                                                 le = TRUE;
1358                                         else
1359                                                 le = FALSE;
1360
1361                                         /*
1362                                          * Attempt to start the drive up.
1363                                          *
1364                                          * Save the current ccb so it can 
1365                                          * be restored and retried once the 
1366                                          * drive is started up.
1367                                          */
1368                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1369
1370                                         scsi_start_stop(&ccb->csio,
1371                                                         /*retries*/1,
1372                                                         camperiphdone,
1373                                                         MSG_SIMPLE_Q_TAG,
1374                                                         /*start*/TRUE,
1375                                                         /*load/eject*/le,
1376                                                         /*immediate*/FALSE,
1377                                                         SSD_FULL_SIZE,
1378                                                         /*timeout*/50000);
1379
1380                                         /* release the queue after .5 sec.  */
1381                                         relsim_flags = 
1382                                                 RELSIM_RELEASE_AFTER_TIMEOUT;
1383                                         timeout = 500;
1384                                         /*
1385                                          * Drop the priority to 0 so that 
1386                                          * we are the first to execute.  Also 
1387                                          * freeze the queue after this command 
1388                                          * is sent so that we can restore the 
1389                                          * old csio and have it queued in the 
1390                                          * proper order before we let normal 
1391                                          * transactions go to the drive.
1392                                          */
1393                                         ccb->ccb_h.pinfo.priority = 0;
1394                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1395
1396                                         /*
1397                                          * Save a pointer to the original
1398                                          * CCB in the new CCB.
1399                                          */
1400                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1401
1402                                         error = ERESTART;
1403                                 } else {
1404                                         error = scsi_interpret_sense(ccb,
1405                                                                   sense_flags,
1406                                                                   &relsim_flags,
1407                                                                   &openings,
1408                                                                   &timeout,
1409                                                                   err_action);
1410                                 }
1411                         } else if (ccb->csio.scsi_status == 
1412                                    SCSI_STATUS_CHECK_COND
1413                                 && status != CAM_AUTOSENSE_FAIL) {
1414                                 /* no point in decrementing the retry count */
1415                                 panic("cam_periph_error: scsi status of "
1416                                       "CHECK COND returned but no sense "
1417                                       "information is available.  "
1418                                       "Controller should have returned "
1419                                       "CAM_AUTOSENSE_FAIL");
1420                                 /* NOTREACHED */
1421                                 error = EIO;
1422                         } else if (ccb->ccb_h.retry_count == 0) {
1423                                 /*
1424                                  * XXX KDM shouldn't there be a better
1425                                  * argument to return??
1426                                  */
1427                                 error = EIO;
1428                         } else {
1429                                 /* decrement the number of retries */
1430                                 retry = ccb->ccb_h.retry_count > 0;
1431                                 if (retry)
1432                                         ccb->ccb_h.retry_count--;
1433                                 /*
1434                                  * If it was aborted with no
1435                                  * clue as to the reason, just
1436                                  * retry it again.
1437                                  */
1438                                 error = ERESTART;
1439                         }
1440                         break;
1441                 case SCSI_STATUS_QUEUE_FULL:
1442                 {
1443                         /* no decrement */
1444                         struct ccb_getdevstats cgds;
1445
1446                         /*
1447                          * First off, find out what the current
1448                          * transaction counts are.
1449                          */
1450                         xpt_setup_ccb(&cgds.ccb_h,
1451                                       ccb->ccb_h.path,
1452                                       /*priority*/1);
1453                         cgds.ccb_h.func_code = XPT_GDEV_STATS;
1454                         xpt_action((union ccb *)&cgds);
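                        /*
                         * cgds now holds the device's current dev_active,
                         * dev_openings and mintags counts, which are used
                         * below to rethrottle the tag depth.
                         */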
1455
1456                         /*
1457                          * If we were the only transaction active, treat
1458                          * the QUEUE FULL as if it were a BUSY condition.
1459                          */
1460                         if (cgds.dev_active != 0) {
1461                                 int total_openings;
1462
1463                                 /*
1464                                  * Reduce the number of openings to
1465                                  * be 1 less than the amount it took
1466                                  * to get a queue full bounded by the
1467                                  * minimum allowed tag count for this
1468                                  * device.
1469                                  */
1470                                 total_openings =
1471                                     cgds.dev_active+cgds.dev_openings;
1472                                 openings = cgds.dev_active;
1473                                 if (openings < cgds.mintags)
1474                                         openings = cgds.mintags;
1475                                 if (openings < total_openings)
1476                                         relsim_flags = RELSIM_ADJUST_OPENINGS;
1477                                 else {
1478                                         /*
1479                                          * Some devices report queue full for
1480                                          * temporary resource shortages.  For
1481                                          * this reason, we allow a minimum
1482                                          * tag count to be entered via a
1483                                          * quirk entry to prevent the queue
1484                                          * count on these devices from falling
1485                                          * to a pessimistically low value.  We
1486                                          * still wait for the next successful
1487                                          * completion, however, before queueing
1488                                          * more transactions to the device.
1489                                          */
1490                                         relsim_flags =
1491                                             RELSIM_RELEASE_AFTER_CMDCMPLT;
1492                                 }
1493                                 timeout = 0;
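                                /*
                                 * No timed release is requested here; the
                                 * relsim flags chosen above decide when
                                 * normal queueing resumes.
                                 */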
1494                                 error = ERESTART;
1495                                 break;
1496                         }
1497                         /* FALLTHROUGH */
1498                 }
1499                 case SCSI_STATUS_BUSY:
1500                         /*
1501                          * Restart the queue after either another
1502                          * command completes or a 1 second timeout.
1503                          * If we have any retries left, that is.
1504                          */
1505                         retry = ccb->ccb_h.retry_count > 0;
1506                         if (retry) {
1507                                 ccb->ccb_h.retry_count--;
1508                                 error = ERESTART;
1509                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1510                                              | RELSIM_RELEASE_AFTER_CMDCMPLT;
1511                                 timeout = 1000;
1512                         } else {
1513                                 error = EIO;
1514                         }
1515                         break;
1516                 case SCSI_STATUS_RESERV_CONFLICT:
1517                         error = EIO;
1518                         break;
1519                 default:
1520                         error = EIO;
1521                         break;
1522                 }
1523                 break;
1524         case CAM_REQ_CMP_ERR:
1525         case CAM_CMD_TIMEOUT:
1526         case CAM_UNEXP_BUSFREE:
1527         case CAM_UNCOR_PARITY:
1528         case CAM_DATA_RUN_ERR:
1529                 /* decrement the number of retries */
1530                 retry = ccb->ccb_h.retry_count > 0;
1531                 if (retry) {
1532                         ccb->ccb_h.retry_count--;
1533                         error = ERESTART;
1534                 } else {
1535                         error = EIO;
1536                 }
1537                 break;
1538         case CAM_UA_ABORT:
1539         case CAM_UA_TERMIO:
1540         case CAM_MSG_REJECT_REC:
1541                 /* XXX Don't know that these are correct */
1542                 error = EIO;
1543                 break;
1544         case CAM_SEL_TIMEOUT:
1545         {
1546                 /*
1547                  * XXX
1548                  * A single selection timeout should not be enough
1549                  * to invalidate a device.  We should retry for multiple
1550                  * seconds assuming this isn't a probe.  We'll probably
1551                  * need a special flag for that.
1552                  */
1553 #if 0
1554                 struct cam_path *newpath;
1555
1556                 /* Should we do more if we can't create the path?? */
1557                 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1558                                     xpt_path_path_id(ccb->ccb_h.path),
1559                                     xpt_path_target_id(ccb->ccb_h.path),
1560                                     CAM_LUN_WILDCARD) != CAM_REQ_CMP) 
1561                         break;
1562                 /*
1563                  * Let peripheral drivers know that this device has gone
1564                  * away.
1565                  */
1566                 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1567                 xpt_free_path(newpath);
1568 #endif
1569                 if ((sense_flags & SF_RETRY_SELTO) != 0) {
1570                         retry = ccb->ccb_h.retry_count > 0;
1571                         if (retry) {
1572                                 ccb->ccb_h.retry_count--;
1573                                 error = ERESTART;
1574                                 /*
1575                                  * Wait half a second to give the device
1576                                  * time to recover before we try again.
1577                                  */
1578                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1579                                 timeout = 500;
1580                         } else {
1581                                 error = ENXIO;
1582                         }
1583                 } else {
1584                         error = ENXIO;
1585                 }
1586                 break;
1587         }
1588         case CAM_REQ_INVALID:
1589         case CAM_PATH_INVALID:
1590         case CAM_DEV_NOT_THERE:
1591         case CAM_NO_HBA:
1592         case CAM_PROVIDE_FAIL:
1593         case CAM_REQ_TOO_BIG:           
1594                 error = EINVAL;
1595                 break;
1596         case CAM_SCSI_BUS_RESET:
1597         case CAM_BDR_SENT:              
1598         case CAM_REQUEUE_REQ:
1599                 /* Unconditional requeue, dammit */
1600                 error = ERESTART;
1601                 break;
1602         case CAM_RESRC_UNAVAIL:
1603         case CAM_BUSY:
1604                 /* timeout??? */
1605         default:
1606                 /* decrement the number of retries */
1607                 retry = ccb->ccb_h.retry_count > 0;
1608                 if (retry) {
1609                         ccb->ccb_h.retry_count--;
1610                         error = ERESTART;
1611                 } else {
1612                         /* Check the sense codes */
1613                         error = EIO;
1614                 }
1615                 break;
1616         }
1617
1618         /* Attempt a retry */
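        /*
         * If the device queue was frozen, clear CAM_DEV_QFRZN from the CCB
         * status (the freeze is released explicitly below), reissue the CCB
         * when a retry was requested, and then release the device queue using
         * whatever relsim_flags, openings and timeout were chosen above.
         */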
1619         if (error == ERESTART || error == 0) {  
1620                 if (frozen != 0)
1621                         ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1622
1623                 if (error == ERESTART)
1624                         xpt_action(ccb);
1625                 
1626                 if (frozen != 0) {
1627                         cam_release_devq(ccb->ccb_h.path,
1628                                          relsim_flags,
1629                                          openings,
1630                                          timeout,
1631                                          /*getcount_only*/0);
1632                 }
1633         }
1634
1635
1636         return (error);
1637 }
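
/*
 * Illustrative sketch only, not part of this file's interfaces: a peripheral
 * driver's error callback typically just delegates to cam_periph_error(),
 * handing it a per-softc scratch CCB for the recovery code above to save the
 * failed CCB into.  The xxerror/xxsoftc names below are hypothetical, loosely
 * modeled on drivers such as da(4).
 */
#if 0
static int
xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
        struct xxsoftc *softc;
        struct cam_periph *periph;

        periph = xpt_path_periph(ccb->ccb_h.path);
        softc = (struct xxsoftc *)periph->softc;

        /* Let the common code pick between retry, requeue and hard error. */
        return (cam_periph_error(ccb, cam_flags, sense_flags,
                                 &softc->saved_ccb));
}
#endif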