/*
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_periph.c,v 1.24.2.3 2003/01/25 19:04:40 dillon Exp $
 * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.23 2007/11/14 02:05:35 pavalos Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/linker_set.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_xpt_periph.h"
#include "cam_periph.h"
#include "cam_debug.h"

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_pass.h>

static  u_int           camperiphnextunit(struct periph_driver *p_drv,
                                          u_int newunit, int wired,
                                          path_id_t pathid, target_id_t target,
                                          lun_id_t lun);
static  u_int           camperiphunit(struct periph_driver *p_drv,
                                      path_id_t pathid, target_id_t target,
                                      lun_id_t lun);
static  void            camperiphdone(struct cam_periph *periph,
                                        union ccb *done_ccb);
static  void            camperiphfree(struct cam_periph *periph);

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
                 periph_oninv_t *periph_oninvalidate,
                 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
                 char *name, cam_periph_type type, struct cam_path *path,
                 ac_callback_t *ac_callback, ac_code code, void *arg)
{
        struct          periph_driver **p_drv;
        struct          cam_periph *periph;
        struct          cam_periph *cur_periph;
        path_id_t       path_id;
        target_id_t     target_id;
        lun_id_t        lun_id;
        cam_status      status;
        u_int           init_level;

        init_level = 0;
        /*
         * Handle Hot-Plug scenarios.  If there is already a peripheral
         * of our type assigned to this path, we are likely waiting for
         * final close on an old, invalidated, peripheral.  If this is
         * the case, queue up a deferred call to the peripheral's async
         * handler.  If it looks like a mistaken re-allocation, complain.
         */
        if ((periph = cam_periph_find(path, name)) != NULL) {

                if ((periph->flags & CAM_PERIPH_INVALID) != 0
                 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
                        periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
                        periph->deferred_callback = ac_callback;
                        periph->deferred_ac = code;
                        return (CAM_REQ_INPROG);
                } else {
                        kprintf("cam_periph_alloc: attempt to re-allocate "
                               "valid device %s%d rejected\n",
                               periph->periph_name, periph->unit_number);
                }
                return (CAM_REQ_INVALID);
        }

        periph = kmalloc(sizeof(*periph), M_DEVBUF, M_INTWAIT | M_ZERO);

        init_level++;

        SET_FOREACH(p_drv, periphdriver_set) {
                if (strcmp((*p_drv)->driver_name, name) == 0)
                        break;
        }

        path_id = xpt_path_path_id(path);
        target_id = xpt_path_target_id(path);
        lun_id = xpt_path_lun_id(path);
        cam_init_pinfo(&periph->pinfo);
        periph->periph_start = periph_start;
        periph->periph_dtor = periph_dtor;
        periph->periph_oninval = periph_oninvalidate;
        periph->type = type;
        periph->periph_name = name;
        periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
        periph->immediate_priority = CAM_PRIORITY_NONE;
        periph->refcount = 0;
        SLIST_INIT(&periph->ccb_list);
        status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
        if (status != CAM_REQ_CMP)
                goto failure;

        periph->path = path;
        init_level++;

        status = xpt_add_periph(periph);

        if (status != CAM_REQ_CMP)
                goto failure;

        crit_enter();
        cur_periph = TAILQ_FIRST(&(*p_drv)->units);
        while (cur_periph != NULL
            && cur_periph->unit_number < periph->unit_number)
                cur_periph = TAILQ_NEXT(cur_periph, unit_links);

        if (cur_periph != NULL)
                TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
        else {
                TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
                (*p_drv)->generation++;
        }

        crit_exit();

        init_level++;

        status = periph_ctor(periph, arg);

        if (status == CAM_REQ_CMP)
                init_level++;

failure:
        switch (init_level) {
        case 4:
                /* Initialized successfully */
                break;
        case 3:
                crit_enter();
                TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
                crit_exit();
                xpt_remove_periph(periph);
                /* FALLTHROUGH */
        case 2:
                xpt_free_path(periph->path);
                /* FALLTHROUGH */
        case 1:
                kfree(periph, M_DEVBUF);
                /* FALLTHROUGH */
        case 0:
                /* No cleanup to perform. */
                break;
        default:
                panic("cam_periph_alloc: Unknown init level");
        }
        return(status);
}
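
/*
 * Usage sketch (editorial, illustrative only): cam_periph_alloc() is
 * normally called from a peripheral driver's async callback when an
 * AC_FOUND_DEVICE event arrives for a new path.  The "foo" names below
 * are hypothetical, not part of this API:
 *
 *      static void
 *      fooasync(void *callback_arg, u_int32_t code,
 *               struct cam_path *path, void *arg)
 *      {
 *              if (code == AC_FOUND_DEVICE) {
 *                      cam_status status;
 *
 *                      status = cam_periph_alloc(fooctor, foooninvalidate,
 *                                                foodtor, foostart, "foo",
 *                                                CAM_PERIPH_BIO, path,
 *                                                fooasync, AC_FOUND_DEVICE,
 *                                                arg);
 *                      if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *                              kprintf("fooasync: attach failed %#x\n",
 *                                      status);
 *              }
 *      }
 *
 * CAM_REQ_INPROG is a normal return here: it means an old, invalidated
 * instance still exists and the allocation was deferred until final close
 * (see the hot-plug handling above).
 */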

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) name.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
        struct periph_driver **p_drv;
        struct cam_periph *periph;

        SET_FOREACH(p_drv, periphdriver_set) {
                if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
                        continue;

                crit_enter();
                TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
                        if (xpt_path_comp(periph->path, path) == 0) {
                                crit_exit();
                                return(periph);
                        }
                }
                crit_exit();
                if (name != NULL)
                        return(NULL);
        }
        return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{
        if (periph == NULL)
                return(CAM_REQ_CMP_ERR);

        crit_enter();
        periph->refcount++;
        crit_exit();

        return(CAM_REQ_CMP);
}

void
cam_periph_release(struct cam_periph *periph)
{
        if (periph == NULL)
                return;

        crit_enter();
        if ((--periph->refcount == 0)
         && (periph->flags & CAM_PERIPH_INVALID)) {
                camperiphfree(periph);
        }
        crit_exit();
}
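
/*
 * Usage note (editorial): cam_periph_acquire() and cam_periph_release()
 * must be paired; the final release of an invalidated peripheral is what
 * actually frees it.  A typical open/close pair looks roughly like this
 * (fooopen/fooclose are illustrative names):
 *
 *      fooopen():  if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *                          return (ENXIO);
 *                  ...
 *      fooclose(): ...
 *                  cam_periph_release(periph);
 */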

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type, starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
                  path_id_t pathid, target_id_t target, lun_id_t lun)
{
        struct  cam_periph *periph;
        char    *periph_name, *strval;
        int     i, val, dunit;
        const char *dname;

        crit_enter();
        periph_name = p_drv->driver_name;
        for (;;newunit++) {

                for (periph = TAILQ_FIRST(&p_drv->units);
                     periph != NULL && periph->unit_number != newunit;
                     periph = TAILQ_NEXT(periph, unit_links))
                        ;

                if (periph != NULL && periph->unit_number == newunit) {
                        if (wired != 0) {
                                xpt_print_path(periph->path);
                                kprintf("Duplicate Wired Device entry!\n");
                                xpt_print_path(periph->path);
                                kprintf("Second device (%s device at scbus%d "
                                       "target %d lun %d) will not be wired\n",
                                       periph_name, pathid, target, lun);
                                wired = 0;
                        }
                        continue;
                }
                if (wired)
                        break;

                /*
                 * Don't match entries like "da 4" as a wired down
                 * device, but do match entries like "da 4 target 5"
                 * or even "da 4 scbus 1".
                 */
                i = -1;
                while ((i = resource_locate(i, periph_name)) != -1) {
                        dname = resource_query_name(i);
                        dunit = resource_query_unit(i);
                        /* if no "target" and no specific scbus, skip */
                        if (resource_int_value(dname, dunit, "target", &val) &&
                            (resource_string_value(dname, dunit, "at",&strval)||
                             strcmp(strval, "scbus") == 0))
                                continue;
                        if (newunit == dunit)
                                break;
                }
                if (i == -1)
                        break;
        }
        crit_exit();
        return (newunit);
}
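
/*
 * Wiring example (editorial, illustrative only): the resource entries
 * consulted above are normally supplied through the loader's hint
 * mechanism, roughly of the form:
 *
 *      hint.da.4.at="scbus1"
 *      hint.da.4.target="5"
 *      hint.da.4.lun="0"
 *
 * which reserves unit da4 for the device at scbus1, target 5, lun 0.  An
 * entry that names only the unit, with no "at"/"target" key, is deliberately
 * not treated as a wiring entry, per the comment above.
 */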

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
              target_id_t target, lun_id_t lun)
{
        u_int   unit;
        int     hit, i, val, dunit;
        const char *dname;
        char    pathbuf[32], *strval, *periph_name;

        unit = 0;

        periph_name = p_drv->driver_name;
        ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
        i = -1;
        for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
                dname = resource_query_name(i);
                dunit = resource_query_unit(i);
                if (resource_string_value(dname, dunit, "at", &strval) == 0) {
                        if (strcmp(strval, pathbuf) != 0)
                                continue;
                        hit++;
                }
                if (resource_int_value(dname, dunit, "target", &val) == 0) {
                        if (val != target)
                                continue;
                        hit++;
                }
                if (resource_int_value(dname, dunit, "lun", &val) == 0) {
                        if (val != lun)
                                continue;
                        hit++;
                }
                if (hit != 0) {
                        unit = dunit;
                        break;
                }
        }

        /*
         * Either start from 0 looking for the next unit or from
         * the unit number given in the resource config.  This way,
         * if we have wildcard matches, we don't return the same
         * unit number twice.
         */
        unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
                                 target, lun);

        return (unit);
}

void
cam_periph_invalidate(struct cam_periph *periph)
{
        /*
         * We only call this routine the first time a peripheral is
         * invalidated.  The oninvalidate() routine is always called in
         * a critical section.
         */
        crit_enter();
        if (((periph->flags & CAM_PERIPH_INVALID) == 0)
         && (periph->periph_oninval != NULL))
                periph->periph_oninval(periph);

        periph->flags |= CAM_PERIPH_INVALID;
        periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

        if (periph->refcount == 0)
                camperiphfree(periph);
        else if (periph->refcount < 0)
                kprintf("cam_periph_invalidate: refcount < 0!!\n");
        crit_exit();
}

static void
camperiphfree(struct cam_periph *periph)
{
        struct periph_driver **p_drv;

        SET_FOREACH(p_drv, periphdriver_set) {
                if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
                        break;
        }

        if (*p_drv == NULL) {
                kprintf("camperiphfree: attempt to free "
                        "non-existent periph: %s\n", periph->periph_name);
                return;
        }

        if (periph->periph_dtor != NULL)
                periph->periph_dtor(periph);

        crit_enter();
        TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
        (*p_drv)->generation++;
        crit_exit();

        xpt_remove_periph(periph);

        if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
                union ccb ccb;
                void *arg;

                switch (periph->deferred_ac) {
                case AC_FOUND_DEVICE:
                        ccb.ccb_h.func_code = XPT_GDEV_TYPE;
                        xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
                        xpt_action(&ccb);
                        arg = &ccb;
                        break;
                case AC_PATH_REGISTERED:
                        ccb.ccb_h.func_code = XPT_PATH_INQ;
                        xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
                        xpt_action(&ccb);
                        arg = &ccb;
                        break;
                default:
                        arg = NULL;
                        break;
                }
                periph->deferred_callback(NULL, periph->deferred_ac,
                                          periph->path, arg);
        }
        xpt_free_path(periph->path);
        kfree(periph, M_DEVBUF);
}

/*
 * Wait interruptibly for an exclusive lock.
 */
int
cam_periph_lock(struct cam_periph *periph, int flags)
{
        int error;

        while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
                periph->flags |= CAM_PERIPH_LOCK_WANTED;
                if ((error = tsleep(periph, flags, "caplck", 0)) != 0)
                        return error;
        }

        if (cam_periph_acquire(periph) != CAM_REQ_CMP)
                return(ENXIO);

        periph->flags |= CAM_PERIPH_LOCKED;
        return 0;
}
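
/*
 * Usage note (editorial): cam_periph_lock() takes an implicit reference on
 * the peripheral via cam_periph_acquire(), and cam_periph_unlock() drops it,
 * so the two must always be paired.  A sketch of the common pattern (error
 * handling abbreviated):
 *
 *      if ((error = cam_periph_lock(periph, PCATCH)) != 0)
 *              return (error);
 *      ... issue CCBs, touch softc state ...
 *      cam_periph_unlock(periph);
 */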

/*
 * Unlock and wake up any waiters.
 */
void
cam_periph_unlock(struct cam_periph *periph)
{
        periph->flags &= ~CAM_PERIPH_LOCKED;
        if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
                periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
                wakeup(periph);
        }

        cam_periph_release(periph);
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers; for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for transfers of up to DFLTPHYS bytes,
 * since we use pbufs to map the data in and out and are therefore limited
 * to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
        int numbufs, i, j;
        buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
        u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
        u_int32_t lengths[CAM_PERIPH_MAXMAPS];
        u_int32_t dirs[CAM_PERIPH_MAXMAPS];

        switch(ccb->ccb_h.func_code) {
        case XPT_DEV_MATCH:
                if (ccb->cdm.match_buf_len == 0) {
                        kprintf("cam_periph_mapmem: invalid match buffer "
                               "length 0\n");
                        return(EINVAL);
                }
                if (ccb->cdm.pattern_buf_len > 0) {
                        data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
                        lengths[0] = ccb->cdm.pattern_buf_len;
                        dirs[0] = CAM_DIR_OUT;
                        data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
                        lengths[1] = ccb->cdm.match_buf_len;
                        dirs[1] = CAM_DIR_IN;
                        numbufs = 2;
                } else {
                        data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
                        lengths[0] = ccb->cdm.match_buf_len;
                        dirs[0] = CAM_DIR_IN;
                        numbufs = 1;
                }
                break;
        case XPT_SCSI_IO:
        case XPT_CONT_TARGET_IO:
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
                        return(0);

                data_ptrs[0] = &ccb->csio.data_ptr;
                lengths[0] = ccb->csio.dxfer_len;
                dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
                numbufs = 1;
                break;
        default:
                return(EINVAL);
                break; /* NOTREACHED */
        }

        /*
         * Check the transfer length and permissions first, so we don't
         * have to unmap any previously mapped buffers.
         */
        for (i = 0; i < numbufs; i++) {
                /*
                 * It's somewhat bogus: we really need an R+W command, but
                 * for now the buffer needs some sort of command.  Use
                 * BUF_CMD_WRITE to indicate a write and BUF_CMD_READ to
                 * indicate R+W.
                 */
                cmd[i] = BUF_CMD_WRITE;

                /*
                 * The userland data pointer passed in may not be page
                 * aligned.  vmapbuf() truncates the address to a page
                 * boundary, so if the address isn't page aligned, we'll
                 * need enough space for the given transfer length, plus
                 * whatever extra space is necessary to make it to the page
                 * boundary.
                 */
                if ((lengths[i] +
                    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
                        kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
                               "which is greater than DFLTPHYS(%d)\n",
                               (long)(lengths[i] +
                               (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
                               DFLTPHYS);
                        return(E2BIG);
                }

                if (dirs[i] & CAM_DIR_OUT) {
                        if (!useracc(*data_ptrs[i], lengths[i],
                                     VM_PROT_READ)) {
                                kprintf("cam_periph_mapmem: error, "
                                        "address %p, length %lu isn't "
                                        "user accessible for READ\n",
                                        (void *)*data_ptrs[i],
                                        (u_long)lengths[i]);
                                return(EACCES);
                        }
                }

                if (dirs[i] & CAM_DIR_IN) {
                        cmd[i] = BUF_CMD_READ;
                        if (!useracc(*data_ptrs[i], lengths[i],
                                     VM_PROT_WRITE)) {
                                kprintf("cam_periph_mapmem: error, "
                                        "address %p, length %lu isn't "
                                        "user accessible for WRITE\n",
                                        (void *)*data_ptrs[i],
                                        (u_long)lengths[i]);

                                return(EACCES);
                        }
                }

        }

        for (i = 0; i < numbufs; i++) {
                /*
                 * Get the buffer.
                 */
                mapinfo->bp[i] = getpbuf(NULL);

                /* save the original user pointer */
                mapinfo->saved_ptrs[i] = *data_ptrs[i];

                /* set the flags */
                mapinfo->bp[i]->b_cmd = cmd[i];

                /* map the user buffer into kernel memory */
                if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i]) < 0) {
                        kprintf("cam_periph_mapmem: error, "
                                "address %p, length %lu isn't "
                                "user accessible any more\n",
                                (void *)*data_ptrs[i],
                                (u_long)lengths[i]);
                        for (j = 0; j < i; ++j) {
                                *data_ptrs[j] = mapinfo->saved_ptrs[j];
                                vunmapbuf(mapinfo->bp[j]);
                                relpbuf(mapinfo->bp[j], NULL);
                        }
                        mapinfo->num_bufs_used -= i;
                        return(EACCES);
                }

                /* set our pointer to the new mapped area */
                *data_ptrs[i] = mapinfo->bp[i]->b_data;

                mapinfo->num_bufs_used++;
        }

        return(0);
}
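
/*
 * Usage note (editorial): every successful cam_periph_mapmem() call must be
 * matched by cam_periph_unmapmem() on the same CCB and mapinfo once the I/O
 * completes, so the saved user pointers are restored.  A sketch of the usual
 * pass-through pattern (errfunc is an illustrative error callback):
 *
 *      bzero(&mapinfo, sizeof(mapinfo));
 *      error = cam_periph_mapmem(ccb, &mapinfo);
 *      if (error == 0) {
 *              error = cam_periph_runccb(ccb, errfunc, 0, 0, NULL);
 *              cam_periph_unmapmem(ccb, &mapinfo);
 *      }
 */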

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
        int numbufs, i;
        u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

        if (mapinfo->num_bufs_used <= 0) {
                /* allow ourselves to be swapped once again */
                return;
        }

        switch (ccb->ccb_h.func_code) {
        case XPT_DEV_MATCH:
                numbufs = min(mapinfo->num_bufs_used, 2);

                if (numbufs == 1) {
                        data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
                } else {
                        data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
                        data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
                }
                break;
        case XPT_SCSI_IO:
        case XPT_CONT_TARGET_IO:
                data_ptrs[0] = &ccb->csio.data_ptr;
                numbufs = min(mapinfo->num_bufs_used, 1);
                break;
        default:
                /* allow ourselves to be swapped once again */
                return;
                break; /* NOTREACHED */
        }

        for (i = 0; i < numbufs; i++) {
                /* Set the user's pointer back to the original value */
                *data_ptrs[i] = mapinfo->saved_ptrs[i];

                /* unmap the buffer */
                vunmapbuf(mapinfo->bp[i]);

                /* release the buffer */
                relpbuf(mapinfo->bp[i], NULL);
        }

        /* allow ourselves to be swapped once again */
}

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
        struct ccb_hdr *ccb_h;

        CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
                  ("entering cam_periph_getccb\n"));

        crit_enter();

        while (SLIST_FIRST(&periph->ccb_list) == NULL) {
                if (periph->immediate_priority > priority)
                        periph->immediate_priority = priority;
                xpt_schedule(periph, priority);
                if ((SLIST_FIRST(&periph->ccb_list) != NULL)
                 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
                        break;
                tsleep(&periph->ccb_list, 0, "cgticb", 0);
        }

        ccb_h = SLIST_FIRST(&periph->ccb_list);
        SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
        crit_exit();
        return ((union ccb *)ccb_h);
}
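
/*
 * Note (editorial): cam_periph_getccb() cooperates with the peripheral's
 * start routine.  Setting immediate_priority and calling xpt_schedule()
 * causes the transport layer to hand the start routine a CCB; drivers using
 * this interface are expected to notice that immediate_priority is set,
 * push the CCB onto periph->ccb_list, and wakeup(&periph->ccb_list), which
 * satisfies the tsleep() loop above.
 */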

void
cam_periph_ccbwait(union ccb *ccb)
{
        crit_enter();
        if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
         || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
                tsleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0);
        crit_exit();
}

int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
                 int (*error_routine)(union ccb *ccb,
                                      cam_flags camflags,
                                      u_int32_t sense_flags))
{
        union ccb            *ccb;
        int                  error;
        int                  found;

        error = found = 0;

        switch(cmd){
        case CAMGETPASSTHRU:
                ccb = cam_periph_getccb(periph, /* priority */ 1);
                xpt_setup_ccb(&ccb->ccb_h,
                              ccb->ccb_h.path,
                              /*priority*/1);
                ccb->ccb_h.func_code = XPT_GDEVLIST;

                /*
                 * Basically, the point of this is that we go through
                 * the device list until we find a passthrough device.
                 * In the current version of the CAM code, the only way
                 * to determine what type of device we're dealing with
                 * is by its name.
                 */
                while (found == 0) {
                        ccb->cgdl.index = 0;
                        ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
                        while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

                                /* we want the next device in the list */
                                xpt_action(ccb);
                                if (strncmp(ccb->cgdl.periph_name,
                                    "pass", 4) == 0){
                                        found = 1;
                                        break;
                                }
                        }
                        if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
                            (found == 0)) {
                                ccb->cgdl.periph_name[0] = '\0';
                                ccb->cgdl.unit_number = 0;
                                break;
                        }
                }

                /* copy the result back out */
                bcopy(ccb, addr, sizeof(union ccb));

                /* and release the ccb */
                xpt_release_ccb(ccb);

                break;
        default:
                error = ENOTTY;
                break;
        }
        return(error);
}

int
cam_periph_runccb(union ccb *ccb,
                  int (*error_routine)(union ccb *ccb,
                                       cam_flags camflags,
                                       u_int32_t sense_flags),
                  cam_flags camflags, u_int32_t sense_flags,
                  struct devstat *ds)
{
        int error;

        error = 0;

        /*
         * If the user has supplied a stats structure, and if we understand
         * this particular type of ccb, record the transaction start.
         */
        if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
                devstat_start_transaction(ds);

        xpt_action(ccb);

        do {
                cam_periph_ccbwait(ccb);
                if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
                        error = 0;
                else if (error_routine != NULL)
                        error = (*error_routine)(ccb, camflags, sense_flags);
                else
                        error = 0;

        } while (error == ERESTART);

        if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
                cam_release_devq(ccb->ccb_h.path,
                                 /* relsim_flags */0,
                                 /* openings */0,
                                 /* timeout */0,
                                 /* getcount_only */ FALSE);

        if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
                devstat_end_transaction(ds,
                                        ccb->csio.dxfer_len,
                                        ccb->csio.tag_action & 0xf,
                                        ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
                                        CAM_DIR_NONE) ?  DEVSTAT_NO_DATA :
                                        (ccb->ccb_h.flags & CAM_DIR_OUT) ?
                                        DEVSTAT_WRITE :
                                        DEVSTAT_READ);

        return(error);
}
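
/*
 * Usage sketch (editorial, illustrative only): a synchronous request built
 * by a peripheral driver typically flows through cam_periph_getccb(),
 * cam_periph_runccb() and xpt_release_ccb().  Here foodone and fooerror are
 * hypothetical completion and error callbacks and 5000 is a timeout in ms:
 *
 *      ccb = cam_periph_getccb(periph, 1);
 *      scsi_test_unit_ready(&ccb->csio, 5, foodone, MSG_SIMPLE_Q_TAG,
 *                           SSD_FULL_SIZE, 5000);
 *      error = cam_periph_runccb(ccb, fooerror, 0, SF_RETRY_UA,
 *                                softc->device_stats);
 *      xpt_release_ccb(ccb);
 */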

void
cam_freeze_devq(struct cam_path *path)
{
        struct ccb_hdr ccb_h;

        xpt_setup_ccb(&ccb_h, path, /*priority*/1);
        ccb_h.func_code = XPT_NOOP;
        ccb_h.flags = CAM_DEV_QFREEZE;
        xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
                 u_int32_t openings, u_int32_t timeout,
                 int getcount_only)
{
        struct ccb_relsim crs;

        xpt_setup_ccb(&crs.ccb_h, path,
                      /*priority*/1);
        crs.ccb_h.func_code = XPT_REL_SIMQ;
        crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
        crs.release_flags = relsim_flags;
        crs.openings = openings;
        crs.release_timeout = timeout;
        xpt_action((union ccb *)&crs);
        return (crs.qfrozen_cnt);
}
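
/*
 * Note (editorial): cam_freeze_devq() and cam_release_devq() adjust the
 * device queue's freeze count, so each freeze must eventually be balanced
 * by a release (possibly a deferred one via RELSIM_RELEASE_AFTER_TIMEOUT,
 * as cam_periph_freeze_after_event() does below).  A non-zero getcount_only
 * asks cam_release_devq() to report the current freeze count without
 * actually releasing the queue.
 */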

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
        cam_status      status;
        int             frozen;
        int             sense;
        struct scsi_start_stop_unit *scsi_cmd;
        u_int32_t       relsim_flags, timeout;
        u_int32_t       qfrozen_cnt;

        status = done_ccb->ccb_h.status;
        frozen = (status & CAM_DEV_QFRZN) != 0;
        sense  = (status & CAM_AUTOSNS_VALID) != 0;
        status &= CAM_STATUS_MASK;

        timeout = 0;
        relsim_flags = 0;

        /*
         * Unfreeze the queue once if it is already frozen.
         */
        if (frozen != 0) {
                qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
                                              /*relsim_flags*/0,
                                              /*openings*/0,
                                              /*timeout*/0,
                                              /*getcount_only*/0);
        }

        switch (status) {

        case CAM_REQ_CMP:

                /*
                 * If we have successfully taken a device from the not
                 * ready to ready state, re-scan the device and re-get the
                 * inquiry information.  Many devices (mostly disks) don't
                 * properly report their inquiry information unless they
                 * are spun up.
                 */
                if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
                        scsi_cmd = (struct scsi_start_stop_unit *)
                                        &done_ccb->csio.cdb_io.cdb_bytes;

                        if (scsi_cmd->opcode == START_STOP_UNIT)
                                xpt_async(AC_INQ_CHANGED,
                                          done_ccb->ccb_h.path, NULL);
                }
                bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
                      sizeof(union ccb));

                periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

                xpt_action(done_ccb);

                break;
        case CAM_SCSI_STATUS_ERROR:
                scsi_cmd = (struct scsi_start_stop_unit *)
                                &done_ccb->csio.cdb_io.cdb_bytes;
                if (sense != 0) {
                        struct scsi_sense_data *sense;
                        int    error_code, sense_key, asc, ascq;

                        sense = &done_ccb->csio.sense_data;
                        scsi_extract_sense(sense, &error_code,
                                           &sense_key, &asc, &ascq);

                        /*
                         * If the error is "invalid field in CDB",
                         * and the load/eject flag is set, turn the
                         * flag off and try again.  This is just in
                         * case the drive in question barfs on the
                         * load eject flag.  The CAM code should set
                         * the load/eject flag by default for
                         * removable media.
                         */

                        /* XXX KDM
                         * Should we check to see what the specific
                         * scsi status is??  Or does it not matter
                         * since we already know that there was an
                         * error, and we know what the specific
                         * error code was, and we know what the
                         * opcode is..
                         */
                        if ((scsi_cmd->opcode == START_STOP_UNIT) &&
                            ((scsi_cmd->how & SSS_LOEJ) != 0) &&
                             (asc == 0x24) && (ascq == 0x00) &&
                             (done_ccb->ccb_h.retry_count > 0)) {

                                scsi_cmd->how &= ~SSS_LOEJ;

                                xpt_action(done_ccb);

                        } else if (done_ccb->ccb_h.retry_count > 0) {
                                /*
                                 * In this case, the error recovery
                                 * command failed, but we've got
                                 * some retries left on it.  Give
                                 * it another try.
                                 */

                                /* set the timeout to .5 sec */
                                relsim_flags =
                                        RELSIM_RELEASE_AFTER_TIMEOUT;
                                timeout = 500;

                                xpt_action(done_ccb);

                                break;

                        } else {
                                /*
                                 * Copy the original CCB back and
                                 * send it back to the caller.
                                 */
                                bcopy(done_ccb->ccb_h.saved_ccb_ptr,
                                      done_ccb, sizeof(union ccb));

                                periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

                                xpt_action(done_ccb);
                        }
                } else {
                        /*
                         * Eh??  The command failed, but we don't
                         * have any sense.  What's up with that?
                         * Fire the CCB again to return it to the
                         * caller.
                         */
                        bcopy(done_ccb->ccb_h.saved_ccb_ptr,
                              done_ccb, sizeof(union ccb));

                        periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

                        xpt_action(done_ccb);

                }
                break;
        default:
                bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
                      sizeof(union ccb));

                periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

                xpt_action(done_ccb);

                break;
        }

        /* decrement the retry count */
        if (done_ccb->ccb_h.retry_count > 0)
                done_ccb->ccb_h.retry_count--;

        qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
                                      /*relsim_flags*/relsim_flags,
                                      /*openings*/0,
                                      /*timeout*/timeout,
                                      /*getcount_only*/0);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
                 struct cam_path *path, void *arg)
{
        switch (code) {
        case AC_LOST_DEVICE:
                cam_periph_invalidate(periph);
                break;
        case AC_SENT_BDR:
        case AC_BUS_RESET:
        {
                cam_periph_bus_settle(periph, SCSI_DELAY);
                break;
        }
        default:
                break;
        }
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
        struct ccb_getdevstats cgds;

        xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
        cgds.ccb_h.func_code = XPT_GDEV_STATS;
        xpt_action((union ccb *)&cgds);
        cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
                              struct timeval* event_time, u_int duration_ms)
{
        struct timeval delta;
        struct timeval duration_tv;

        microuptime(&delta);
        timevalsub(&delta, event_time);
        duration_tv.tv_sec = duration_ms / 1000;
        duration_tv.tv_usec = (duration_ms % 1000) * 1000;
        if (timevalcmp(&delta, &duration_tv, <)) {
                timevalsub(&duration_tv, &delta);

                duration_ms = duration_tv.tv_sec * 1000;
                duration_ms += duration_tv.tv_usec / 1000;
                cam_freeze_devq(periph->path);
                cam_release_devq(periph->path,
                                RELSIM_RELEASE_AFTER_TIMEOUT,
                                /*reduction*/0,
                                /*timeout*/duration_ms,
                                /*getcount_only*/0);
        }

}
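
/*
 * Worked example (editorial): with a 2000 ms settle time (duration_ms) and
 * an event that occurred 750 ms ago (delta), the code above freezes the
 * device queue and schedules a timed release after the remaining
 * 2000 - 750 = 1250 ms, so I/O resumes only once the full settle period has
 * elapsed since the event.  If the event is already older than the settle
 * time, nothing is frozen.
 */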

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
1076 int
1077 cam_periph_error(union ccb *ccb, cam_flags camflags,
1078                  u_int32_t sense_flags, union ccb *save_ccb)
1079 {
1080         cam_status status;
1081         int        frozen;
1082         int        sense;
1083         int        error;
1084         int        openings;
1085         int        retry;
1086         u_int32_t  relsim_flags;
1087         u_int32_t  timeout;
1088         
1089         status = ccb->ccb_h.status;
1090         frozen = (status & CAM_DEV_QFRZN) != 0;
1091         sense  = (status & CAM_AUTOSNS_VALID) != 0;
1092         status &= CAM_STATUS_MASK;
1093         relsim_flags = 0;
1094
1095         switch (status) {
1096         case CAM_REQ_CMP:
1097                 /* decrement the number of retries */
1098                 retry = ccb->ccb_h.retry_count > 0;
1099                 if (retry)
1100                         ccb->ccb_h.retry_count--;
1101                 error = 0;
1102                 break;
1103         case CAM_AUTOSENSE_FAIL:
1104         case CAM_SCSI_STATUS_ERROR:
1105
1106                 switch (ccb->csio.scsi_status) {
1107                 case SCSI_STATUS_OK:
1108                 case SCSI_STATUS_COND_MET:
1109                 case SCSI_STATUS_INTERMED:
1110                 case SCSI_STATUS_INTERMED_COND_MET:
1111                         error = 0;
1112                         break;
1113                 case SCSI_STATUS_CMD_TERMINATED:
1114                 case SCSI_STATUS_CHECK_COND:
1115                         if (sense != 0) {
1116                                 struct scsi_sense_data *sense;
1117                                 int    error_code, sense_key, asc, ascq;
1118                                 struct cam_periph *periph;
1119                                 scsi_sense_action err_action;
1120                                 struct ccb_getdev cgd;
1121
1122                                 sense = &ccb->csio.sense_data;
1123                                 scsi_extract_sense(sense, &error_code,
1124                                                    &sense_key, &asc, &ascq);
1125                                 periph = xpt_path_periph(ccb->ccb_h.path);
1126
1127                                 /*
1128                                  * Grab the inquiry data for this device.
1129                                  */
1130                                 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path,
1131                                               /*priority*/ 1);
1132                                 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1133                                 xpt_action((union ccb *)&cgd);
1134
1135                                 err_action = scsi_error_action(asc, ascq, 
1136                                                                &cgd.inq_data);
1137
1138                                 /*
1139                                  * Send a Test Unit Ready to the device.
1140                                  * If the 'many' flag is set, we send 120
1141                                  * test unit ready commands, one every half 
1142                                  * second.  Otherwise, we just send one TUR.
1143                                  * We only want to do this if the retry 
1144                                  * count has not been exhausted.
1145                                  */
1146                                 if (((err_action & SS_MASK) == SS_TUR)
1147                                  && save_ccb != NULL 
1148                                  && ccb->ccb_h.retry_count > 0) {
1149
1150                                         /*
1151                                          * Since error recovery is already
1152                                          * in progress, don't attempt to
1153                                          * process this error.  It is probably
1154                                          * related to the error that caused
1155                                          * the currently active error recovery
1156                                          * action.  Also, we only have
1157                                          * space for one saved CCB, so if we
1158                                          * had two concurrent error recovery
1159                                          * actions, we would end up
1160                                          * over-writing one error recovery
1161                                          * CCB with another one.
1162                                          */
1163                                         if (periph->flags &
1164                                             CAM_PERIPH_RECOVERY_INPROG) {
1165                                                 error = ERESTART;
1166                                                 break;
1167                                         }
1168
1169                                         periph->flags |=
1170                                                 CAM_PERIPH_RECOVERY_INPROG;
1171
1172                                         /* decrement the number of retries */
1173                                         if ((err_action & 
1174                                              SSQ_DECREMENT_COUNT) != 0) {
1175                                                 retry = 1;
1176                                                 ccb->ccb_h.retry_count--;
1177                                         }
1178
1179                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1180
1181                                         /*
1182                                          * We retry this one every half
1183                                          * second for a minute.  If the
1184                                          * device hasn't become ready in a
1185                                          * minute's time, it's unlikely to
1186                                          * ever become ready.  If the table
1187                                          * doesn't specify SSQ_MANY, we can
1188                                          * only try this once.  Oh well.
1189                                          */
1190                                         if ((err_action & SSQ_MANY) != 0)
1191                                                 scsi_test_unit_ready(&ccb->csio,
1192                                                                /*retries*/120,
1193                                                                camperiphdone,
1194                                                                MSG_SIMPLE_Q_TAG,
1195                                                                SSD_FULL_SIZE,
1196                                                                /*timeout*/5000);
1197                                         else
1198                                                 scsi_test_unit_ready(&ccb->csio,
1199                                                                /*retries*/1,
1200                                                                camperiphdone,
1201                                                                MSG_SIMPLE_Q_TAG,
1202                                                                SSD_FULL_SIZE,
1203                                                                /*timeout*/5000);
1204
1205                                         /* release the queue after .5 sec.  */
1206                                         relsim_flags = 
1207                                                 RELSIM_RELEASE_AFTER_TIMEOUT;
1208                                         timeout = 500;
1209                                         /*
1210                                          * Drop the priority to 0 so that 
1211                                          * we are the first to execute.  Also 
1212                                          * freeze the queue after this command 
1213                                          * is sent so that we can restore the 
1214                                          * old csio and have it queued in the 
1215                                          * proper order before we let normal 
1216                                          * transactions go to the drive.
1217                                          */
1218                                         ccb->ccb_h.pinfo.priority = 0;
1219                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1220
1221                                         /*
1222                                          * Save a pointer to the original
1223                                          * CCB in the new CCB.
1224                                          */
1225                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1226
1227                                         error = ERESTART;
1228                                 }
1229                                 /*
1230                                  * Send a start unit command to the device,
1231                                  * and then retry the command.  We only 
1232                                  * want to do this if the retry count has 
1233                                  * not been exhausted.  If the user 
1234                                  * specified 0 retries, then we follow 
1235                                  * their request and do not retry.
1236                                  */
1237                                 else if (((err_action & SS_MASK) == SS_START)
1238                                       && save_ccb != NULL 
1239                                       && ccb->ccb_h.retry_count > 0) {
1240                                         int le;
1241
1242                                         /*
1243                                          * Only one error recovery action
1244                                          * at a time.  See above.
1245                                          */
1246                                         if (periph->flags &
1247                                             CAM_PERIPH_RECOVERY_INPROG) {
1248                                                 error = ERESTART;
1249                                                 break;
1250                                         }
1251
1252                                         periph->flags |=
1253                                                 CAM_PERIPH_RECOVERY_INPROG;
1254
1255                                         /* decrement the number of retries */
1256                                         retry = 1;
1257                                         ccb->ccb_h.retry_count--;
1258
1259                                         /*
1260                                          * Check for removable media and
1261                                          * set load/eject flag
1262                                          * appropriately.
1263                                          */
1264                                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1265                                                 le = TRUE;
1266                                         else
1267                                                 le = FALSE;
1268
1269                                         /*
1270                                          * Attempt to start the drive up.
1271                                          *
1272                                          * Save the current ccb so it can 
1273                                          * be restored and retried once the 
1274                                          * drive is started up.
1275                                          */
1276                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1277
1278                                         scsi_start_stop(&ccb->csio,
1279                                                         /*retries*/1,
1280                                                         camperiphdone,
1281                                                         MSG_SIMPLE_Q_TAG,
1282                                                         /*start*/TRUE,
1283                                                         /*load/eject*/le,
1284                                                         /*immediate*/FALSE,
1285                                                         SSD_FULL_SIZE,
1286                                                         /*timeout*/50000);
1287                                         /*
1288                                          * Drop the priority to 0 so that 
1289                                          * we are the first to execute.  Also 
1290                                          * freeze the queue after this command 
1291                                          * is sent so that we can restore the 
1292                                          * old csio and have it queued in the 
1293                                          * proper order before we let normal 
1294                                          * transactions go to the drive.
1295                                          */
1296                                         ccb->ccb_h.pinfo.priority = 0;
1297                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1298
1299                                         /*
1300                                          * Save a pointer to the original
1301                                          * CCB in the new CCB.
1302                                          */
1303                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1304
1305                                         error = ERESTART;
1306                                 } else if ((sense_flags & SF_RETRY_UA) != 0) {
1307                                         /*
1308                                          * XXX KDM this is a *horrible*
1309                                          * hack.  
1310                                          */
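                                        /*
                                         * Fall back to the generic sense
                                         * interpreter; with SF_RETRY_UA set
                                         * the intent is that UNIT ATTENTION
                                         * conditions get retried rather than
                                         * reported as hard errors.
                                         */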
1311                                         error = scsi_interpret_sense(ccb,
1312                                                                   sense_flags,
1313                                                                   &relsim_flags,
1314                                                                   &openings,
1315                                                                   &timeout,
1316                                                                   err_action);
1317                                 } 
1318
1319                                 /*
1320                                  * Theoretically, this code should send a
1321                                  * test unit ready to the given device, and 
1321                                  * if it returns an error, send a start 
1323                                  * unit command.  Since we don't yet have
1324                                  * the capability to do two-command error
1325                                  * recovery, just send a start unit.
1326                                  * XXX KDM fix this!
1327                                  */
1328                                 else if (((err_action & SS_MASK) == SS_TURSTART)
1329                                       && save_ccb != NULL
1330                                       && ccb->ccb_h.retry_count > 0) {
1331                                         int le;
1332
1333                                         /*
1334                                          * Only one error recovery action
1335                                          * at a time.  See above.
1336                                          */
1337                                         if (periph->flags &
1338                                             CAM_PERIPH_RECOVERY_INPROG) {
1339                                                 error = ERESTART;
1340                                                 break;
1341                                         }
1342
1343                                         periph->flags |=
1344                                                 CAM_PERIPH_RECOVERY_INPROG;
1345
1346                                         /* decrement the number of retries */
1347                                         retry = 1;
1348                                         ccb->ccb_h.retry_count--;
1349
1350                                         /*
1351                                          * Check for removable media and
1352                                          * set load/eject flag
1353                                          * appropriately.
1354                                          */
1355                                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1356                                                 le = TRUE;
1357                                         else
1358                                                 le = FALSE;
1359
1360                                         /*
1361                                          * Attempt to start the drive up.
1362                                          *
1363                                          * Save the current ccb so it can 
1364                                          * be restored and retried once the 
1365                                          * drive is started up.
1366                                          */
1367                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1368
1369                                         scsi_start_stop(&ccb->csio,
1370                                                         /*retries*/1,
1371                                                         camperiphdone,
1372                                                         MSG_SIMPLE_Q_TAG,
1373                                                         /*start*/TRUE,
1374                                                         /*load/eject*/le,
1375                                                         /*immediate*/FALSE,
1376                                                         SSD_FULL_SIZE,
1377                                                         /*timeout*/50000);
1378
1379                                         /* release the queue after .5 sec.  */
1380                                         relsim_flags = 
1381                                                 RELSIM_RELEASE_AFTER_TIMEOUT;
1382                                         timeout = 500;
1383                                         /*
1384                                          * Drop the priority to 0 so that 
1385                                          * we are the first to execute.  Also 
1386                                          * freeze the queue after this command 
1387                                          * is sent so that we can restore the 
1388                                          * old csio and have it queued in the 
1389                                          * proper order before we let normal 
1390                                          * transactions go to the drive.
1391                                          */
1392                                         ccb->ccb_h.pinfo.priority = 0;
1393                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1394
1395                                         /*
1396                                          * Save a pointer to the original
1397                                          * CCB in the new CCB.
1398                                          */
1399                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1400
1401                                         error = ERESTART;
1402                                 } else {
1403                                         error = scsi_interpret_sense(ccb,
1404                                                                   sense_flags,
1405                                                                   &relsim_flags,
1406                                                                   &openings,
1407                                                                   &timeout,
1408                                                                   err_action);
1409                                 }
1410                         } else if (ccb->csio.scsi_status == 
1411                                    SCSI_STATUS_CHECK_COND
1412                                 && status != CAM_AUTOSENSE_FAIL) {
1413                                 /* no point in decrementing the retry count */
1414                                 panic("cam_periph_error: scsi status of "
1415                                       "CHECK COND returned but no sense "
1416                                       "information is available.  "
1417                                       "Controller should have returned "
1418                                       "CAM_AUTOSENSE_FAIL");
1419                                 /* NOTREACHED */
1420                                 error = EIO;
1421                         } else if (ccb->ccb_h.retry_count == 0) {
1422                                 /*
1423                                  * XXX KDM shouldn't there be a better
1424                                  * argument to return??
1425                                  */
1426                                 error = EIO;
1427                         } else {
1428                                 /* decrement the number of retries */
1429                                 retry = ccb->ccb_h.retry_count > 0;
1430                                 if (retry)
1431                                         ccb->ccb_h.retry_count--;
1432                                 /*
1433                                  * If it was aborted with no
1434                                  * clue as to the reason, just
1435                                  * retry it again.
1436                                  */
1437                                 error = ERESTART;
1438                         }
1439                         break;
1440                 case SCSI_STATUS_QUEUE_FULL:
1441                 {
1442                         /* no decrement */
1443                         struct ccb_getdevstats cgds;
1444
1445                         /*
1446                          * First off, find out what the current
1447                          * transaction counts are.
1448                          */
1449                         xpt_setup_ccb(&cgds.ccb_h,
1450                                       ccb->ccb_h.path,
1451                                       /*priority*/1);
1452                         cgds.ccb_h.func_code = XPT_GDEV_STATS;
1453                         xpt_action((union ccb *)&cgds);
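                        /*
                         * cgds now holds the device's transaction counts
                         * (dev_active, dev_openings, mintags), which are
                         * used below to size the reduced opening count.
                         */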
1454
1455                         /*
1456                          * If we were the only transaction active, treat
1457                          * the QUEUE FULL as if it were a BUSY condition.
1458                          */
1459                         if (cgds.dev_active != 0) {
1460                                 int total_openings;
1461
1462                                 /*
1463                                  * Reduce the number of openings to
1464                                  * be 1 less than the amount it took
1465                                  * to get a queue full bounded by the
1466                                  * minimum allowed tag count for this
1467                                  * device.
1468                                  */
1469                                 total_openings =
1470                                     cgds.dev_active+cgds.dev_openings;
1471                                 openings = cgds.dev_active;
1472                                 if (openings < cgds.mintags)
1473                                         openings = cgds.mintags;
1474                                 if (openings < total_openings)
1475                                         relsim_flags = RELSIM_ADJUST_OPENINGS;
1476                                 else {
1477                                         /*
1478                                          * Some devices report queue full for
1479                                          * temporary resource shortages.  For
1480                                          * this reason, we allow a minimum
1481                                          * tag count to be entered via a
1482                                          * quirk entry to prevent the queue
1483                                          * count on these devices from falling
1484                                          * to a pessimistically low value.  We
1485                                          * still wait for the next successful
1486                                          * completion, however, before queueing
1487                                          * more transactions to the device.
1488                                          */
1489                                         relsim_flags =
1490                                             RELSIM_RELEASE_AFTER_CMDCMPLT;
1491                                 }
1492                                 timeout = 0;
1493                                 error = ERESTART;
1494                                 break;
1495                         }
1496                         /* FALLTHROUGH */
1497                 }
1498                 case SCSI_STATUS_BUSY:
1499                         /*
1500                          * Restart the queue after either another
1501                          * command completes or a 1 second timeout.
1502                          * If we have any retries left, that is.
1503                          */
1504                         retry = ccb->ccb_h.retry_count > 0;
1505                         if (retry) {
1506                                 ccb->ccb_h.retry_count--;
1507                                 error = ERESTART;
1508                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1509                                              | RELSIM_RELEASE_AFTER_CMDCMPLT;
1510                                 timeout = 1000;
1511                         } else {
1512                                 error = EIO;
1513                         }
1514                         break;
1515                 case SCSI_STATUS_RESERV_CONFLICT:
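                        /*
                         * Another initiator presumably holds a reservation
                         * on the device; treat this as a hard error.
                         */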
1516                         error = EIO;
1517                         break;
1518                 default:
1519                         error = EIO;
1520                         break;
1521                 }
1522                 break;
1523         case CAM_REQ_CMP_ERR:
1524         case CAM_CMD_TIMEOUT:
1525         case CAM_UNEXP_BUSFREE:
1526         case CAM_UNCOR_PARITY:
1527         case CAM_DATA_RUN_ERR:
1528                 /* decrement the number of retries */
1529                 retry = ccb->ccb_h.retry_count > 0;
1530                 if (retry) {
1531                         ccb->ccb_h.retry_count--;
1532                         error = ERESTART;
1533                 } else {
1534                         error = EIO;
1535                 }
1536                 break;
1537         case CAM_UA_ABORT:
1538         case CAM_UA_TERMIO:
1539         case CAM_MSG_REJECT_REC:
1540                 /* XXX Don't know that these are correct */
1541                 error = EIO;
1542                 break;
1543         case CAM_SEL_TIMEOUT:
1544         {
1545                 /*
1546                  * XXX
1547                  * A single selection timeout should not be enough
1548                  * to invalidate a device.  We should retry for multiple
1549                  * seconds assuming this isn't a probe.  We'll probably
1550                  * need a special flag for that.
1551                  */
1552 #if 0
1553                 struct cam_path *newpath;
1554
1555                 /* Should we do more if we can't create the path?? */
1556                 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1557                                     xpt_path_path_id(ccb->ccb_h.path),
1558                                     xpt_path_target_id(ccb->ccb_h.path),
1559                                     CAM_LUN_WILDCARD) != CAM_REQ_CMP) 
1560                         break;
1561                 /*
1562                  * Let peripheral drivers know that this device has gone
1563                  * away.
1564                  */
1565                 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1566                 xpt_free_path(newpath);
1567 #endif
1568                 if ((sense_flags & SF_RETRY_SELTO) != 0) {
1569                         retry = ccb->ccb_h.retry_count > 0;
1570                         if (retry) {
1571                                 ccb->ccb_h.retry_count--;
1572                                 error = ERESTART;
1573                                 /*
1574                                  * Wait half a second to give the device
1575                                  * time to recover before we try again.
1576                                  */
1577                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1578                                 timeout = 500;
1579                         } else {
1580                                 error = ENXIO;
1581                         }
1582                 } else {
1583                         error = ENXIO;
1584                 }
1585                 break;
1586         }
1587         case CAM_REQ_INVALID:
1588         case CAM_PATH_INVALID:
1589         case CAM_DEV_NOT_THERE:
1590         case CAM_NO_HBA:
1591         case CAM_PROVIDE_FAIL:
1592         case CAM_REQ_TOO_BIG:           
1593                 error = EINVAL;
1594                 break;
1595         case CAM_SCSI_BUS_RESET:
1596         case CAM_BDR_SENT:              
1597         case CAM_REQUEUE_REQ:
1598                 /* Unconditional requeue, dammit */
1599                 error = ERESTART;
1600                 break;
1601         case CAM_RESRC_UNAVAIL:
1602         case CAM_BUSY:
1603                 /* timeout??? */
1604         default:
1605                 /* decrement the number of retries */
1606                 retry = ccb->ccb_h.retry_count > 0;
1607                 if (retry) {
1608                         ccb->ccb_h.retry_count--;
1609                         error = ERESTART;
1610                 } else {
1611                         /* Check the sense codes */
1612                         error = EIO;
1613                 }
1614                 break;
1615         }
1616
1617         /* Attempt a retry */
1618         if (error == ERESTART || error == 0) {  
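                /*
                 * Clear the frozen indication on the CCB here; the device
                 * queue itself is thawed explicitly below via
                 * cam_release_devq() using the relsim flags, openings and
                 * timeout chosen above.
                 */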
1619                 if (frozen != 0)
1620                         ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1621
1622                 if (error == ERESTART)
1623                         xpt_action(ccb);
1624                 
1625                 if (frozen != 0) {
1626                         cam_release_devq(ccb->ccb_h.path,
1627                                          relsim_flags,
1628                                          openings,
1629                                          timeout,
1630                                          /*getcount_only*/0);
1631                 }
1632         }
1633
1635         return (error);
1636 }
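
/*
 * Illustrative sketch only (not part of this driver): a peripheral driver's
 * completion routine would typically hand failed CCBs to cam_periph_error()
 * and react to its verdict roughly as below.  The xxdone()/xx_softc names
 * and the saved_ccb field are hypothetical, and the call assumes the
 * four-argument form of cam_periph_error() used in this source tree.
 *
 *	static void
 *	xxdone(struct cam_periph *periph, union ccb *done_ccb)
 *	{
 *		struct xx_softc *softc = (struct xx_softc *)periph->softc;
 *		int error;
 *
 *		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 *			error = cam_periph_error(done_ccb, 0, SF_RETRY_UA,
 *						 &softc->saved_ccb);
 *			if (error == ERESTART)
 *				return;		(recovery CCB queued; wait)
 *			... otherwise fail the request with 'error' ...
 *		}
 *		... normal completion handling continues here ...
 *	}
 */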