1 /*
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/cam/cam_periph.c,v 1.24.2.3 2003/01/25 19:04:40 dillon Exp $
30  * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.11 2005/06/02 20:40:29 dillon Exp $
31  */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/linker_set.h>
38 #include <sys/buf.h>
39 #include <sys/proc.h>
40 #include <sys/devicestat.h>
41 #include <sys/bus.h>
42 #include <vm/vm.h>
43 #include <vm/vm_extern.h>
44
45 #include <sys/thread2.h>
46
47 #include "cam.h"
48 #include "cam_ccb.h"
49 #include "cam_xpt_periph.h"
50 #include "cam_periph.h"
51 #include "cam_debug.h"
52
53 #include <bus/cam/scsi/scsi_all.h>
54 #include <bus/cam/scsi/scsi_message.h>
55 #include <bus/cam/scsi/scsi_da.h>
56 #include <bus/cam/scsi/scsi_pass.h>
57
58 static  u_int           camperiphnextunit(struct periph_driver *p_drv,
59                                           u_int newunit, int wired,
60                                           path_id_t pathid, target_id_t target,
61                                           lun_id_t lun);
62 static  u_int           camperiphunit(struct periph_driver *p_drv,
63                                       path_id_t pathid, target_id_t target,
64                                       lun_id_t lun); 
65 static  void            camperiphdone(struct cam_periph *periph, 
66                                         union ccb *done_ccb);
67 static  void            camperiphfree(struct cam_periph *periph);
68
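/*
 * Allocate and set up a new peripheral instance for the given path.  The
 * matching periph_driver is located by name, a unit number is chosen
 * (honoring any wired-down config hints), a private path is created, the
 * instance is registered with the transport layer and linked into the
 * driver's unit list, and finally the driver's constructor is invoked.
 * On failure the work completed so far (tracked by init_level) is undone.
 * If an invalidated instance still exists on this path, the async callback
 * is deferred until it goes away and CAM_REQ_INPROG is returned.
 */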
69 cam_status
70 cam_periph_alloc(periph_ctor_t *periph_ctor,
71                  periph_oninv_t *periph_oninvalidate,
72                  periph_dtor_t *periph_dtor, periph_start_t *periph_start,
73                  char *name, cam_periph_type type, struct cam_path *path,
74                  ac_callback_t *ac_callback, ac_code code, void *arg)
75 {
76         struct          periph_driver **p_drv;
77         struct          cam_periph *periph;
78         struct          cam_periph *cur_periph;
79         path_id_t       path_id;
80         target_id_t     target_id;
81         lun_id_t        lun_id;
82         cam_status      status;
83         u_int           init_level;
84
85         init_level = 0;
86         /*
87          * Handle Hot-Plug scenarios.  If there is already a peripheral
88          * of our type assigned to this path, we are likely waiting for
89          * final close on an old, invalidated peripheral.  If this is
90          * the case, queue up a deferred call to the peripheral's async
91          * handler.  If it looks like a mistaken re-allocation, complain.
92          */
93         if ((periph = cam_periph_find(path, name)) != NULL) {
94
95                 if ((periph->flags & CAM_PERIPH_INVALID) != 0
96                  && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
97                         periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
98                         periph->deferred_callback = ac_callback;
99                         periph->deferred_ac = code;
100                         return (CAM_REQ_INPROG);
101                 } else {
102                         printf("cam_periph_alloc: attempt to re-allocate "
103                                "valid device %s%d rejected\n",
104                                periph->periph_name, periph->unit_number);
105                 }
106                 return (CAM_REQ_INVALID);
107         }
108         
109         periph = malloc(sizeof(*periph), M_DEVBUF, M_INTWAIT | M_ZERO);
110         
111         init_level++;
112
113         SET_FOREACH(p_drv, periphdriver_set) {
114                 if (strcmp((*p_drv)->driver_name, name) == 0)
115                         break;
116         }
117         
118         path_id = xpt_path_path_id(path);
119         target_id = xpt_path_target_id(path);
120         lun_id = xpt_path_lun_id(path);
121         cam_init_pinfo(&periph->pinfo);
122         periph->periph_start = periph_start;
123         periph->periph_dtor = periph_dtor;
124         periph->periph_oninval = periph_oninvalidate;
125         periph->type = type;
126         periph->periph_name = name;
127         periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
128         periph->immediate_priority = CAM_PRIORITY_NONE;
129         periph->refcount = 0;
130         SLIST_INIT(&periph->ccb_list);
131         status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
132         if (status != CAM_REQ_CMP)
133                 goto failure;
134
135         periph->path = path;
136         init_level++;
137
138         status = xpt_add_periph(periph);
139
140         if (status != CAM_REQ_CMP)
141                 goto failure;
142
143         crit_enter();
144         cur_periph = TAILQ_FIRST(&(*p_drv)->units);
145         while (cur_periph != NULL
146             && cur_periph->unit_number < periph->unit_number)
147                 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
148
149         if (cur_periph != NULL)
150                 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
151         else {
152                 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
153                 (*p_drv)->generation++;
154         }
155
156         crit_exit();
157
158         init_level++;
159
160         status = periph_ctor(periph, arg);
161
162         if (status == CAM_REQ_CMP)
163                 init_level++;
164
165 failure:
166         switch (init_level) {
167         case 4:
168                 /* Initialized successfully */
169                 break;
170         case 3:
171                 crit_enter();
172                 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
173                 crit_exit();
174                 xpt_remove_periph(periph);
175         case 2:
176                 xpt_free_path(periph->path);
177         case 1:
178                 free(periph, M_DEVBUF);
179         case 0:
180                 /* No cleanup to perform. */
181                 break;
182         default:
183                 panic("cam_periph_alloc: Unknown init level");
184         }
185         return(status);
186 }
187
188 /*
189  * Find a peripheral structure with the specified path, target, lun, 
190  * and (optionally) driver name.  If the name is NULL, this function will return
191  * the first peripheral driver that matches the specified path.
192  */
193 struct cam_periph *
194 cam_periph_find(struct cam_path *path, char *name)
195 {
196         struct periph_driver **p_drv;
197         struct cam_periph *periph;
198
199         SET_FOREACH(p_drv, periphdriver_set) {
200                 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
201                         continue;
202
203                 crit_enter();
204                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
205                      periph = TAILQ_NEXT(periph, unit_links)) {
206                         if (xpt_path_comp(periph->path, path) == 0) {
207                                 crit_exit();
208                                 return(periph);
209                         }
210                 }
211                 crit_exit();
212                 if (name != NULL)
213                         return(NULL);
214         }
215         return(NULL);
216 }
217
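/*
 * Reference counting for peripheral instances.  cam_periph_acquire() takes
 * an additional reference; cam_periph_release() drops one and performs the
 * final teardown via camperiphfree() once the count reaches zero on an
 * invalidated peripheral.
 */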
218 cam_status
219 cam_periph_acquire(struct cam_periph *periph)
220 {
221         if (periph == NULL)
222                 return(CAM_REQ_CMP_ERR);
223
224         crit_enter();
225         periph->refcount++;
226         crit_exit();
227
228         return(CAM_REQ_CMP);
229 }
230
231 void
232 cam_periph_release(struct cam_periph *periph)
233 {
234         if (periph == NULL)
235                 return;
236
237         crit_enter();
238         if ((--periph->refcount == 0)
239          && (periph->flags & CAM_PERIPH_INVALID)) {
240                 camperiphfree(periph);
241         }
242         crit_exit();
243 }
244
245 /*
246  * Look for the next unit number that is not currently in use for this
247  * peripheral type starting at "newunit".  Also exclude unit numbers that
248  * are reserved for future "hardwiring" unless we already know that this
249  * is a potential wired device.  Only assume that the device is "wired" the
250  * first time through the loop since after that we'll be looking at unit
251  * numbers that did not match a wiring entry.
252  */
253 static u_int
254 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
255                   path_id_t pathid, target_id_t target, lun_id_t lun)
256 {
257         struct  cam_periph *periph;
258         char    *periph_name, *strval;
259         int     i, val, dunit;
260         const char *dname;
261
262         crit_enter();
263         periph_name = p_drv->driver_name;
264         for (;;newunit++) {
265
266                 for (periph = TAILQ_FIRST(&p_drv->units);
267                      periph != NULL && periph->unit_number != newunit;
268                      periph = TAILQ_NEXT(periph, unit_links))
269                         ;
270
271                 if (periph != NULL && periph->unit_number == newunit) {
272                         if (wired != 0) {
273                                 xpt_print_path(periph->path);
274                                 printf("Duplicate Wired Device entry!\n");
275                                 xpt_print_path(periph->path);
276                                 printf("Second device (%s device at scbus%d "
277                                        "target %d lun %d) will not be wired\n",
278                                        periph_name, pathid, target, lun);
279                                 wired = 0;
280                         }
281                         continue;
282                 }
283                 if (wired)
284                         break;
285
286                 /*
287                  * Don't match entries like "da 4" as a wired down
288                  * device, but do match entries like "da 4 target 5"
289                  * or even "da 4 scbus 1". 
290                  */
291                 i = -1;
292                 while ((i = resource_locate(i, periph_name)) != -1) {
293                         dname = resource_query_name(i);
294                         dunit = resource_query_unit(i);
295                         /* if no "target" and no specific scbus, skip */
296                         if (resource_int_value(dname, dunit, "target", &val) &&
297                             (resource_string_value(dname, dunit, "at",&strval)||
298                              strcmp(strval, "scbus") == 0))
299                                 continue;
300                         if (newunit == dunit)
301                                 break;
302                 }
303                 if (i == -1)
304                         break;
305         }
306         crit_exit();
307         return (newunit);
308 }
309
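/*
 * Choose a unit number for a new instance of this driver.  If a config
 * hint wires the driver to this bus, target, or lun (e.g. an entry like
 * "da 4 target 5"), start from that unit; otherwise start from zero.  The
 * actual assignment is then made by camperiphnextunit(), which skips units
 * that are already in use or reserved for other wired devices.
 */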
310 static u_int
311 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
312               target_id_t target, lun_id_t lun)
313 {
314         u_int   unit;
315         int     hit, i, val, dunit;
316         const char *dname;
317         char    pathbuf[32], *strval, *periph_name;
318
319         unit = 0;
320
321         periph_name = p_drv->driver_name;
322         snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
323         i = -1;
324         for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
325                 dname = resource_query_name(i);
326                 dunit = resource_query_unit(i);
327                 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
328                         if (strcmp(strval, pathbuf) != 0)
329                                 continue;
330                         hit++;
331                 }
332                 if (resource_int_value(dname, dunit, "target", &val) == 0) {
333                         if (val != target)
334                                 continue;
335                         hit++;
336                 }
337                 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
338                         if (val != lun)
339                                 continue;
340                         hit++;
341                 }
342                 if (hit != 0) {
343                         unit = dunit;
344                         break;
345                 }
346         }
347
348         /*
349          * Either start from 0 looking for the next unit or from
350          * the unit number given in the resource config.  This way,
351          * if we have wildcard matches, we don't return the same
352          * unit number twice.
353          */
354         unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
355                                  target, lun);
356
357         return (unit);
358 }
359
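/*
 * Mark a peripheral instance as invalid (e.g. because the underlying
 * device went away).  The driver's oninvalidate() hook runs only on the
 * first invalidation, and the instance is freed immediately if no other
 * references remain.
 */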
360 void
361 cam_periph_invalidate(struct cam_periph *periph)
362 {
363         /*
364          * We only call this routine the first time a peripheral is
365          * invalidated.  The oninvalidate() routine is always called in
366          * a critical section.
367          */
368         crit_enter();
369         if (((periph->flags & CAM_PERIPH_INVALID) == 0)
370          && (periph->periph_oninval != NULL))
371                 periph->periph_oninval(periph);
372
373         periph->flags |= CAM_PERIPH_INVALID;
374         periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
375
376         if (periph->refcount == 0)
377                 camperiphfree(periph);
378         else if (periph->refcount < 0)
379                 printf("cam_periph_invalidate: refcount < 0!!\n");
380         crit_exit();
381 }
382
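/*
 * Final teardown of a peripheral instance: run the driver's destructor,
 * unlink the instance from its driver's unit list, deregister it from the
 * transport layer, deliver any async callback deferred by
 * cam_periph_alloc() while the old instance was going away, and release
 * the path and the structure itself.
 */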
383 static void
384 camperiphfree(struct cam_periph *periph)
385 {
386         struct periph_driver **p_drv;
387
388         SET_FOREACH(p_drv, periphdriver_set) {
389                 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
390                         break;
391         }
392
393         if (*p_drv == NULL) {
394                 printf("camperiphfree: attempt to free "
395                         "non-existent periph: %s\n", periph->periph_name);
396                 return;
397         }
398         
399         if (periph->periph_dtor != NULL)
400                 periph->periph_dtor(periph);
401         
402         crit_enter();
403         TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
404         (*p_drv)->generation++;
405         crit_exit();
406
407         xpt_remove_periph(periph);
408
409         if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
410                 union ccb ccb;
411                 void *arg;
412
413                 switch (periph->deferred_ac) {
414                 case AC_FOUND_DEVICE:
415                         ccb.ccb_h.func_code = XPT_GDEV_TYPE;
416                         xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
417                         xpt_action(&ccb);
418                         arg = &ccb;
419                         break;
420                 case AC_PATH_REGISTERED:
421                         ccb.ccb_h.func_code = XPT_PATH_INQ;
422                         xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
423                         xpt_action(&ccb);
424                         arg = &ccb;
425                         break;
426                 default:
427                         arg = NULL;
428                         break;
429                 }
430                 periph->deferred_callback(NULL, periph->deferred_ac,
431                                           periph->path, arg);
432         }
433         xpt_free_path(periph->path);
434         free(periph, M_DEVBUF);
435 }
436
437 /*
438  * Wait interruptibly for an exclusive lock.
439  */
440 int
441 cam_periph_lock(struct cam_periph *periph, int flags)
442 {
443         int error;
444
445         while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
446                 periph->flags |= CAM_PERIPH_LOCK_WANTED;
447                 if ((error = tsleep(periph, flags, "caplck", 0)) != 0)
448                         return error;
449         }
450
451         if (cam_periph_acquire(periph) != CAM_REQ_CMP)
452                 return(ENXIO);
453
454         periph->flags |= CAM_PERIPH_LOCKED;
455         return 0;
456 }
457
458 /*
459  * Unlock and wake up any waiters.
460  */
461 void
462 cam_periph_unlock(struct cam_periph *periph)
463 {
464         periph->flags &= ~CAM_PERIPH_LOCKED;
465         if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
466                 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
467                 wakeup(periph);
468         }
469
470         cam_periph_release(periph);
471 }
472
473 /*
474  * Map user virtual pointers into kernel virtual address space, so we can
475  * access the memory.  This won't work on physical pointers, for now it's
476  * up to the caller to check for that.  (XXX KDM -- should we do that here
477  * instead?)  This also only works for up to DFLTPHYS memory.  Since we use
478  * buffers to map stuff in and out, we're limited to the buffer size.
479  */
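
/*
 * Illustrative sketch of how the map/unmap pair is normally used, e.g.
 * from a passthrough ioctl handler.  "error_routine" stands in for the
 * caller's CCB error handler; locking and CCB setup are omitted.
 *
 *	struct cam_periph_map_info mapinfo;
 *	int error;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	error = cam_periph_mapmem(ccb, &mapinfo);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, error_routine, 0, 0, NULL);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */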
480 int
481 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
482 {
483         int numbufs, i, j;
484         int flags[CAM_PERIPH_MAXMAPS];
485         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
486         u_int32_t lengths[CAM_PERIPH_MAXMAPS];
487         u_int32_t dirs[CAM_PERIPH_MAXMAPS];
488
489         switch(ccb->ccb_h.func_code) {
490         case XPT_DEV_MATCH:
491                 if (ccb->cdm.match_buf_len == 0) {
492                         printf("cam_periph_mapmem: invalid match buffer "
493                                "length 0\n");
494                         return(EINVAL);
495                 }
496                 if (ccb->cdm.pattern_buf_len > 0) {
497                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
498                         lengths[0] = ccb->cdm.pattern_buf_len;
499                         dirs[0] = CAM_DIR_OUT;
500                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
501                         lengths[1] = ccb->cdm.match_buf_len;
502                         dirs[1] = CAM_DIR_IN;
503                         numbufs = 2;
504                 } else {
505                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
506                         lengths[0] = ccb->cdm.match_buf_len;
507                         dirs[0] = CAM_DIR_IN;
508                         numbufs = 1;
509                 }
510                 break;
511         case XPT_SCSI_IO:
512         case XPT_CONT_TARGET_IO:
513                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
514                         return(0);
515
516                 data_ptrs[0] = &ccb->csio.data_ptr;
517                 lengths[0] = ccb->csio.dxfer_len;
518                 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
519                 numbufs = 1;
520                 break;
521         default:
522                 return(EINVAL);
523                 break; /* NOTREACHED */
524         }
525
526         /*
527          * Check the transfer length and permissions first, so we don't
528          * have to unmap any previously mapped buffers.
529          */
530         for (i = 0; i < numbufs; i++) {
531
532                 flags[i] = 0;
533
534                 /*
535                  * The userland data pointer passed in may not be page
536                  * aligned.  vmapbuf() truncates the address to a page
537                  * boundary, so if the address isn't page aligned, we'll
538                  * need enough space for the given transfer length, plus
539                  * whatever extra space is necessary to make it to the page
540                  * boundary.
541                  */
542                 if ((lengths[i] +
543                     (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
544                         printf("cam_periph_mapmem: attempt to map %lu bytes, "
545                                "which is greater than DFLTPHYS(%d)\n",
546                                (long)(lengths[i] +
547                                (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
548                                DFLTPHYS);
549                         return(E2BIG);
550                 }
551
552                 if (dirs[i] & CAM_DIR_OUT) {
553                         flags[i] = B_WRITE;
554                         if (!useracc(*data_ptrs[i], lengths[i], 
555                                      VM_PROT_READ)) {
556                                 printf("cam_periph_mapmem: error, "
557                                         "address %p, length %lu isn't "
558                                         "user accessible for READ\n",
559                                         (void *)*data_ptrs[i],
560                                         (u_long)lengths[i]);
561                                 return(EACCES);
562                         }
563                 }
564
565                 /*
566                  * XXX this check is really bogus, since B_WRITE currently
567                  * is all 0's, and so it is "set" all the time.
568                  */
569                 if (dirs[i] & CAM_DIR_IN) {
570                         flags[i] |= B_READ;
571                         if (!useracc(*data_ptrs[i], lengths[i], 
572                                      VM_PROT_WRITE)) {
573                                 printf("cam_periph_mapmem: error, "
574                                         "address %p, length %lu isn't "
575                                         "user accessible for WRITE\n",
576                                         (void *)*data_ptrs[i],
577                                         (u_long)lengths[i]);
578
579                                 return(EACCES);
580                         }
581                 }
582
583         }
584
585         for (i = 0; i < numbufs; i++) {
586                 /*
587                  * Get the buffer.
588                  */
589                 mapinfo->bp[i] = getpbuf(NULL);
590
591                 /* save the buffer's data address */
592                 mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;
593
594                 /* put our pointer in the data slot */
595                 mapinfo->bp[i]->b_data = *data_ptrs[i];
596
597                 /* set the transfer length, we know it's < DFLTPHYS */
598                 mapinfo->bp[i]->b_bufsize = lengths[i];
599
600                 /* set the flags */
601                 mapinfo->bp[i]->b_flags = flags[i] | B_PHYS;
602
603                 /* map the buffer into kernel memory */
604                 if (vmapbuf(mapinfo->bp[i]) < 0) {
605                         printf("cam_periph_mapmem: error, "
606                                 "address %p, length %lu isn't "
607                                 "user accessible any more\n",
608                                 (void *)*data_ptrs[i],
609                                 (u_long)lengths[i]);
610                         for (j = 0; j < i; ++j) {
611                                 *data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
612                                 mapinfo->bp[j]->b_flags &= ~B_PHYS;
613                                 relpbuf(mapinfo->bp[j], NULL);
614                         }
615                         return(EACCES);
616                 }
617
618                 /* set our pointer to the new mapped area */
619                 *data_ptrs[i] = mapinfo->bp[i]->b_data;
620
621                 mapinfo->num_bufs_used++;
622         }
623
624         return(0);
625 }
626
627 /*
628  * Unmap memory segments mapped into kernel virtual address space by
629  * cam_periph_mapmem().
630  */
631 void
632 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
633 {
634         int numbufs, i;
635         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
636
637         if (mapinfo->num_bufs_used <= 0) {
638                 /* allow ourselves to be swapped once again */
639                 return;
640         }
641
642         switch (ccb->ccb_h.func_code) {
643         case XPT_DEV_MATCH:
644                 numbufs = min(mapinfo->num_bufs_used, 2);
645
646                 if (numbufs == 1) {
647                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
648                 } else {
649                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
650                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
651                 }
652                 break;
653         case XPT_SCSI_IO:
654         case XPT_CONT_TARGET_IO:
655                 data_ptrs[0] = &ccb->csio.data_ptr;
656                 numbufs = min(mapinfo->num_bufs_used, 1);
657                 break;
658         default:
659                 /* allow ourselves to be swapped once again */
660                 return;
661                 break; /* NOTREACHED */ 
662         }
663
664         for (i = 0; i < numbufs; i++) {
665                 /* Set the user's pointer back to the original value */
666                 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
667
668                 /* unmap the buffer */
669                 vunmapbuf(mapinfo->bp[i]);
670
671                 /* clear the flags we set above */
672                 mapinfo->bp[i]->b_flags &= ~B_PHYS;
673
674                 /* release the buffer */
675                 relpbuf(mapinfo->bp[i], NULL);
676         }
677
678         /* allow ourselves to be swapped once again */
679 }
680
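/*
 * Obtain a CCB for this peripheral at the requested priority.  If none is
 * available, schedule the peripheral with the transport layer and sleep
 * until a CCB of the requested priority appears on the ccb_list.
 */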
681 union ccb *
682 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
683 {
684         struct ccb_hdr *ccb_h;
685
686         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cam_periph_getccb\n"));
687
688         crit_enter();
689         
690         while (periph->ccb_list.slh_first == NULL) {
691                 if (periph->immediate_priority > priority)
692                         periph->immediate_priority = priority;
693                 xpt_schedule(periph, priority);
694                 if ((periph->ccb_list.slh_first != NULL)
695                  && (periph->ccb_list.slh_first->pinfo.priority == priority))
696                         break;
697                 tsleep(&periph->ccb_list, 0, "cgticb", 0);
698         }
699
700         ccb_h = periph->ccb_list.slh_first;
701         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
702         crit_exit();
703         return ((union ccb *)ccb_h);
704 }
705
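/*
 * Sleep until a CCB issued via xpt_action() has completed, i.e. it is no
 * longer queued and its status is no longer CAM_REQ_INPROG.
 */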
706 void
707 cam_periph_ccbwait(union ccb *ccb)
708 {
709         crit_enter();
710         if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
711          || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
712                 tsleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0);
713         crit_exit();
714 }
715
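/*
 * Common ioctl handling for peripheral drivers.  Only CAMGETPASSTHRU is
 * handled here: walk the device list for this path (XPT_GDEVLIST) until
 * the pass(4) instance attached to the same device is found, and copy the
 * resulting CCB back to the caller.
 */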
716 int
717 cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
718                  int (*error_routine)(union ccb *ccb, 
719                                       cam_flags camflags,
720                                       u_int32_t sense_flags))
721 {
722         union ccb            *ccb;
723         int                  error;
724         int                  found;
725
726         error = found = 0;
727
728         switch(cmd){
729         case CAMGETPASSTHRU:
730                 ccb = cam_periph_getccb(periph, /* priority */ 1);
731                 xpt_setup_ccb(&ccb->ccb_h,
732                               ccb->ccb_h.path,
733                               /*priority*/1);
734                 ccb->ccb_h.func_code = XPT_GDEVLIST;
735
736                 /*
737                  * Basically, the point of this is that we go through
738                  * getting the list of devices, until we find a passthrough
739                  * device.  In the current version of the CAM code, the
740                  * only way to determine what type of device we're dealing
741                  * with is by its name.
742                  */
743                 while (found == 0) {
744                         ccb->cgdl.index = 0;
745                         ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
746                         while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
747
748                                 /* we want the next device in the list */
749                                 xpt_action(ccb);
750                                 if (strncmp(ccb->cgdl.periph_name, 
751                                     "pass", 4) == 0){
752                                         found = 1;
753                                         break;
754                                 }
755                         }
756                         if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
757                             (found == 0)) {
758                                 ccb->cgdl.periph_name[0] = '\0';
759                                 ccb->cgdl.unit_number = 0;
760                                 break;
761                         }
762                 }
763
764                 /* copy the result back out */  
765                 bcopy(ccb, addr, sizeof(union ccb));
766
767                 /* and release the ccb */
768                 xpt_release_ccb(ccb);
769
770                 break;
771         default:
772                 error = ENOTTY;
773                 break;
774         }
775         return(error);
776 }
777
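/*
 * Dispatch a CCB and wait for it to complete, invoking the supplied error
 * routine on unsuccessful completions and waiting again for as long as it
 * returns ERESTART (i.e. while error recovery is in progress).  Any device
 * queue freeze left on the path is released, and, when a devstat structure
 * is supplied, statistics are recorded for XPT_SCSI_IO requests.
 */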
778 int
779 cam_periph_runccb(union ccb *ccb,
780                   int (*error_routine)(union ccb *ccb,
781                                        cam_flags camflags,
782                                        u_int32_t sense_flags),
783                   cam_flags camflags, u_int32_t sense_flags,
784                   struct devstat *ds)
785 {
786         int error;
787  
788         error = 0;
789         
790         /*
791          * If the user has supplied a stats structure, and if we understand
792          * this particular type of ccb, record the transaction start.
793          */
794         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
795                 devstat_start_transaction(ds);
796
797         xpt_action(ccb);
798  
799         do {
800                 cam_periph_ccbwait(ccb);
801                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
802                         error = 0;
803                 else if (error_routine != NULL)
804                         error = (*error_routine)(ccb, camflags, sense_flags);
805                 else
806                         error = 0;
807
808         } while (error == ERESTART);
809           
810         if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 
811                 cam_release_devq(ccb->ccb_h.path,
812                                  /* relsim_flags */0,
813                                  /* openings */0,
814                                  /* timeout */0,
815                                  /* getcount_only */ FALSE);
816
817         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
818                 devstat_end_transaction(ds,
819                                         ccb->csio.dxfer_len,
820                                         ccb->csio.tag_action & 0xf,
821                                         ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
822                                         CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
823                                         (ccb->ccb_h.flags & CAM_DIR_OUT) ?
824                                         DEVSTAT_WRITE : 
825                                         DEVSTAT_READ);
826
827         return(error);
828 }
829
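/*
 * Helpers for manipulating the device queue freeze count on a path:
 * cam_freeze_devq() issues a no-op CCB with CAM_DEV_QFREEZE set, and
 * cam_release_devq() issues XPT_REL_SIMQ with the given release flags,
 * openings adjustment and timeout, returning the remaining freeze count.
 */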
830 void
831 cam_freeze_devq(struct cam_path *path)
832 {
833         struct ccb_hdr ccb_h;
834
835         xpt_setup_ccb(&ccb_h, path, /*priority*/1);
836         ccb_h.func_code = XPT_NOOP;
837         ccb_h.flags = CAM_DEV_QFREEZE;
838         xpt_action((union ccb *)&ccb_h);
839 }
840
841 u_int32_t
842 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
843                  u_int32_t openings, u_int32_t timeout,
844                  int getcount_only)
845 {
846         struct ccb_relsim crs;
847
848         xpt_setup_ccb(&crs.ccb_h, path,
849                       /*priority*/1);
850         crs.ccb_h.func_code = XPT_REL_SIMQ;
851         crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
852         crs.release_flags = relsim_flags;
853         crs.openings = openings;
854         crs.release_timeout = timeout;
855         xpt_action((union ccb *)&crs);
856         return (crs.qfrozen_cnt);
857 }
858
859 #define saved_ccb_ptr ppriv_ptr0
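
/*
 * Completion handler for the recovery commands (TEST UNIT READY and
 * START STOP UNIT) issued by cam_periph_error().  Once recovery finishes,
 * the original CCB stashed in saved_ccb_ptr is copied back over this one
 * and re-issued, and any device queue freeze is released, possibly after
 * a timeout.
 */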
860 static void
861 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
862 {
863         cam_status      status;
864         int             frozen;
865         int             sense;
866         struct scsi_start_stop_unit *scsi_cmd;
867         u_int32_t       relsim_flags, timeout;
868         u_int32_t       qfrozen_cnt;
869
870         status = done_ccb->ccb_h.status;
871         frozen = (status & CAM_DEV_QFRZN) != 0;
872         sense  = (status & CAM_AUTOSNS_VALID) != 0;
873         status &= CAM_STATUS_MASK;
874
875         timeout = 0;
876         relsim_flags = 0;
877
878         /* 
879          * Unfreeze the queue once if it is already frozen.
880          */
881         if (frozen != 0) {
882                 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
883                                               /*relsim_flags*/0,
884                                               /*openings*/0,
885                                               /*timeout*/0,
886                                               /*getcount_only*/0);
887         }
888
889         switch (status) {
890
891         case CAM_REQ_CMP:
892
893                 /*
894                  * If we have successfully taken a device from the not
895                  * ready to ready state, re-scan the device and re-get the
896                  * inquiry information.  Many devices (mostly disks) don't
897                  * properly report their inquiry information unless they
898                  * are spun up.
899                  */
900                 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
901                         scsi_cmd = (struct scsi_start_stop_unit *)
902                                         &done_ccb->csio.cdb_io.cdb_bytes;
903
904                         if (scsi_cmd->opcode == START_STOP_UNIT)
905                                 xpt_async(AC_INQ_CHANGED,
906                                           done_ccb->ccb_h.path, NULL);
907                 }
908                 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
909                       sizeof(union ccb));
910
911                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
912
913                 xpt_action(done_ccb);
914
915                 break;
916         case CAM_SCSI_STATUS_ERROR:
917                 scsi_cmd = (struct scsi_start_stop_unit *)
918                                 &done_ccb->csio.cdb_io.cdb_bytes;
919                 if (sense != 0) {
920                         struct scsi_sense_data *sense;
921                         int    error_code, sense_key, asc, ascq;        
922
923                         sense = &done_ccb->csio.sense_data;
924                         scsi_extract_sense(sense, &error_code, 
925                                            &sense_key, &asc, &ascq);
926
927                         /*
928                          * If the error is "invalid field in CDB", 
929                          * and the load/eject flag is set, turn the 
930                          * flag off and try again.  This is just in 
931                          * case the drive in question barfs on the 
932                          * load eject flag.  The CAM code should set 
933                          * the load/eject flag by default for 
934                          * removable media.
935                          */
936
937                         /* XXX KDM 
938                          * Should we check to see what the specific
939                          * scsi status is??  Or does it not matter
940                          * since we already know that there was an
941                          * error, and we know what the specific
942                          * error code was, and we know what the
943                          * opcode is..
944                          */
945                         if ((scsi_cmd->opcode == START_STOP_UNIT) &&
946                             ((scsi_cmd->how & SSS_LOEJ) != 0) &&
947                              (asc == 0x24) && (ascq == 0x00) &&
948                              (done_ccb->ccb_h.retry_count > 0)) {
949
950                                 scsi_cmd->how &= ~SSS_LOEJ;
951
952                                 xpt_action(done_ccb);
953
954                         } else if (done_ccb->ccb_h.retry_count > 0) {
955                                 /*
956                                  * In this case, the error recovery
957                                  * command failed, but we've got 
958                                  * some retries left on it.  Give
959                                  * it another try.
960                                  */
961
962                                 /* set the timeout to .5 sec */
963                                 relsim_flags =
964                                         RELSIM_RELEASE_AFTER_TIMEOUT;
965                                 timeout = 500;
966
967                                 xpt_action(done_ccb);
968
969                                 break;
970
971                         } else {
972                                 /* 
973                                  * Copy the original CCB back and
974                                  * send it back to the caller.
975                                  */
976                                 bcopy(done_ccb->ccb_h.saved_ccb_ptr,            
977                                       done_ccb, sizeof(union ccb));
978
979                                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
980
981                                 xpt_action(done_ccb);
982                         }
983                 } else {
984                         /*
985                          * Eh??  The command failed, but we don't
986                          * have any sense.  What's up with that?
987                          * Fire the CCB again to return it to the
988                          * caller.
989                          */
990                         bcopy(done_ccb->ccb_h.saved_ccb_ptr,
991                               done_ccb, sizeof(union ccb));
992
993                         periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
994
995                         xpt_action(done_ccb);
996
997                 }
998                 break;
999         default:
1000                 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
1001                       sizeof(union ccb));
1002
1003                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1004
1005                 xpt_action(done_ccb);
1006
1007                 break;
1008         }
1009
1010         /* decrement the retry count */
1011         if (done_ccb->ccb_h.retry_count > 0)
1012                 done_ccb->ccb_h.retry_count--;
1013
1014         qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
1015                                       /*relsim_flags*/relsim_flags,
1016                                       /*openings*/0,
1017                                       /*timeout*/timeout,
1018                                       /*getcount_only*/0);
1019 }
1020
1021 /*
1022  * Generic Async Event handler.  Peripheral drivers usually
1023  * filter out the events that require personal attention,
1024  * and leave the rest to this function.
1025  */
1026 void
1027 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1028                  struct cam_path *path, void *arg)
1029 {
1030         switch (code) {
1031         case AC_LOST_DEVICE:
1032                 cam_periph_invalidate(periph);
1033                 break; 
1034         case AC_SENT_BDR:
1035         case AC_BUS_RESET:
1036         {
1037                 cam_periph_bus_settle(periph, SCSI_DELAY);
1038                 break;
1039         }
1040         default:
1041                 break;
1042         }
1043 }
1044
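/*
 * Freeze I/O to this peripheral for whatever remains of the bus settle
 * delay, measured from the last reset time reported via XPT_GDEV_STATS.
 */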
1045 void
1046 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1047 {
1048         struct ccb_getdevstats cgds;
1049
1050         xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
1051         cgds.ccb_h.func_code = XPT_GDEV_STATS;
1052         xpt_action((union ccb *)&cgds);
1053         cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1054 }
1055
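/*
 * If less than duration_ms has elapsed since *event_time, freeze the
 * device queue and schedule a timed release for the remaining interval.
 */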
1056 void
1057 cam_periph_freeze_after_event(struct cam_periph *periph,
1058                               struct timeval* event_time, u_int duration_ms)
1059 {
1060         struct timeval delta;
1061         struct timeval duration_tv;
1062
1063         microuptime(&delta);
1064         timevalsub(&delta, event_time);
1065         duration_tv.tv_sec = duration_ms / 1000;
1066         duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1067         if (timevalcmp(&delta, &duration_tv, <)) {
1068                 timevalsub(&duration_tv, &delta);
1069
1070                 duration_ms = duration_tv.tv_sec * 1000;
1071                 duration_ms += duration_tv.tv_usec / 1000;
1072                 cam_freeze_devq(periph->path); 
1073                 cam_release_devq(periph->path,
1074                                 RELSIM_RELEASE_AFTER_TIMEOUT,
1075                                 /*reduction*/0,
1076                                 /*timeout*/duration_ms,
1077                                 /*getcount_only*/0);
1078         }
1079
1080 }
1081
1082 /*
1083  * Generic error handler.  Peripheral drivers usually filter
1084  * out the errors that they handle in a unique manner, then
1085  * call this function.
1086  */
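
/*
 * The save_ccb argument, when non-NULL, must point to storage large enough
 * for a full CCB.  When a recovery command (test unit ready or start unit)
 * is initiated, the original CCB is saved there, the recovery command is
 * built in its place, and ERESTART is returned so the caller keeps waiting
 * until camperiphdone() restores and re-issues the original request.
 */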
1087 int
1088 cam_periph_error(union ccb *ccb, cam_flags camflags,
1089                  u_int32_t sense_flags, union ccb *save_ccb)
1090 {
1091         cam_status status;
1092         int        frozen;
1093         int        sense;
1094         int        error;
1095         int        openings;
1096         int        retry;
1097         u_int32_t  relsim_flags;
1098         u_int32_t  timeout;
1099         
1100         status = ccb->ccb_h.status;
1101         frozen = (status & CAM_DEV_QFRZN) != 0;
1102         sense  = (status & CAM_AUTOSNS_VALID) != 0;
1103         status &= CAM_STATUS_MASK;
1104         relsim_flags = 0;
1105
1106         switch (status) {
1107         case CAM_REQ_CMP:
1108                 /* decrement the number of retries */
1109                 retry = ccb->ccb_h.retry_count > 0;
1110                 if (retry)
1111                         ccb->ccb_h.retry_count--;
1112                 error = 0;
1113                 break;
1114         case CAM_AUTOSENSE_FAIL:
1115         case CAM_SCSI_STATUS_ERROR:
1116
1117                 switch (ccb->csio.scsi_status) {
1118                 case SCSI_STATUS_OK:
1119                 case SCSI_STATUS_COND_MET:
1120                 case SCSI_STATUS_INTERMED:
1121                 case SCSI_STATUS_INTERMED_COND_MET:
1122                         error = 0;
1123                         break;
1124                 case SCSI_STATUS_CMD_TERMINATED:
1125                 case SCSI_STATUS_CHECK_COND:
1126                         if (sense != 0) {
1127                                 struct scsi_sense_data *sense;
1128                                 int    error_code, sense_key, asc, ascq;
1129                                 struct cam_periph *periph;
1130                                 scsi_sense_action err_action;
1131                                 struct ccb_getdev cgd;
1132
1133                                 sense = &ccb->csio.sense_data;
1134                                 scsi_extract_sense(sense, &error_code,
1135                                                    &sense_key, &asc, &ascq);
1136                                 periph = xpt_path_periph(ccb->ccb_h.path);
1137
1138                                 /*
1139                                  * Grab the inquiry data for this device.
1140                                  */
1141                                 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path,
1142                                               /*priority*/ 1);
1143                                 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1144                                 xpt_action((union ccb *)&cgd);
1145
1146                                 err_action = scsi_error_action(asc, ascq, 
1147                                                                &cgd.inq_data);
1148
1149                                 /*
1150                                  * Send a Test Unit Ready to the device.
1151                                  * If the 'many' flag is set, we send 120
1152                                  * test unit ready commands, one every half 
1153                                  * second.  Otherwise, we just send one TUR.
1154                                  * We only want to do this if the retry 
1155                                  * count has not been exhausted.
1156                                  */
1157                                 if (((err_action & SS_MASK) == SS_TUR)
1158                                  && save_ccb != NULL 
1159                                  && ccb->ccb_h.retry_count > 0) {
1160
1161                                         /*
1162                                          * Since error recovery is already
1163                                          * in progress, don't attempt to
1164                                          * process this error.  It is probably
1165                                          * related to the error that caused
1166                                          * the currently active error recovery
1167                                          * action.  Also, we only have
1168                                          * space for one saved CCB, so if we
1169                                          * had two concurrent error recovery
1170                                          * actions, we would end up
1171                                          * over-writing one error recovery
1172                                          * CCB with another one.
1173                                          */
1174                                         if (periph->flags &
1175                                             CAM_PERIPH_RECOVERY_INPROG) {
1176                                                 error = ERESTART;
1177                                                 break;
1178                                         }
1179
1180                                         periph->flags |=
1181                                                 CAM_PERIPH_RECOVERY_INPROG;
1182
1183                                         /* decrement the number of retries */
1184                                         if ((err_action & 
1185                                              SSQ_DECREMENT_COUNT) != 0) {
1186                                                 retry = 1;
1187                                                 ccb->ccb_h.retry_count--;
1188                                         }
1189
1190                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1191
1192                                         /*
1193                                          * We retry this one every half
1194                                          * second for a minute.  If the
1195                                          * device hasn't become ready in a
1196                                          * minute's time, it's unlikely to
1197                                          * ever become ready.  If the table
1198                                          * doesn't specify SSQ_MANY, we can
1199                                          * only try this once.  Oh well.
1200                                          */
1201                                         if ((err_action & SSQ_MANY) != 0)
1202                                                 scsi_test_unit_ready(&ccb->csio,
1203                                                                /*retries*/120,
1204                                                                camperiphdone,
1205                                                                MSG_SIMPLE_Q_TAG,
1206                                                                SSD_FULL_SIZE,
1207                                                                /*timeout*/5000);
1208                                         else
1209                                                 scsi_test_unit_ready(&ccb->csio,
1210                                                                /*retries*/1,
1211                                                                camperiphdone,
1212                                                                MSG_SIMPLE_Q_TAG,
1213                                                                SSD_FULL_SIZE,
1214                                                                /*timeout*/5000);
1215
1216                                         /* release the queue after .5 sec.  */
1217                                         relsim_flags = 
1218                                                 RELSIM_RELEASE_AFTER_TIMEOUT;
1219                                         timeout = 500;
1220                                         /*
1221                                          * Drop the priority to 0 so that 
1222                                          * we are the first to execute.  Also 
1223                                          * freeze the queue after this command 
1224                                          * is sent so that we can restore the 
1225                                          * old csio and have it queued in the 
1226                                          * proper order before we let normal 
1227                                          * transactions go to the drive.
1228                                          */
1229                                         ccb->ccb_h.pinfo.priority = 0;
1230                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1231
1232                                         /*
1233                                          * Save a pointer to the original
1234                                          * CCB in the new CCB.
1235                                          */
1236                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1237
1238                                         error = ERESTART;
1239                                 }
1240                                 /*
1241                                  * Send a start unit command to the device,
1242                                  * and then retry the command.  We only 
1243                                  * want to do this if the retry count has 
1244                                  * not been exhausted.  If the user 
1245                                  * specified 0 retries, then we follow 
1246                                  * their request and do not retry.
1247                                  */
1248                                 else if (((err_action & SS_MASK) == SS_START)
1249                                       && save_ccb != NULL 
1250                                       && ccb->ccb_h.retry_count > 0) {
1251                                         int le;
1252
1253                                         /*
1254                                          * Only one error recovery action
1255                                          * at a time.  See above.
1256                                          */
1257                                         if (periph->flags &
1258                                             CAM_PERIPH_RECOVERY_INPROG) {
1259                                                 error = ERESTART;
1260                                                 break;
1261                                         }
1262
1263                                         periph->flags |=
1264                                                 CAM_PERIPH_RECOVERY_INPROG;
1265
1266                                         /* decrement the number of retries */
1267                                         retry = 1;
1268                                         ccb->ccb_h.retry_count--;
1269
1270                                         /*
1271                                          * Check for removable media and
1272                                          * set load/eject flag
1273                                          * appropriately.
1274                                          */
1275                                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1276                                                 le = TRUE;
1277                                         else
1278                                                 le = FALSE;
1279
1280                                         /*
1281                                          * Attempt to start the drive up.
1282                                          *
1283                                          * Save the current ccb so it can 
1284                                          * be restored and retried once the 
1285                                          * drive is started up.
1286                                          */
1287                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1288
1289                                         scsi_start_stop(&ccb->csio,
1290                                                         /*retries*/1,
1291                                                         camperiphdone,
1292                                                         MSG_SIMPLE_Q_TAG,
1293                                                         /*start*/TRUE,
1294                                                         /*load/eject*/le,
1295                                                         /*immediate*/FALSE,
1296                                                         SSD_FULL_SIZE,
1297                                                         /*timeout*/50000);
1298                                         /*
1299                                          * Drop the priority to 0 so that 
1300                                          * we are the first to execute.  Also 
1301                                          * freeze the queue after this command 
1302                                          * is sent so that we can restore the 
1303                                          * old csio and have it queued in the 
1304                                          * proper order before we let normal 
1305                                          * transactions go to the drive.
1306                                          */
1307                                         ccb->ccb_h.pinfo.priority = 0;
1308                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1309
1310                                         /*
1311                                          * Save a pointer to the original
1312                                          * CCB in the new CCB.
1313                                          */
1314                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
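                                        /*
                                         * camperiphdone() uses saved_ccb_ptr
                                         * to put the original CCB back in
                                         * place and retry it once the START
                                         * UNIT completes.
                                         */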
1315
1316                                         error = ERESTART;
1317                                 } else if ((sense_flags & SF_RETRY_UA) != 0) {
1318                                         /*
1319                                          * XXX KDM this is a *horrible*
1320                                          * hack.  
1321                                          */
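                                        /*
                                         * scsi_interpret_sense() maps the
                                         * autosense data to an errno and
                                         * fills in relsim_flags, openings and
                                         * timeout for the queue release done
                                         * at the end of this function.
                                         */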
1322                                         error = scsi_interpret_sense(ccb,
1323                                                                   sense_flags,
1324                                                                   &relsim_flags,
1325                                                                   &openings,
1326                                                                   &timeout,
1327                                                                   err_action);
1328                                 } 
1329
1330                                 /*
1331                                  * Theoretically, this code should send a
1332                                  * test unit ready to the given device, and 
1333                                  * if it returns an error, send a start
1334                                  * unit command.  Since we don't yet have
1335                                  * the capability to do two-command error
1336                                  * recovery, just send a start unit.
1337                                  * XXX KDM fix this!
1338                                  */
1339                                 else if (((err_action & SS_MASK) == SS_TURSTART)
1340                                       && save_ccb != NULL
1341                                       && ccb->ccb_h.retry_count > 0) {
1342                                         int le;
1343
1344                                         /*
1345                                          * Only one error recovery action
1346                                          * at a time.  See above.
1347                                          */
1348                                         if (periph->flags &
1349                                             CAM_PERIPH_RECOVERY_INPROG) {
1350                                                 error = ERESTART;
1351                                                 break;
1352                                         }
1353
1354                                         periph->flags |=
1355                                                 CAM_PERIPH_RECOVERY_INPROG;
1356
1357                                         /* decrement the number of retries */
1358                                         retry = 1;
1359                                         ccb->ccb_h.retry_count--;
1360
1361                                         /*
1362                                          * Check for removable media and
1363                                          * set load/eject flag
1364                                          * appropriately.
1365                                          */
1366                                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1367                                                 le = TRUE;
1368                                         else
1369                                                 le = FALSE;
1370
1371                                         /*
1372                                          * Attempt to start the drive up.
1373                                          *
1374                                          * Save the current ccb so it can 
1375                                          * be restored and retried once the 
1376                                          * drive is started up.
1377                                          */
1378                                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1379
1380                                         scsi_start_stop(&ccb->csio,
1381                                                         /*retries*/1,
1382                                                         camperiphdone,
1383                                                         MSG_SIMPLE_Q_TAG,
1384                                                         /*start*/TRUE,
1385                                                         /*load/eject*/le,
1386                                                         /*immediate*/FALSE,
1387                                                         SSD_FULL_SIZE,
1388                                                         /*timeout*/50000);
1389
1390                                         /* release the queue after .5 sec.  */
1391                                         relsim_flags = 
1392                                                 RELSIM_RELEASE_AFTER_TIMEOUT;
1393                                         timeout = 500;
1394                                         /*
1395                                          * Drop the priority to 0 so that 
1396                                          * we are the first to execute.  Also 
1397                                          * freeze the queue after this command 
1398                                          * is sent so that we can restore the 
1399                                          * old csio and have it queued in the 
1400                                          * proper order before we let normal 
1401                                          * transactions go to the drive.
1402                                          */
1403                                         ccb->ccb_h.pinfo.priority = 0;
1404                                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1405
1406                                         /*
1407                                          * Save a pointer to the original
1408                                          * CCB in the new CCB.
1409                                          */
1410                                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1411
1412                                         error = ERESTART;
1413                                 } else {
1414                                         error = scsi_interpret_sense(ccb,
1415                                                                   sense_flags,
1416                                                                   &relsim_flags,
1417                                                                   &openings,
1418                                                                   &timeout,
1419                                                                   err_action);
1420                                 }
1421                         } else if (ccb->csio.scsi_status == 
1422                                    SCSI_STATUS_CHECK_COND
1423                                 && status != CAM_AUTOSENSE_FAIL) {
1424                                 /* no point in decrementing the retry count */
1425                                 panic("cam_periph_error: scsi status of "
1426                                       "CHECK COND returned but no sense "
1427                                       "information is available.  "
1428                                       "Controller should have returned "
1429                                       "CAM_AUTOSENSE_FAIL");
1430                                 /* NOTREACHED */
1431                                 error = EIO;
1432                         } else if (ccb->ccb_h.retry_count == 0) {
1433                                 /*
1434                                  * XXX KDM shouldn't there be a better
1435                                  * argument to return??
1436                                  */
1437                                 error = EIO;
1438                         } else {
1439                                 /* decrement the number of retries */
1440                                 retry = ccb->ccb_h.retry_count > 0;
1441                                 if (retry)
1442                                         ccb->ccb_h.retry_count--;
1443                                 /*
1444                                  * If it was aborted with no
1445                                  * clue as to the reason, just
1446                                  * retry it again.
1447                                  */
1448                                 error = ERESTART;
1449                         }
1450                         break;
1451                 case SCSI_STATUS_QUEUE_FULL:
1452                 {
1453                         /* no decrement */
1454                         struct ccb_getdevstats cgds;
1455
1456                         /*
1457                          * First off, find out what the current
1458                          * transaction counts are.
1459                          */
1460                         xpt_setup_ccb(&cgds.ccb_h,
1461                                       ccb->ccb_h.path,
1462                                       /*priority*/1);
1463                         cgds.ccb_h.func_code = XPT_GDEV_STATS;
1464                         xpt_action((union ccb *)&cgds);
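                        /*
                         * XPT_GDEV_STATS fills cgds with the device's current
                         * counts: dev_active (commands outstanding),
                         * dev_openings (unused queue slots) and mintags
                         * (the quirk-imposed minimum tag count).
                         */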
1465
1466                         /*
1467                          * If we were the only transaction active, treat
1468                          * the QUEUE FULL as if it were a BUSY condition.
1469                          */
1470                         if (cgds.dev_active != 0) {
1471                                 int total_openings;
1472
1473                                 /*
1474                                  * Reduce the number of openings to
1475                                  * be 1 less than the amount it took
1476                                  * to get a queue full bounded by the
1477                                  * minimum allowed tag count for this
1478                                  * device.
1479                                  */
1480                                 total_openings =
1481                                     cgds.dev_active+cgds.dev_openings;
1482                                 openings = cgds.dev_active;
1483                                 if (openings < cgds.mintags)
1484                                         openings = cgds.mintags;
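                                 /*
                                  * For example (hypothetical numbers): with 4
                                  * commands active, 28 unused openings and a
                                  * quirk minimum of 2 tags, the tag depth is
                                  * trimmed from 32 to 4 via
                                  * RELSIM_ADJUST_OPENINGS below.
                                  */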
1485                                 if (openings < total_openings)
1486                                         relsim_flags = RELSIM_ADJUST_OPENINGS;
1487                                 else {
1488                                         /*
1489                                          * Some devices report queue full for
1490                                          * temporary resource shortages.  For
1491                                          * this reason, we allow a minimum
1492                                          * tag count to be entered via a
1493                                          * quirk entry to prevent the queue
1494                                          * count on these devices from falling
1495                                          * to a pessimistically low value.  We
1496                                          * still wait for the next successful
1497                                          * completion, however, before queueing
1498                                          * more transactions to the device.
1499                                          */
1500                                         relsim_flags =
1501                                             RELSIM_RELEASE_AFTER_CMDCMPLT;
1502                                 }
1503                                 timeout = 0;
1504                                 error = ERESTART;
1505                                 break;
1506                         }
1507                         /* FALLTHROUGH */
1508                 }
1509                 case SCSI_STATUS_BUSY:
1510                         /*
1511                          * Restart the queue after either another
1512                          * command completes or a 1 second timeout.
1513                          * If we have any retries left, that is.
1514                          */
1515                         retry = ccb->ccb_h.retry_count > 0;
1516                         if (retry) {
1517                                 ccb->ccb_h.retry_count--;
1518                                 error = ERESTART;
1519                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1520                                              | RELSIM_RELEASE_AFTER_CMDCMPLT;
1521                                 timeout = 1000;
1522                         } else {
1523                                 error = EIO;
1524                         }
1525                         break;
1526                 case SCSI_STATUS_RESERV_CONFLICT:
1527                         error = EIO;
1528                         break;
1529                 default:
1530                         error = EIO;
1531                         break;
1532                 }
1533                 break;
1534         case CAM_REQ_CMP_ERR:
1535         case CAM_CMD_TIMEOUT:
1536         case CAM_UNEXP_BUSFREE:
1537         case CAM_UNCOR_PARITY:
1538         case CAM_DATA_RUN_ERR:
1539                 /* decrement the number of retries */
1540                 retry = ccb->ccb_h.retry_count > 0;
1541                 if (retry) {
1542                         ccb->ccb_h.retry_count--;
1543                         error = ERESTART;
1544                 } else {
1545                         error = EIO;
1546                 }
1547                 break;
1548         case CAM_UA_ABORT:
1549         case CAM_UA_TERMIO:
1550         case CAM_MSG_REJECT_REC:
1551                 /* XXX Don't know that these are correct */
1552                 error = EIO;
1553                 break;
1554         case CAM_SEL_TIMEOUT:
1555         {
1556                 /*
1557                  * XXX
1558                  * A single selection timeout should not be enough
1559                  * to invalidate a device.  We should retry for multiple
1560                  * seconds assuming this isn't a probe.  We'll probably
1561                  * need a special flag for that.
1562                  */
1563 #if 0
1564                 struct cam_path *newpath;
1565
1566                 /* Should we do more if we can't create the path?? */
1567                 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1568                                     xpt_path_path_id(ccb->ccb_h.path),
1569                                     xpt_path_target_id(ccb->ccb_h.path),
1570                                     CAM_LUN_WILDCARD) != CAM_REQ_CMP) 
1571                         break;
1572                 /*
1573                  * Let peripheral drivers know that this device has gone
1574                  * away.
1575                  */
1576                 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1577                 xpt_free_path(newpath);
1578 #endif
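                /*
                 * Callers that pass SF_RETRY_SELTO get a bounded number of
                 * retries with a half-second backoff; otherwise the selection
                 * timeout is reported as ENXIO.
                 */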
1579                 if ((sense_flags & SF_RETRY_SELTO) != 0) {
1580                         retry = ccb->ccb_h.retry_count > 0;
1581                         if (retry) {
1582                                 ccb->ccb_h.retry_count--;
1583                                 error = ERESTART;
1584                                 /*
1585                                  * Wait half a second to give the device
1586                                  * time to recover before we try again.
1587                                  */
1588                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1589                                 timeout = 500;
1590                         } else {
1591                                 error = ENXIO;
1592                         }
1593                 } else {
1594                         error = ENXIO;
1595                 }
1596                 break;
1597         }
1598         case CAM_REQ_INVALID:
1599         case CAM_PATH_INVALID:
1600         case CAM_DEV_NOT_THERE:
1601         case CAM_NO_HBA:
1602         case CAM_PROVIDE_FAIL:
1603         case CAM_REQ_TOO_BIG:           
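                /*
                 * These statuses indicate that the request itself is
                 * malformed or cannot be delivered, so it is failed with
                 * EINVAL rather than retried.
                 */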
1604                 error = EINVAL;
1605                 break;
1606         case CAM_SCSI_BUS_RESET:
1607         case CAM_BDR_SENT:              
1608         case CAM_REQUEUE_REQ:
1609                 /* Unconditional requeue, dammit */
1610                 error = ERESTART;
1611                 break;
1612         case CAM_RESRC_UNAVAIL:
1613         case CAM_BUSY:
1614                 /* timeout??? */
1615         default:
1616                 /* decrement the number of retries */
1617                 retry = ccb->ccb_h.retry_count > 0;
1618                 if (retry) {
1619                         ccb->ccb_h.retry_count--;
1620                         error = ERESTART;
1621                 } else {
1622                         /* Check the sense codes */
1623                         error = EIO;
1624                 }
1625                 break;
1626         }
1627
1628         /* Attempt a retry */
1629         if (error == ERESTART || error == 0) {  
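                /*
                 * If we froze the device queue, we release it ourselves
                 * below, so strip the frozen indication from the CCB before
                 * it is requeued.
                 */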
1630                 if (frozen != 0)
1631                         ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1632
1633                 if (error == ERESTART)
1634                         xpt_action(ccb);
1635                 
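                /*
                 * Release the device queue using the relsim_flags, openings
                 * adjustment and timeout chosen during recovery above.
                 */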
1636                 if (frozen != 0) {
1637                         cam_release_devq(ccb->ccb_h.path,
1638                                          relsim_flags,
1639                                          openings,
1640                                          timeout,
1641                                          /*getcount_only*/0);
1642                 }
1643         }
1644
1646         return (error);
1647 }
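
/*
 * Usage sketch (not part of this file): a peripheral driver's completion
 * routine typically hands a failed CCB to cam_periph_error() and simply
 * returns when ERESTART comes back, since the CCB has already been
 * requeued above.  The softc and saved_ccb names here are illustrative.
 *
 *	error = cam_periph_error(done_ccb, 0, SF_RETRY_UA | SF_RETRY_SELTO,
 *				 &softc->saved_ccb);
 *	if (error == ERESTART)
 *		return;			-- a retry has been scheduled
 */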