1 /*
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/cam/cam_periph.c,v 1.24.2.3 2003/01/25 19:04:40 dillon Exp $
30  * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.39 2007/12/02 04:54:40 pavalos Exp $
31  */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/buf.h>
39 #include <sys/proc.h>
40 #include <sys/devicestat.h>
41 #include <sys/bus.h>
42 #include <vm/vm.h>
43 #include <vm/vm_extern.h>
44
45 #include <sys/thread2.h>
46
47 #include "cam.h"
48 #include "cam_ccb.h"
49 #include "cam_xpt_periph.h"
50 #include "cam_periph.h"
51 #include "cam_debug.h"
52
53 #include <bus/cam/scsi/scsi_all.h>
54 #include <bus/cam/scsi/scsi_message.h>
55 #include <bus/cam/scsi/scsi_pass.h>
56
57 static  u_int           camperiphnextunit(struct periph_driver *p_drv,
58                                           u_int newunit, int wired,
59                                           path_id_t pathid, target_id_t target,
60                                           lun_id_t lun);
61 static  u_int           camperiphunit(struct periph_driver *p_drv,
62                                       path_id_t pathid, target_id_t target,
63                                       lun_id_t lun); 
64 static  void            camperiphdone(struct cam_periph *periph, 
65                                         union ccb *done_ccb);
66 static  void            camperiphfree(struct cam_periph *periph);
67 static int              camperiphscsistatuserror(union ccb *ccb,
68                                                  cam_flags camflags,
69                                                  u_int32_t sense_flags,
70                                                  union ccb *save_ccb,
71                                                  int *openings,
72                                                  u_int32_t *relsim_flags,
73                                                  u_int32_t *timeout);
74 static  int             camperiphscsisenseerror(union ccb *ccb,
75                                                 cam_flags camflags,
76                                                 u_int32_t sense_flags,
77                                                 union ccb *save_ccb,
78                                                 int *openings,
79                                                 u_int32_t *relsim_flags,
80                                                 u_int32_t *timeout);
81
82 static int nperiph_drivers;
83 struct periph_driver **periph_drivers;
84
85 MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
86
87 static int periph_selto_delay = 1000;
88 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
89 static int periph_noresrc_delay = 500;
90 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
91 static int periph_busy_delay = 500;
92 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
93
94
95 void
96 periphdriver_register(void *data)
97 {
98         struct periph_driver **newdrivers, **old;
99         int ndrivers;
100
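        /*
         * Allocate room for the new driver plus a terminating NULL entry;
         * the existing table, if any, is copied over and then freed.
         */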
101         ndrivers = nperiph_drivers + 2;
102         newdrivers = kmalloc(sizeof(*newdrivers) * ndrivers, M_TEMP, M_WAITOK);
103         if (periph_drivers)
104                 bcopy(periph_drivers, newdrivers,
105                       sizeof(*newdrivers) * nperiph_drivers);
106         newdrivers[nperiph_drivers] = (struct periph_driver *)data;
107         newdrivers[nperiph_drivers + 1] = NULL;
108         old = periph_drivers;
109         periph_drivers = newdrivers;
110         if (old)
111                 kfree(old, M_TEMP);
112         nperiph_drivers++;
113 }
114
115 cam_status
116 cam_periph_alloc(periph_ctor_t *periph_ctor,
117                  periph_oninv_t *periph_oninvalidate,
118                  periph_dtor_t *periph_dtor, periph_start_t *periph_start,
119                  char *name, cam_periph_type type, struct cam_path *path,
120                  ac_callback_t *ac_callback, ac_code code, void *arg)
121 {
122         struct          periph_driver **p_drv;
123         struct          cam_periph *periph;
124         struct          cam_periph *cur_periph;
125         path_id_t       path_id;
126         target_id_t     target_id;
127         lun_id_t        lun_id;
128         cam_status      status;
129         u_int           init_level;
130
131         init_level = 0;
132         /*
133          * Handle Hot-Plug scenarios.  If there is already a peripheral
134          * of our type assigned to this path, we are likely waiting for
135          * final close on an old, invalidated, peripheral.  If this is
136          * the case, queue up a deferred call to the peripheral's async
137          * handler.  If it looks like a mistaken re-allocation, complain.
138          */
139         if ((periph = cam_periph_find(path, name)) != NULL) {
140
141                 if ((periph->flags & CAM_PERIPH_INVALID) != 0
142                  && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
143                         periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
144                         periph->deferred_callback = ac_callback;
145                         periph->deferred_ac = code;
146                         return (CAM_REQ_INPROG);
147                 } else {
148                         kprintf("cam_periph_alloc: attempt to re-allocate "
149                                "valid device %s%d rejected\n",
150                                periph->periph_name, periph->unit_number);
151                 }
152                 return (CAM_REQ_INVALID);
153         }
154         
155         periph = kmalloc(sizeof(*periph), M_CAMPERIPH, M_INTWAIT | M_ZERO);
156         
157         init_level++;
158
159         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
160                 if (strcmp((*p_drv)->driver_name, name) == 0)
161                         break;
162         }
163         
164         path_id = xpt_path_path_id(path);
165         target_id = xpt_path_target_id(path);
166         lun_id = xpt_path_lun_id(path);
167         cam_init_pinfo(&periph->pinfo);
168         periph->periph_start = periph_start;
169         periph->periph_dtor = periph_dtor;
170         periph->periph_oninval = periph_oninvalidate;
171         periph->type = type;
172         periph->periph_name = name;
173         periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
174         periph->immediate_priority = CAM_PRIORITY_NONE;
175         periph->refcount = 0;
176         SLIST_INIT(&periph->ccb_list);
177         status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
178         if (status != CAM_REQ_CMP)
179                 goto failure;
180
181         periph->path = path;
182         init_level++;
183
184         status = xpt_add_periph(periph);
185
186         if (status != CAM_REQ_CMP)
187                 goto failure;
188
189         crit_enter();
190         cur_periph = TAILQ_FIRST(&(*p_drv)->units);
191         while (cur_periph != NULL
192             && cur_periph->unit_number < periph->unit_number)
193                 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
194
195         if (cur_periph != NULL)
196                 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
197         else {
198                 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
199                 (*p_drv)->generation++;
200         }
201
202         crit_exit();
203
204         init_level++;
205
206         status = periph_ctor(periph, arg);
207
208         if (status == CAM_REQ_CMP)
209                 init_level++;
210
211 failure:
212         switch (init_level) {
213         case 4:
214                 /* Initialized successfully */
215                 break;
216         case 3:
217                 crit_enter();
218                 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
219                 crit_exit();
220                 xpt_remove_periph(periph);
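                /* FALLTHROUGH - undo each earlier init level in turn */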
221         case 2:
222                 xpt_free_path(periph->path);
223         case 1:
224                 kfree(periph, M_CAMPERIPH);
225         case 0:
226                 /* No cleanup to perform. */
227                 break;
228         default:
229                 panic("cam_periph_alloc: Unknown init level");
230         }
231         return(status);
232 }
233
234 /*
235  * Find a peripheral structure with the specified path, target, lun, 
236  * and (optionally) type.  If the name is NULL, this function will return
237  * the first peripheral driver that matches the specified path.
238  */
239 struct cam_periph *
240 cam_periph_find(struct cam_path *path, char *name)
241 {
242         struct periph_driver **p_drv;
243         struct cam_periph *periph;
244
245         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
246                 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
247                         continue;
248
249                 crit_enter();
250                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
251                         if (xpt_path_comp(periph->path, path) == 0) {
252                                 crit_exit();
253                                 return(periph);
254                         }
255                 }
256                 crit_exit();
257                 if (name != NULL)
258                         return(NULL);
259         }
260         return(NULL);
261 }
262
263 cam_status
264 cam_periph_acquire(struct cam_periph *periph)
265 {
266         if (periph == NULL)
267                 return(CAM_REQ_CMP_ERR);
268
269         crit_enter();
270         periph->refcount++;
271         crit_exit();
272
273         return(CAM_REQ_CMP);
274 }
275
276 void
277 cam_periph_release(struct cam_periph *periph)
278 {
279         if (periph == NULL)
280                 return;
281
282         crit_enter();
283         if ((--periph->refcount == 0)
284          && (periph->flags & CAM_PERIPH_INVALID)) {
285                 camperiphfree(periph);
286         }
287         crit_exit();
288 }
289
290 /*
291  * Look for the next unit number that is not currently in use for this
292  * peripheral type starting at "newunit".  Also exclude unit numbers that
293  * are reserved for future "hardwiring" unless we already know that this
294  * is a potential wired device.  Only assume that the device is "wired" the
295  * first time through the loop since after that we'll be looking at unit
296  * numbers that did not match a wiring entry.
297  */
298 static u_int
299 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
300                   path_id_t pathid, target_id_t target, lun_id_t lun)
301 {
302         struct  cam_periph *periph;
303         char    *periph_name, *strval;
304         int     i, val, dunit;
305         const char *dname;
306
307         crit_enter();
308         periph_name = p_drv->driver_name;
309         for (;;newunit++) {
310
311                 for (periph = TAILQ_FIRST(&p_drv->units);
312                      periph != NULL && periph->unit_number != newunit;
313                      periph = TAILQ_NEXT(periph, unit_links))
314                         ;
315
316                 if (periph != NULL && periph->unit_number == newunit) {
317                         if (wired != 0) {
318                                 xpt_print_path(periph->path);
319                                 kprintf("Duplicate Wired Device entry!\n");
320                                 xpt_print_path(periph->path);
321                                 kprintf("Second device (%s device at scbus%d "
322                                        "target %d lun %d) will not be wired\n",
323                                        periph_name, pathid, target, lun);
324                                 wired = 0;
325                         }
326                         continue;
327                 }
328                 if (wired)
329                         break;
330
331                 /*
332                  * Don't match entries like "da 4" as a wired down
333                  * device, but do match entries like "da 4 target 5"
334                  * or even "da 4 scbus 1". 
335                  */
336                 i = -1;
337                 while ((i = resource_locate(i, periph_name)) != -1) {
338                         dname = resource_query_name(i);
339                         dunit = resource_query_unit(i);
340                         /* if no "target" and no specific scbus, skip */
341                         if (resource_int_value(dname, dunit, "target", &val) &&
342                             (resource_string_value(dname, dunit, "at",&strval)||
343                              strcmp(strval, "scbus") == 0))
344                                 continue;
345                         if (newunit == dunit)
346                                 break;
347                 }
348                 if (i == -1)
349                         break;
350         }
351         crit_exit();
352         return (newunit);
353 }
354
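/*
 * Pick a unit number for a new peripheral instance.  If a config hint
 * wires a unit to this bus, target, or lun, start the search from that
 * unit; otherwise start from 0 and take the next number not in use.
 */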
355 static u_int
356 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
357               target_id_t target, lun_id_t lun)
358 {
359         u_int   unit;
360         int     hit, i, val, dunit;
361         const char *dname;
362         char    pathbuf[32], *strval, *periph_name;
363
364         unit = 0;
365
366         periph_name = p_drv->driver_name;
367         ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
368         i = -1;
369         for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
370                 dname = resource_query_name(i);
371                 dunit = resource_query_unit(i);
372                 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
373                         if (strcmp(strval, pathbuf) != 0)
374                                 continue;
375                         hit++;
376                 }
377                 if (resource_int_value(dname, dunit, "target", &val) == 0) {
378                         if (val != target)
379                                 continue;
380                         hit++;
381                 }
382                 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
383                         if (val != lun)
384                                 continue;
385                         hit++;
386                 }
387                 if (hit != 0) {
388                         unit = dunit;
389                         break;
390                 }
391         }
392
393         /*
394          * Either start from 0 looking for the next unit or from
395          * the unit number given in the resource config.  This way,
396          * if we have wildcard matches, we don't return the same
397          * unit number twice.
398          */
399         unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
400                                  target, lun);
401
402         return (unit);
403 }
404
405 void
406 cam_periph_invalidate(struct cam_periph *periph)
407 {
408         /*
409          * We only call this routine the first time a peripheral is
410          * invalidated.  The oninvalidate() routine is always called in
411          * a critical section.
412          */
413         crit_enter();
414         if (((periph->flags & CAM_PERIPH_INVALID) == 0)
415          && (periph->periph_oninval != NULL))
416                 periph->periph_oninval(periph);
417
418         periph->flags |= CAM_PERIPH_INVALID;
419         periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
420
421         if (periph->refcount == 0)
422                 camperiphfree(periph);
423         else if (periph->refcount < 0)
424                 kprintf("cam_invalidate_periph: refcount < 0!!\n");
425         crit_exit();
426 }
427
428 static void
429 camperiphfree(struct cam_periph *periph)
430 {
431         struct periph_driver **p_drv;
432
433         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
434                 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
435                         break;
436         }
437
438         if (*p_drv == NULL) {
439                 kprintf("camperiphfree: attempt to free "
440                         "non-existent periph: %s\n", periph->periph_name);
441                 return;
442         }
443         
444         if (periph->periph_dtor != NULL)
445                 periph->periph_dtor(periph);
446         
447         crit_enter();
448         TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
449         (*p_drv)->generation++;
450         crit_exit();
451
452         xpt_remove_periph(periph);
453
454         if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
455                 union ccb ccb;
456                 void *arg;
457
458                 switch (periph->deferred_ac) {
459                 case AC_FOUND_DEVICE:
460                         ccb.ccb_h.func_code = XPT_GDEV_TYPE;
461                         xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
462                         xpt_action(&ccb);
463                         arg = &ccb;
464                         break;
465                 case AC_PATH_REGISTERED:
466                         ccb.ccb_h.func_code = XPT_PATH_INQ;
467                         xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
468                         xpt_action(&ccb);
469                         arg = &ccb;
470                         break;
471                 default:
472                         arg = NULL;
473                         break;
474                 }
475                 periph->deferred_callback(NULL, periph->deferred_ac,
476                                           periph->path, arg);
477         }
478         xpt_free_path(periph->path);
479         kfree(periph, M_CAMPERIPH);
480 }
481
482 /*
483  * Wait interruptibly for an exclusive lock.
484  */
485 int
486 cam_periph_lock(struct cam_periph *periph, int flags)
487 {
488         int error;
489
490         /*
491          * Increment the reference count on the peripheral
492          * while we wait for our lock attempt to succeed
493          * to ensure the peripheral doesn't disappear out
494          * from under us while we sleep.
495          */
496         if (cam_periph_acquire(periph) != CAM_REQ_CMP)
497                 return(ENXIO);
498
499         while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
500                 periph->flags |= CAM_PERIPH_LOCK_WANTED;
501                 if ((error = tsleep(periph, flags, "caplck", 0)) != 0) {
502                         cam_periph_release(periph);
503                         return error;
504                 }
505         }
506
507         periph->flags |= CAM_PERIPH_LOCKED;
508         return 0;
509 }
510
511 /*
512  * Unlock and wake up any waiters.
513  */
514 void
515 cam_periph_unlock(struct cam_periph *periph)
516 {
517         periph->flags &= ~CAM_PERIPH_LOCKED;
518         if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
519                 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
520                 wakeup(periph);
521         }
522
523         cam_periph_release(periph);
524 }
525
526 /*
527  * Map user virtual pointers into kernel virtual address space, so we can
528  * access the memory.  This won't work on physical pointers; for now it's
529  * up to the caller to check for that.  (XXX KDM -- should we do that here
530  * instead?)  This also only works for up to DFLTPHYS memory.  Since we
531  * buffers to map stuff in and out, we're limited to the buffer size.
532  */
533 int
534 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
535 {
536         int numbufs, i, j;
537         buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
538         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
539         u_int32_t lengths[CAM_PERIPH_MAXMAPS];
540         u_int32_t dirs[CAM_PERIPH_MAXMAPS];
541
542         switch(ccb->ccb_h.func_code) {
543         case XPT_DEV_MATCH:
544                 if (ccb->cdm.match_buf_len == 0) {
545                         kprintf("cam_periph_mapmem: invalid match buffer "
546                                "length 0\n");
547                         return(EINVAL);
548                 }
549                 if (ccb->cdm.pattern_buf_len > 0) {
550                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
551                         lengths[0] = ccb->cdm.pattern_buf_len;
552                         dirs[0] = CAM_DIR_OUT;
553                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
554                         lengths[1] = ccb->cdm.match_buf_len;
555                         dirs[1] = CAM_DIR_IN;
556                         numbufs = 2;
557                 } else {
558                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
559                         lengths[0] = ccb->cdm.match_buf_len;
560                         dirs[0] = CAM_DIR_IN;
561                         numbufs = 1;
562                 }
563                 break;
564         case XPT_SCSI_IO:
565         case XPT_CONT_TARGET_IO:
566                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
567                         return(0);
568
569                 data_ptrs[0] = &ccb->csio.data_ptr;
570                 lengths[0] = ccb->csio.dxfer_len;
571                 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
572                 numbufs = 1;
573                 break;
574         default:
575                 return(EINVAL);
576                 break; /* NOTREACHED */
577         }
578
579         /*
580          * Check the transfer length and permissions first, so we don't
581          * have to unmap any previously mapped buffers.
582          */
583         for (i = 0; i < numbufs; i++) {
584                 /*
585          * It's somewhat bogus: we really need an R+W command.  For now
586          * the buffer needs some sort of command.  Use BUF_CMD_WRITE
587                  * to indicate a write and BUF_CMD_READ to indicate R+W.
588                  */
589                 cmd[i] = BUF_CMD_WRITE;
590
591                 /*
592                  * The userland data pointer passed in may not be page
593                  * aligned.  vmapbuf() truncates the address to a page
594                  * boundary, so if the address isn't page aligned, we'll
595                  * need enough space for the given transfer length, plus
596                  * whatever extra space is necessary to make it to the page
597                  * boundary.
598                  */
599                 if ((lengths[i] +
600                     (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
601                         kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
602                                "which is greater than DFLTPHYS(%d)\n",
603                                (long)(lengths[i] +
604                                (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
605                                DFLTPHYS);
606                         return(E2BIG);
607                 }
608
609                 if (dirs[i] & CAM_DIR_OUT) {
610                         if (!useracc(*data_ptrs[i], lengths[i], 
611                                      VM_PROT_READ)) {
612                                 kprintf("cam_periph_mapmem: error, "
613                                         "address %p, length %lu isn't "
614                                         "user accessible for READ\n",
615                                         (void *)*data_ptrs[i],
616                                         (u_long)lengths[i]);
617                                 return(EACCES);
618                         }
619                 }
620
621                 if (dirs[i] & CAM_DIR_IN) {
622                         cmd[i] = BUF_CMD_READ;
623                         if (!useracc(*data_ptrs[i], lengths[i], 
624                                      VM_PROT_WRITE)) {
625                                 kprintf("cam_periph_mapmem: error, "
626                                         "address %p, length %lu isn't "
627                                         "user accessible for WRITE\n",
628                                         (void *)*data_ptrs[i],
629                                         (u_long)lengths[i]);
630
631                                 return(EACCES);
632                         }
633                 }
634
635         }
636
637         for (i = 0; i < numbufs; i++) {
638                 /*
639                  * Get the buffer.
640                  */
641                 mapinfo->bp[i] = getpbuf(NULL);
642
643                 /* save the original user pointer */
644                 mapinfo->saved_ptrs[i] = *data_ptrs[i];
645
646                 /* set the flags */
647                 mapinfo->bp[i]->b_cmd = cmd[i];
648
649                 /* map the user buffer into kernel memory */
650                 if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i]) < 0) {
651                         kprintf("cam_periph_mapmem: error, "
652                                 "address %p, length %lu isn't "
653                                 "user accessible any more\n",
654                                 (void *)*data_ptrs[i],
655                                 (u_long)lengths[i]);
656                         for (j = 0; j < i; ++j) {
657                                 *data_ptrs[j] = mapinfo->saved_ptrs[j];
658                                 vunmapbuf(mapinfo->bp[j]);
659                                 relpbuf(mapinfo->bp[j], NULL);
660                         }
661                         mapinfo->num_bufs_used -= i;
662                         return(EACCES);
663                 }
664
665                 /* set our pointer to the new mapped area */
666                 *data_ptrs[i] = mapinfo->bp[i]->b_data;
667
668                 mapinfo->num_bufs_used++;
669         }
670
671         return(0);
672 }
673
674 /*
675  * Unmap memory segments mapped into kernel virtual address space by
676  * cam_periph_mapmem().
677  */
678 void
679 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
680 {
681         int numbufs, i;
682         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
683
684         if (mapinfo->num_bufs_used <= 0) {
685                 /* allow ourselves to be swapped once again */
686                 return;
687         }
688
689         switch (ccb->ccb_h.func_code) {
690         case XPT_DEV_MATCH:
691                 numbufs = min(mapinfo->num_bufs_used, 2);
692
693                 if (numbufs == 1) {
694                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
695                 } else {
696                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
697                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
698                 }
699                 break;
700         case XPT_SCSI_IO:
701         case XPT_CONT_TARGET_IO:
702                 data_ptrs[0] = &ccb->csio.data_ptr;
703                 numbufs = min(mapinfo->num_bufs_used, 1);
704                 break;
705         default:
706                 /* allow ourselves to be swapped once again */
707                 return;
708                 break; /* NOTREACHED */ 
709         }
710
711         for (i = 0; i < numbufs; i++) {
712                 /* Set the user's pointer back to the original value */
713                 *data_ptrs[i] = mapinfo->saved_ptrs[i];
714
715                 /* unmap the buffer */
716                 vunmapbuf(mapinfo->bp[i]);
717
718                 /* release the buffer */
719                 relpbuf(mapinfo->bp[i], NULL);
720         }
721
722         /* allow ourselves to be swapped once again */
723 }
724
725 union ccb *
726 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
727 {
728         struct ccb_hdr *ccb_h;
729
730         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));
731
732         crit_enter();
733         
734         while (SLIST_FIRST(&periph->ccb_list) == NULL) {
735                 if (periph->immediate_priority > priority)
736                         periph->immediate_priority = priority;
737                 xpt_schedule(periph, priority);
738                 if ((SLIST_FIRST(&periph->ccb_list) != NULL)
739                  && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
740                         break;
741                 tsleep(&periph->ccb_list, 0, "cgticb", 0);
742         }
743
744         ccb_h = SLIST_FIRST(&periph->ccb_list);
745         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
746         crit_exit();
747         return ((union ccb *)ccb_h);
748 }
749
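/*
 * Wait for a previously dispatched CCB to complete; see
 * cam_periph_runccb() for the typical usage.
 */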
750 void
751 cam_periph_ccbwait(union ccb *ccb)
752 {
753         crit_enter();
754         if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
755          || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
756                 tsleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0);
757         crit_exit();
758 }
759
760 int
761 cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
762                  int (*error_routine)(union ccb *ccb, 
763                                       cam_flags camflags,
764                                       u_int32_t sense_flags))
765 {
766         union ccb            *ccb;
767         int                  error;
768         int                  found;
769
770         error = found = 0;
771
772         switch(cmd){
773         case CAMGETPASSTHRU:
774                 ccb = cam_periph_getccb(periph, /* priority */ 1);
775                 xpt_setup_ccb(&ccb->ccb_h,
776                               ccb->ccb_h.path,
777                               /*priority*/1);
778                 ccb->ccb_h.func_code = XPT_GDEVLIST;
779
780                 /*
781                  * Basically, the point of this is that we go through
782                  * getting the list of devices, until we find a passthrough
783                  * device.  In the current version of the CAM code, the
784                  * only way to determine what type of device we're dealing
785                  * with is by its name.
786                  */
787                 while (found == 0) {
788                         ccb->cgdl.index = 0;
789                         ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
790                         while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
791
792                                 /* we want the next device in the list */
793                                 xpt_action(ccb);
794                                 if (strncmp(ccb->cgdl.periph_name, 
795                                     "pass", 4) == 0){
796                                         found = 1;
797                                         break;
798                                 }
799                         }
800                         if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
801                             (found == 0)) {
802                                 ccb->cgdl.periph_name[0] = '\0';
803                                 ccb->cgdl.unit_number = 0;
804                                 break;
805                         }
806                 }
807
808                 /* copy the result back out */  
809                 bcopy(ccb, addr, sizeof(union ccb));
810
811                 /* and release the ccb */
812                 xpt_release_ccb(ccb);
813
814                 break;
815         default:
816                 error = ENOTTY;
817                 break;
818         }
819         return(error);
820 }
821
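/*
 * Run a CCB synchronously: dispatch it, wait for completion, and invoke
 * the supplied error routine for as long as it requests a retry
 * (ERESTART).  Any device queue freeze left behind is released, and
 * devstat accounting is performed for SCSI I/O if "ds" is supplied.
 */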
822 int
823 cam_periph_runccb(union ccb *ccb,
824                   int (*error_routine)(union ccb *ccb,
825                                        cam_flags camflags,
826                                        u_int32_t sense_flags),
827                   cam_flags camflags, u_int32_t sense_flags,
828                   struct devstat *ds)
829 {
830         int error;
831  
832         error = 0;
833         
834         /*
835          * If the user has supplied a stats structure, and if we understand
836          * this particular type of ccb, record the transaction start.
837          */
838         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
839                 devstat_start_transaction(ds);
840
841         xpt_action(ccb);
842  
843         do {
844                 cam_periph_ccbwait(ccb);
845                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
846                         error = 0;
847                 else if (error_routine != NULL)
848                         error = (*error_routine)(ccb, camflags, sense_flags);
849                 else
850                         error = 0;
851
852         } while (error == ERESTART);
853           
854         if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 
855                 cam_release_devq(ccb->ccb_h.path,
856                                  /* relsim_flags */0,
857                                  /* openings */0,
858                                  /* timeout */0,
859                                  /* getcount_only */ FALSE);
860
861         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
862                 devstat_end_transaction(ds,
863                                         ccb->csio.dxfer_len,
864                                         ccb->csio.tag_action & 0xf,
865                                         ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
866                                         CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
867                                         (ccb->ccb_h.flags & CAM_DIR_OUT) ?
868                                         DEVSTAT_WRITE : 
869                                         DEVSTAT_READ);
870
871         return(error);
872 }
873
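/*
 * Freeze the device queue by issuing a no-op CCB with CAM_DEV_QFREEZE set.
 */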
874 void
875 cam_freeze_devq(struct cam_path *path)
876 {
877         struct ccb_hdr ccb_h;
878
879         xpt_setup_ccb(&ccb_h, path, /*priority*/1);
880         ccb_h.func_code = XPT_NOOP;
881         ccb_h.flags = CAM_DEV_QFREEZE;
882         xpt_action((union ccb *)&ccb_h);
883 }
884
885 u_int32_t
886 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
887                  u_int32_t openings, u_int32_t timeout,
888                  int getcount_only)
889 {
890         struct ccb_relsim crs;
891
892         xpt_setup_ccb(&crs.ccb_h, path,
893                       /*priority*/1);
894         crs.ccb_h.func_code = XPT_REL_SIMQ;
895         crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
896         crs.release_flags = relsim_flags;
897         crs.openings = openings;
898         crs.release_timeout = timeout;
899         xpt_action((union ccb *)&crs);
900         return (crs.qfrozen_cnt);
901 }
902
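/*
 * Completion handler for the recovery commands issued during error
 * recovery (see camperiphscsisenseerror()).  The original CCB is stashed
 * via the saved_ccb_ptr field (ppriv_ptr0, aliased below) and is copied
 * back once recovery completes or fails.
 */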
903 #define saved_ccb_ptr ppriv_ptr0
904 static void
905 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
906 {
907         union ccb      *saved_ccb;
908         cam_status      status;
909         int             frozen;
910         int             sense;
911         struct scsi_start_stop_unit *scsi_cmd;
912         u_int32_t       relsim_flags, timeout;
913         u_int32_t       qfrozen_cnt;
914         int             xpt_done_ccb;
915
916         xpt_done_ccb = FALSE;
917         status = done_ccb->ccb_h.status;
918         frozen = (status & CAM_DEV_QFRZN) != 0;
919         sense  = (status & CAM_AUTOSNS_VALID) != 0;
920         status &= CAM_STATUS_MASK;
921
922         timeout = 0;
923         relsim_flags = 0;
924         saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
925
926         /* 
927          * Unfreeze the queue once if it is already frozen.
928          */
929         if (frozen != 0) {
930                 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
931                                               /*relsim_flags*/0,
932                                               /*openings*/0,
933                                               /*timeout*/0,
934                                               /*getcount_only*/0);
935         }
936
937         switch (status) {
938         case CAM_REQ_CMP:
939         {
940                 /*
941                  * If we have successfully taken a device from the not
942                  * ready to ready state, re-scan the device and re-get
943                  * the inquiry information.  Many devices (mostly disks)
944                  * don't properly report their inquiry information unless
945                  * they are spun up.
946                  *
947                  * If we manually retrieved sense into a CCB and got
948                  * something other than "NO SENSE" send the updated CCB
949                  * back to the client via xpt_done() to be processed via
950                  * the error recovery code again.
951                  */
952                 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
953                         scsi_cmd = (struct scsi_start_stop_unit *)
954                                         &done_ccb->csio.cdb_io.cdb_bytes;
955
956                         if (scsi_cmd->opcode == START_STOP_UNIT)
957                                 xpt_async(AC_INQ_CHANGED,
958                                           done_ccb->ccb_h.path, NULL);
959                         if (scsi_cmd->opcode == REQUEST_SENSE) {
960                                 u_int sense_key;
961
962                                 sense_key = saved_ccb->csio.sense_data.flags;
963                                 sense_key &= SSD_KEY;
964                                 if (sense_key != SSD_KEY_NO_SENSE) {
965                                         saved_ccb->ccb_h.status |=
966                                             CAM_AUTOSNS_VALID;
967 #if 0
968                                         xpt_print_path(saved_ccb->ccb_h.path);
969                                         kprintf("Recovered Sense\n");
970                                         scsi_sense_print(&saved_ccb->csio);
971                                         cam_error_print(saved_ccb, CAM_ESF_ALL,
972                                                         CAM_EPF_ALL);
973 #endif
974                                         xpt_done_ccb = TRUE;
975                                 }
976                         }
977                 }
978                 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
979                       sizeof(union ccb));
980
981                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
982
983                 if (xpt_done_ccb == FALSE)
984                         xpt_action(done_ccb);
985
986                 break;
987         }
988         case CAM_SCSI_STATUS_ERROR:
989                 scsi_cmd = (struct scsi_start_stop_unit *)
990                                 &done_ccb->csio.cdb_io.cdb_bytes;
991                 if (sense != 0) {
992                         struct ccb_getdev cgd;
993                         struct scsi_sense_data *sense;
994                         int    error_code, sense_key, asc, ascq;        
995                         scsi_sense_action err_action;
996
997                         sense = &done_ccb->csio.sense_data;
998                         scsi_extract_sense(sense, &error_code, 
999                                            &sense_key, &asc, &ascq);
1000
1001                         /*
1002                          * Grab the inquiry data for this device.
1003                          */
1004                         xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
1005                                       /*priority*/ 1);
1006                         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1007                         xpt_action((union ccb *)&cgd);
1008                         err_action = scsi_error_action(&done_ccb->csio,
1009                                                        &cgd.inq_data, 0);
1010
1011                         /*
1012                          * If the error is "invalid field in CDB", 
1013                          * and the load/eject flag is set, turn the 
1014                          * flag off and try again.  This is just in 
1015                          * case the drive in question barfs on the 
1016                          * load eject flag.  The CAM code should set 
1017                          * the load/eject flag by default for 
1018                          * removable media.
1019                          */
1020
1021                         /* XXX KDM 
1022                          * Should we check to see what the specific
1023                          * scsi status is?  Or does it not matter
1024                          * since we already know that there was an
1025                          * error, and we know what the specific
1026                          * error code was, and we know what the
1027                          * opcode is?
1028                          */
1029                         if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1030                             ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1031                              (asc == 0x24) && (ascq == 0x00) &&
1032                              (done_ccb->ccb_h.retry_count > 0)) {
1033
1034                                 scsi_cmd->how &= ~SSS_LOEJ;
1035
1036                                 xpt_action(done_ccb);
1037
1038                         } else if ((done_ccb->ccb_h.retry_count > 1)
1039                                 && ((err_action & SS_MASK) != SS_FAIL)) {
1040
1041                                 /*
1042                                  * In this case, the error recovery
1043                                  * command failed, but we've got 
1044                                  * some retries left on it.  Give
1045                                  * it another try unless this is an
1046                                  * unretryable error.
1047                                  */
1048
1049                                 /* set the timeout to .5 sec */
1050                                 relsim_flags =
1051                                         RELSIM_RELEASE_AFTER_TIMEOUT;
1052                                 timeout = 500;
1053
1054                                 xpt_action(done_ccb);
1055
1056                                 break;
1057
1058                         } else {
1059                                 /* 
1060                                  * Perform the final retry with the original
1061                                  * CCB so that final error processing is
1062                                  * performed by the owner of the CCB.
1063                                  */
1064                                 bcopy(done_ccb->ccb_h.saved_ccb_ptr,            
1065                                       done_ccb, sizeof(union ccb));
1066
1067                                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1068
1069                                 xpt_action(done_ccb);
1070                         }
1071                 } else {
1072                         /*
1073                          * Eh??  The command failed, but we don't
1074                          * have any sense.  What's up with that?
1075                          * Fire the CCB again to return it to the
1076                          * caller.
1077                          */
1078                         bcopy(done_ccb->ccb_h.saved_ccb_ptr,
1079                               done_ccb, sizeof(union ccb));
1080
1081                         periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1082
1083                         xpt_action(done_ccb);
1084
1085                 }
1086                 break;
1087         default:
1088                 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
1089                       sizeof(union ccb));
1090
1091                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1092
1093                 xpt_action(done_ccb);
1094
1095                 break;
1096         }
1097
1098         /* decrement the retry count */
1099         /*
1100          * XXX This isn't appropriate in all cases.  Restructure,
1101          *     so that the retry count is only decremented on an
1102          *     actual retry.  Remember that the original ccb had its
1103          *     retry count dropped before entering recovery, so
1104          *     doing it again is a bug.
1105          */
1106         if (done_ccb->ccb_h.retry_count > 0)
1107                 done_ccb->ccb_h.retry_count--;
1108
1109         qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
1110                                       /*relsim_flags*/relsim_flags,
1111                                       /*openings*/0,
1112                                       /*timeout*/timeout,
1113                                       /*getcount_only*/0);
1114         if (xpt_done_ccb == TRUE)
1115                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
1116 }
1117
1118 /*
1119  * Generic Async Event handler.  Peripheral drivers usually
1120  * filter out the events that require personal attention,
1121  * and leave the rest to this function.
1122  */
1123 void
1124 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1125                  struct cam_path *path, void *arg)
1126 {
1127         switch (code) {
1128         case AC_LOST_DEVICE:
1129                 cam_periph_invalidate(periph);
1130                 break; 
1131         case AC_SENT_BDR:
1132         case AC_BUS_RESET:
1133         {
1134                 cam_periph_bus_settle(periph, scsi_delay);
1135                 break;
1136         }
1137         default:
1138                 break;
1139         }
1140 }
1141
1142 void
1143 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1144 {
1145         struct ccb_getdevstats cgds;
1146
1147         xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
1148         cgds.ccb_h.func_code = XPT_GDEV_STATS;
1149         xpt_action((union ccb *)&cgds);
1150         cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1151 }
1152
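/*
 * If less than duration_ms has elapsed since the given event, freeze
 * the device queue for the remainder of that interval.
 */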
1153 void
1154 cam_periph_freeze_after_event(struct cam_periph *periph,
1155                               struct timeval* event_time, u_int duration_ms)
1156 {
1157         struct timeval delta;
1158         struct timeval duration_tv;
1159
1160         microuptime(&delta);
1161         timevalsub(&delta, event_time);
1162         duration_tv.tv_sec = duration_ms / 1000;
1163         duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1164         if (timevalcmp(&delta, &duration_tv, <)) {
1165                 timevalsub(&duration_tv, &delta);
1166
1167                 duration_ms = duration_tv.tv_sec * 1000;
1168                 duration_ms += duration_tv.tv_usec / 1000;
1169                 cam_freeze_devq(periph->path); 
1170                 cam_release_devq(periph->path,
1171                                 RELSIM_RELEASE_AFTER_TIMEOUT,
1172                                 /*reduction*/0,
1173                                 /*timeout*/duration_ms,
1174                                 /*getcount_only*/0);
1175         }
1176
1177 }
1178
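/*
 * Translate a SCSI status error into an errno and a retry policy.
 * CHECK CONDITION and COMMAND TERMINATED are handed to the sense
 * handler; QUEUE FULL reduces the number of openings, and BUSY is
 * retried after a one second delay while retries remain.
 */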
1179 static int
1180 camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
1181                          u_int32_t sense_flags, union ccb *save_ccb,
1182                          int *openings, u_int32_t *relsim_flags,
1183                          u_int32_t *timeout)
1184 {
1185         int error;
1186
1187         switch (ccb->csio.scsi_status) {
1188         case SCSI_STATUS_OK:
1189         case SCSI_STATUS_COND_MET:
1190         case SCSI_STATUS_INTERMED:
1191         case SCSI_STATUS_INTERMED_COND_MET:
1192                 error = 0;
1193                 break;
1194         case SCSI_STATUS_CMD_TERMINATED:
1195         case SCSI_STATUS_CHECK_COND:
1196                 error = camperiphscsisenseerror(ccb,
1197                                                 camflags,
1198                                                 sense_flags,
1199                                                 save_ccb,
1200                                                 openings,
1201                                                 relsim_flags,
1202                                                 timeout);
1203                 break;
1204         case SCSI_STATUS_QUEUE_FULL:
1205         {
1206                 /* no decrement */
1207                 struct ccb_getdevstats cgds;
1208
1209                 /*
1210                  * First off, find out what the current
1211                  * transaction counts are.
1212                  */
1213                 xpt_setup_ccb(&cgds.ccb_h,
1214                               ccb->ccb_h.path,
1215                               /*priority*/1);
1216                 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1217                 xpt_action((union ccb *)&cgds);
1218
1219                 /*
1220                  * If we were the only transaction active, treat
1221                  * the QUEUE FULL as if it were a BUSY condition.
1222                  */
1223                 if (cgds.dev_active != 0) {
1224                         int total_openings;
1225
1226                         /*
1227                          * Reduce the number of openings to
1228                          * be 1 less than the amount it took
1229                          * to get a queue full bounded by the
1230                          * minimum allowed tag count for this
1231                          * device.
1232                          */
1233                         total_openings = cgds.dev_active + cgds.dev_openings;
1234                         *openings = cgds.dev_active;
1235                         if (*openings < cgds.mintags)
1236                                 *openings = cgds.mintags;
1237                         if (*openings < total_openings)
1238                                 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1239                         else {
1240                                 /*
1241                                  * Some devices report queue full for
1242                                  * temporary resource shortages.  For
1243                                  * this reason, we allow a minimum
1244                                  * tag count to be entered via a
1245                                  * quirk entry to prevent the queue
1246                                  * count on these devices from falling
1247                                  * to a pessimistically low value.  We
1248                                  * still wait for the next successful
1249                                  * completion, however, before queueing
1250                                  * more transactions to the device.
1251                                  */
1252                                 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1253                         }
1254                         *timeout = 0;
1255                         error = ERESTART;
1256                         if (bootverbose) {
1257                                 xpt_print_path(ccb->ccb_h.path);
1258                                 kprintf("Queue Full\n");
1259                         }
1260                         break;
1261                 }
1262                 /* FALLTHROUGH */
1263         }
1264         case SCSI_STATUS_BUSY:
1265                 /*
1266                  * Restart the queue after either another
1267                  * command completes or a 1 second timeout.
1268                  */
1269                 if (bootverbose) {
1270                         xpt_print_path(ccb->ccb_h.path);
1271                         kprintf("Device Busy\n");
1272                 }
1273                 if (ccb->ccb_h.retry_count > 0) {
1274                         ccb->ccb_h.retry_count--;
1275                         error = ERESTART;
1276                         *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1277                                       | RELSIM_RELEASE_AFTER_CMDCMPLT;
1278                         *timeout = 1000;
1279                 } else {
1280                         error = EIO;
1281                 }
1282                 break;
1283         case SCSI_STATUS_RESERV_CONFLICT:
1284                 xpt_print_path(ccb->ccb_h.path);
1285                 kprintf("Reservation Conflict\n");
1286                 error = EIO;
1287                 break;
1288         default:
1289                 xpt_print_path(ccb->ccb_h.path);
1290                 kprintf("SCSI Status 0x%x\n", ccb->csio.scsi_status);
1291                 error = EIO;
1292                 break;
1293         }
1294         return (error);
1295 }
1296
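/*
 * Decide how to recover from a CHECK CONDITION.  The sense data (or a
 * REQUEST SENSE, if autosense was disabled) selects an action via
 * scsi_error_action(); actions that require a recovery command save the
 * original CCB in *save_ccb and reuse the caller's CCB for the recovery
 * command, with camperiphdone() as its completion handler.
 */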
1297 static int
1298 camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
1299                         u_int32_t sense_flags, union ccb *save_ccb,
1300                        int *openings, u_int32_t *relsim_flags,
1301                        u_int32_t *timeout)
1302 {
1303         struct cam_periph *periph;
1304         int error;
1305
1306         periph = xpt_path_periph(ccb->ccb_h.path);
1307         if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {
1308
1309                 /*
1310                  * If error recovery is already in progress, don't attempt
1311                  * to process this error, but requeue it unconditionally
1312                  * and attempt to process it once error recovery has
1313                  * completed.  This failed command is probably related to
1314                  * the error that caused the currently active error recovery
1315                  * action, so our current recovery efforts should also
1316                  * address this command.  Be aware that the error recovery
1317                  * code assumes that only one recovery action is in progress
1318                  * on a particular peripheral instance at any given time
1319                  * (e.g. only one saved CCB for error recovery) so it is
1320                  * imperative that we don't violate this assumption.
1321                  */
1322                 error = ERESTART;
1323         } else {
1324                 scsi_sense_action err_action;
1325                 struct ccb_getdev cgd;
1326                 const char *action_string;
1327                 union ccb* print_ccb;
1328
1329                 /* A description of the error recovery action performed */
1330                 action_string = NULL;
1331
1332                 /*
1333                  * The location of the orignal ccb
1334                  * The location of the original ccb
1335                  */
1336                 print_ccb = ccb;
1337
1338                 /*
1339                  * Grab the inquiry data for this device.
1340                  */
1341                 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
1342                 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1343                 xpt_action((union ccb *)&cgd);
1344
1345                 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
1346                         err_action = scsi_error_action(&ccb->csio,
1347                                                        &cgd.inq_data,
1348                                                        sense_flags);
1349                 else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
1350                         err_action = SS_REQSENSE;
1351                 else
1352                         err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1353
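                /*
                 * err_action packs three things together: SS_ERRMASK
                 * extracts the default errno, SS_MASK the recovery action,
                 * and the SSQ_* bits qualify that action.
                 */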
1354                 error = err_action & SS_ERRMASK;
1355
1356                 /*
1357                  * If the recovery action will consume a retry,
1358                  * make sure we actually have retries available.
1359                  */
1360                 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1361                         if (ccb->ccb_h.retry_count > 0)
1362                                 ccb->ccb_h.retry_count--;
1363                         else {
1364                                 action_string = "Retries Exhausted";
1365                                 goto sense_error_done;
1366                         }
1367                 }
1368
1369                 if ((err_action & SS_MASK) >= SS_START) {
1370                         /*
1371                          * Do common portions of commands that
1372                          * use recovery CCBs.
1373                          */
1374                         if (save_ccb == NULL) {
1375                                 action_string = "No recovery CCB supplied";
1376                                 goto sense_error_done;
1377                         }
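                        /*
                         * Save a verbatim copy of the original CCB; the
                         * recovery command constructed below reuses
                         * ccb->csio, and the original is restored and
                         * re-queued once the recovery action completes.
                         */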
1378                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1379                         print_ccb = save_ccb;
1380                         periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1381                 }
1382
1383                 switch (err_action & SS_MASK) {
1384                 case SS_NOP:
1385                         action_string = "No Recovery Action Needed";
1386                         error = 0;
1387                         break;
1388                 case SS_RETRY:
1389                         action_string = "Retrying Command (per Sense Data)";
1390                         error = ERESTART;
1391                         break;
1392                 case SS_FAIL:
1393                         action_string = "Unretryable error";
1394                         break;
1395                 case SS_START:
1396                 {
1397                         int le;
1398
1399                         /*
1400                          * Send a start unit command to the device, and
1401                          * then retry the command.
1402                          */
1403                         action_string = "Attempting to Start Unit";
1404
1405                         /*
1406                          * Check for removable media and set
1407                          * load/eject flag appropriately.
1408                          */
1409                         if (SID_IS_REMOVABLE(&cgd.inq_data))
1410                                 le = TRUE;
1411                         else
1412                                 le = FALSE;
1413
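                        /*
                         * Spinning the media up can take tens of seconds,
                         * hence the generous 50 second timeout below.
                         */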
1414                         scsi_start_stop(&ccb->csio,
1415                                         /*retries*/1,
1416                                         camperiphdone,
1417                                         MSG_SIMPLE_Q_TAG,
1418                                         /*start*/TRUE,
1419                                         /*load/eject*/le,
1420                                         /*immediate*/FALSE,
1421                                         SSD_FULL_SIZE,
1422                                         /*timeout*/50000);
1423                         break;
1424                 }
1425                 case SS_TUR:
1426                 {
1427                         /*
1428                          * Send a Test Unit Ready to the device.
1429                          * If the 'many' flag is set, we send 120
1430                          * test unit ready commands, one every half
1431                          * second.  Otherwise, we just send one TUR.
1432                          * We only want to do this if the retry
1433                          * count has not been exhausted.
1434                          */
1435                         int retries;
1436
1437                         if ((err_action & SSQ_MANY) != 0) {
1438                                 action_string = "Polling device for readiness";
1439                                 retries = 120;
1440                         } else {
1441                                 action_string = "Testing device for readiness";
1442                                 retries = 1;
1443                         }
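                        /*
                         * With the 500ms queue release programmed below,
                         * 120 retries polls the device for roughly a minute.
                         */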
1444                         scsi_test_unit_ready(&ccb->csio,
1445                                              retries,
1446                                              camperiphdone,
1447                                              MSG_SIMPLE_Q_TAG,
1448                                              SSD_FULL_SIZE,
1449                                              /*timeout*/5000);
1450
1451                         /*
1452                          * Accomplish our 500ms delay by deferring
1453                          * the release of our device queue appropriately.
1454                          */
1455                         *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1456                         *timeout = 500;
1457                         break;
1458                 }
1459                 case SS_REQSENSE:
1460                 {
1461                         /*
1462                          * Send a Request Sense to the device.  We
1463                          * assume that we are in a contingent allegiance
1464                          * condition so we do not tag this request.
1465                          */
1466                         scsi_request_sense(&ccb->csio, /*retries*/1,
1467                                            camperiphdone,
1468                                            &save_ccb->csio.sense_data,
1469                                            sizeof(save_ccb->csio.sense_data),
1470                                            CAM_TAG_ACTION_NONE,
1471                                            /*sense_len*/SSD_FULL_SIZE,
1472                                            /*timeout*/5000);
1473                         break;
1474                 }
1475                 default:
1476                         panic("Unhandled error action %x", err_action);
1477                 }
1478
1479                 if ((err_action & SS_MASK) >= SS_START) {
1480                         /*
1481                          * Drop the priority to 0 so that the recovery
1482                          * CCB is the first to execute.  Freeze the queue
1483                          * after this command is sent so that we can
1484                          * restore the old csio and have it queued in
1485                          * the proper order before we release normal
1486                          * transactions to the device.
1487                          */
1488                         ccb->ccb_h.pinfo.priority = 0;
1489                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1490                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1491                         error = ERESTART;
1492                 }
1493
1494 sense_error_done:
1495                 if ((err_action & SSQ_PRINT_SENSE) != 0
1496                  && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
1497                         cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1498                         xpt_print_path(ccb->ccb_h.path);
1499                         if (bootverbose)
1500                                 scsi_sense_print(&print_ccb->csio);
1501                         kprintf("%s\n", action_string);
1502                 }
1503         }
1504         return (error);
1505 }
1506
1507 /*
1508  * Generic error handler.  Peripheral drivers usually filter
1509  * out the errors that they handle in a unique manner, then
1510  * call this function.  An illustrative usage sketch follows below.
1511  */
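/*
 * Illustrative sketch only, not taken from any in-tree driver: a typical
 * peripheral completion routine hands anything other than CAM_REQ_CMP to
 * cam_periph_error() and simply returns on ERESTART, since the CCB has
 * already been re-queued via xpt_action().  The names xxdone, xx_softc
 * and saved_ccb are hypothetical.
 *
 *	static void
 *	xxdone(struct cam_periph *periph, union ccb *done_ccb)
 *	{
 *		struct xx_softc *softc = (struct xx_softc *)periph->softc;
 *		int error = 0;
 *
 *		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 *			error = cam_periph_error(done_ccb, CAM_RETRY_SELTO,
 *						 SF_RETRY_UA,
 *						 &softc->saved_ccb);
 *			if (error == ERESTART)
 *				return;
 *		}
 *		...finish the transaction and release done_ccb...
 *	}
 */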
1512 int
1513 cam_periph_error(union ccb *ccb, cam_flags camflags,
1514                  u_int32_t sense_flags, union ccb *save_ccb)
1515 {
1516         const char *action_string;
1517         cam_status  status;
1518         int         frozen;
1519         int         error, printed = 0;
1520         int         openings;
1521         u_int32_t   relsim_flags;
1522         u_int32_t   timeout = 0;
1523
1524         action_string = NULL;
1525         status = ccb->ccb_h.status;
1526         frozen = (status & CAM_DEV_QFRZN) != 0;
1527         status &= CAM_STATUS_MASK;
1528         openings = relsim_flags = 0;
1529
1530         switch (status) {
1531         case CAM_REQ_CMP:
1532                 error = 0;
1533                 break;
1534         case CAM_SCSI_STATUS_ERROR:
1535                 error = camperiphscsistatuserror(ccb,
1536                                                  camflags,
1537                                                  sense_flags,
1538                                                  save_ccb,
1539                                                  &openings,
1540                                                  &relsim_flags,
1541                                                  &timeout);
1542                 break;
1543         case CAM_AUTOSENSE_FAIL:
1544                 xpt_print_path(ccb->ccb_h.path);
1545                 kprintf("AutoSense Failed\n");
1546                 error = EIO;    /* we have to kill the command */
1547                 break;
1548         case CAM_REQ_CMP_ERR:
1549                 if (bootverbose && printed == 0) {
1550                         xpt_print_path(ccb->ccb_h.path);
1551                         kprintf("Request completed with CAM_REQ_CMP_ERR\n");
1552                         printed++;
1553                 }
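                /* FALLTHROUGH */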
1554         case CAM_CMD_TIMEOUT:
1555                 if (bootverbose && printed == 0) {
1556                         xpt_print_path(ccb->ccb_h.path);
1557                         kprintf("Command timed out\n");
1558                         printed++;
1559                 }
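                /* FALLTHROUGH */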
1560         case CAM_UNEXP_BUSFREE:
1561                 if (bootverbose && printed == 0) {
1562                         xpt_print_path(ccb->ccb_h.path);
1563                         kprintf("Unexpected Bus Free\n");
1564                         printed++;
1565                 }
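                /* FALLTHROUGH */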
1566         case CAM_UNCOR_PARITY:
1567                 if (bootverbose && printed == 0) {
1568                         xpt_print_path(ccb->ccb_h.path);
1569                         kprintf("Uncorrected Parity Error\n");
1570                         printed++;
1571                 }
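                /* FALLTHROUGH */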
1572         case CAM_DATA_RUN_ERR:
1573                 if (bootverbose && printed == 0) {
1574                         xpt_print_path(ccb->ccb_h.path);
1575                         kprintf("Data Overrun\n");
1576                         printed++;
1577                 }
1578                 error = EIO;    /* we have to kill the command */
1579                 /* decrement the number of retries */
1580                 if (ccb->ccb_h.retry_count > 0) {
1581                         ccb->ccb_h.retry_count--;
1582                         error = ERESTART;
1583                 } else {
1584                         action_string = "Retries Exhausted";
1585                         error = EIO;
1586                 }
1587                 break;
1588         case CAM_UA_ABORT:
1589         case CAM_UA_TERMIO:
1590         case CAM_MSG_REJECT_REC:
1591                 /* XXX Don't know that these are correct */
1592                 error = EIO;
1593                 break;
1594         case CAM_SEL_TIMEOUT:
1595         {
1596                 struct cam_path *newpath;
1597
1598                 if ((camflags & CAM_RETRY_SELTO) != 0) {
1599                         if (ccb->ccb_h.retry_count > 0) {
1600
1601                                 ccb->ccb_h.retry_count--;
1602                                 error = ERESTART;
1603                                 if (bootverbose && printed == 0) {
1604                                         xpt_print_path(ccb->ccb_h.path);
1605                                         kprintf("Selection Timeout\n");
1606                                         printed++;
1607                                 }
1608
1609                                 /*
1610                                  * Wait a bit to give the device
1611                                  * time to recover before we try again.
1612                                  */
1613                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1614                                 timeout = periph_selto_delay;
1615                                 break;
1616                         }
1617                 }
1618                 error = ENXIO;
1619                 /* Should we do more if we can't create the path?? */
1620                 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1621                                     xpt_path_path_id(ccb->ccb_h.path),
1622                                     xpt_path_target_id(ccb->ccb_h.path),
1623                                     CAM_LUN_WILDCARD) != CAM_REQ_CMP) 
1624                         break;
1625
1626                 /*
1627                  * Let peripheral drivers know that this device has gone
1628                  * away.
1629                  */
1630                 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1631                 xpt_free_path(newpath);
1632                 break;
1633         }
1634         case CAM_REQ_INVALID:
1635         case CAM_PATH_INVALID:
1636         case CAM_DEV_NOT_THERE:
1637         case CAM_NO_HBA:
1638         case CAM_PROVIDE_FAIL:
1639         case CAM_REQ_TOO_BIG:
1640         case CAM_LUN_INVALID:
1641         case CAM_TID_INVALID:
1642                 error = EINVAL;
1643                 break;
1644         case CAM_SCSI_BUS_RESET:
1645         case CAM_BDR_SENT:
1646                 /*
1647                  * Commands that repeatedly time out and cause these
1648                  * kinds of error recovery actions should return
1649                  * CAM_CMD_TIMEOUT, which allows us to safely assume
1650                  * that this command was an innocent bystander to
1651                  * these events and should be unconditionally
1652                  * retried.
1653                  */
1654                 if (bootverbose && printed == 0) {
1655                         xpt_print_path(ccb->ccb_h.path);
1656                         if (status == CAM_BDR_SENT)
1657                                 kprintf("Bus Device Reset sent\n");
1658                         else
1659                                 kprintf("Bus Reset issued\n");
1660                         printed++;
1661                 }
1662                 /* FALLTHROUGH */
1663         case CAM_REQUEUE_REQ:
1664                 /* Unconditional requeue */
1665                 error = ERESTART;
1666                 if (bootverbose && printed == 0) {
1667                         xpt_print_path(ccb->ccb_h.path);
1668                         kprintf("Request Requeued\n");
1669                         printed++;
1670                 }
1671                 break;
1672         case CAM_RESRC_UNAVAIL:
1673                 /* Wait a bit for the resource shortage to abate. */
1674                 timeout = periph_noresrc_delay;
1675                 /* FALLTHROUGH */
1676         case CAM_BUSY:
1677                 if (timeout == 0) {
1678                         /* Wait a bit for the busy condition to abate. */
1679                         timeout = periph_busy_delay;
1680                 }
1681                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1682                 /* FALLTHROUGH */
1683         default:
1684                 /* decrement the number of retries */
1685                 if (ccb->ccb_h.retry_count > 0) {
1686                         ccb->ccb_h.retry_count--;
1687                         error = ERESTART;
1688                         if (bootverbose && printed == 0) {
1689                                 xpt_print_path(ccb->ccb_h.path);
1690                                 kprintf("CAM Status 0x%x\n", status);
1691                                 printed++;
1692                         }
1693                 } else {
1694                         error = EIO;
1695                         action_string = "Retries Exhausted";
1696                 }
1697                 break;
1698         }
1699
1700         /* Attempt a retry */
1701         if (error == ERESTART || error == 0) {  
1702                 if (frozen != 0)
1703                         ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1704
1705                 if (error == ERESTART) {
1706                         action_string = "Retrying Command";
1707                         xpt_action(ccb);
1708                 }
1709                 
1710                 if (frozen != 0)
1711                         cam_release_devq(ccb->ccb_h.path,
1712                                          relsim_flags,
1713                                          openings,
1714                                          timeout,
1715                                          /*getcount_only*/0);
1716         }
1717
1718         /*
1719          * If we have an error and are booting verbosely, whine
1720          * *unless* this was a non-retryable selection timeout.
1721          */
1722         if (error != 0 && bootverbose &&
1723             !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
1724
1725
1726                 if (action_string == NULL)
1727                         action_string = "Unretryable Error";
1728                 if (error != ERESTART) {
1729                         xpt_print_path(ccb->ccb_h.path);
1730                         kprintf("error %d\n", error);
1731                 }
1732                 xpt_print_path(ccb->ccb_h.path);
1733                 kprintf("%s\n", action_string);
1734         }
1735
1736         return (error);
1737 }