[dragonfly.git] sys/bus/cam/cam_periph.c
1 /*
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/cam/cam_periph.c,v 1.70 2008/02/12 11:07:33 raj Exp $
30  */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/buf.h>
39 #include <sys/proc.h>
40 #include <sys/devicestat.h>
41 #include <sys/bus.h>
42 #include <vm/vm.h>
43 #include <vm/vm_extern.h>
44
45 #include "cam.h"
46 #include "cam_ccb.h"
47 #include "cam_xpt_periph.h"
48 #include "cam_periph.h"
49 #include "cam_debug.h"
50 #include "cam_sim.h"
51
52 #include <bus/cam/scsi/scsi_all.h>
53 #include <bus/cam/scsi/scsi_message.h>
54 #include <bus/cam/scsi/scsi_pass.h>
55
56 static  u_int           camperiphnextunit(struct periph_driver *p_drv,
57                                           u_int newunit, int wired,
58                                           path_id_t pathid, target_id_t target,
59                                           lun_id_t lun);
60 static  u_int           camperiphunit(struct periph_driver *p_drv,
61                                       struct cam_sim *sim, path_id_t pathid,
62                                       target_id_t target, lun_id_t lun);
63 static  void            camperiphdone(struct cam_periph *periph, 
64                                         union ccb *done_ccb);
65 static  void            camperiphfree(struct cam_periph *periph);
66 static int              camperiphscsistatuserror(union ccb *ccb,
67                                                  cam_flags camflags,
68                                                  u_int32_t sense_flags,
69                                                  union ccb *save_ccb,
70                                                  int *openings,
71                                                  u_int32_t *relsim_flags,
72                                                  u_int32_t *timeout);
73 static  int             camperiphscsisenseerror(union ccb *ccb,
74                                                 cam_flags camflags,
75                                                 u_int32_t sense_flags,
76                                                 union ccb *save_ccb,
77                                                 int *openings,
78                                                 u_int32_t *relsim_flags,
79                                                 u_int32_t *timeout);
80 static void cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
81                                  u_int8_t ***data_ptrs, int numbufs);
82
83 static int nperiph_drivers;
84 struct periph_driver **periph_drivers;
85
86 MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
87
88 static int periph_selto_delay = 1000;
89 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
90 static int periph_noresrc_delay = 500;
91 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
92 static int periph_busy_delay = 500;
93 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
94
95 /*
96  * This is a horrible hack.  The CAM code was just bulk-copying the ccb
97  * to 'restore' it from the saved version.  This completely destroys list
98  * linkages and such, so hack the hack to not copy over fields that cannot
99  * be safely copied over.
100  *
101  * This fixes list races when scsi errors occur simultaneously on multiple
102  * requests.
103  */
104 #define RESTORE_CCB(saved, ccbh, field) \
105         bcopy(&(saved)->field, &(ccbh)->field, sizeof((ccbh)->field))
106
107 #define saved_ccb_ptr ppriv_ptr0
108
109 static void
110 restore_ccb(struct ccb_hdr *ccb_h)
111 {
112         struct ccb_hdr *saved;
113
114         saved = ccb_h->saved_ccb_ptr;
115         bcopy(saved + 1, ccb_h + 1, sizeof(union ccb) - sizeof(*saved));
116         RESTORE_CCB(saved, ccb_h, retry_count);
117         RESTORE_CCB(saved, ccb_h, cbfcnp);
118         RESTORE_CCB(saved, ccb_h, func_code);
119         RESTORE_CCB(saved, ccb_h, status);
120         RESTORE_CCB(saved, ccb_h, path);
121         RESTORE_CCB(saved, ccb_h, path_id);
122         RESTORE_CCB(saved, ccb_h, target_id);
123         RESTORE_CCB(saved, ccb_h, target_lun);
124         RESTORE_CCB(saved, ccb_h, flags);
125         RESTORE_CCB(saved, ccb_h, periph_priv);
126         RESTORE_CCB(saved, ccb_h, sim_priv);
127         RESTORE_CCB(saved, ccb_h, timeout);
128 }
129
130 void
131 periphdriver_register(void *data)
132 {
133         struct periph_driver **newdrivers, **old;
134         int ndrivers;
135
136         ndrivers = nperiph_drivers + 2;
137         newdrivers = kmalloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
138                              M_WAITOK);
139         if (periph_drivers)
140                 bcopy(periph_drivers, newdrivers,
141                       sizeof(*newdrivers) * nperiph_drivers);
142         newdrivers[nperiph_drivers] = (struct periph_driver *)data;
143         newdrivers[nperiph_drivers + 1] = NULL;
144         old = periph_drivers;
145         periph_drivers = newdrivers;
146         if (old)
147                 kfree(old, M_CAMPERIPH);
148         nperiph_drivers++;
149 }
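/*
 * Illustrative sketch (not part of the original file): a peripheral driver
 * typically describes itself with a struct periph_driver and registers it,
 * either by calling periphdriver_register() directly or through the
 * PERIPHDRIVER_DECLARE() module glue in cam_periph.h.  The "xx" names are
 * hypothetical; the layout follows existing drivers such as da(4).
 */
#if 0
static periph_init_t xxinit;

static struct periph_driver xxdriver = {
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif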
150
151 cam_status
152 cam_periph_alloc(periph_ctor_t *periph_ctor,
153                  periph_oninv_t *periph_oninvalidate,
154                  periph_dtor_t *periph_dtor, periph_start_t *periph_start,
155                  char *name, cam_periph_type type, struct cam_path *path,
156                  ac_callback_t *ac_callback, ac_code code, void *arg)
157 {
158         struct          periph_driver **p_drv;
159         struct          cam_sim *sim;
160         struct          cam_periph *periph;
161         struct          cam_periph *cur_periph;
162         path_id_t       path_id;
163         target_id_t     target_id;
164         lun_id_t        lun_id;
165         cam_status      status;
166         u_int           init_level;
167
168         init_level = 0;
169         /*
170          * Handle Hot-Plug scenarios.  If there is already a peripheral
171          * of our type assigned to this path, we are likely waiting for
172          * final close on an old, invalidated, peripheral.  If this is
173          * final close on an old, invalidated peripheral.  If this is
174          * handler.  If it looks like a mistaken re-allocation, complain.
175          */
176         if ((periph = cam_periph_find(path, name)) != NULL) {
177
178                 if ((periph->flags & CAM_PERIPH_INVALID) != 0
179                  && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
180                         periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
181                         periph->deferred_callback = ac_callback;
182                         periph->deferred_ac = code;
183                         return (CAM_REQ_INPROG);
184                 } else {
185                         kprintf("cam_periph_alloc: attempt to re-allocate "
186                                "valid device %s%d rejected\n",
187                                periph->periph_name, periph->unit_number);
188                 }
189                 return (CAM_REQ_INVALID);
190         }
191         
192         periph = kmalloc(sizeof(*periph), M_CAMPERIPH, M_INTWAIT | M_ZERO);
193         
194         init_level++;   /* 1 */
195
196         xpt_lock_buses();
197         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
198                 if (strcmp((*p_drv)->driver_name, name) == 0)
199                         break;
200         }
201         xpt_unlock_buses();
202
203         sim = xpt_path_sim(path);
204         CAM_SIM_LOCK(sim);
205         path_id = xpt_path_path_id(path);
206         target_id = xpt_path_target_id(path);
207         lun_id = xpt_path_lun_id(path);
208         cam_init_pinfo(&periph->pinfo);
209         periph->periph_start = periph_start;
210         periph->periph_dtor = periph_dtor;
211         periph->periph_oninval = periph_oninvalidate;
212         periph->type = type;
213         periph->periph_name = name;
214         periph->immediate_priority = CAM_PRIORITY_NONE;
215         periph->refcount = 0;
216         periph->sim = sim;
217         SLIST_INIT(&periph->ccb_list);
218         status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
219         if (status != CAM_REQ_CMP)
220                 goto failure;
221
222         init_level++;   /* 2 */
223
224         periph->path = path;
225
226         /*
227          * Finalize with buses locked.  Allocate unit number and add to
228          * list to reserve the unit number.  Undo later if the XPT fails.
229          */
230         xpt_lock_buses();
231         periph->unit_number = camperiphunit(*p_drv, sim, path_id,
232                                             target_id, lun_id);
233         cur_periph = TAILQ_FIRST(&(*p_drv)->units);
234         while (cur_periph != NULL &&
235                cur_periph->unit_number < periph->unit_number) {
236                 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
237         }
238         if (cur_periph != NULL) {
239                 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
240         } else {
241                 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
242                 (*p_drv)->generation++;
243         }
244         xpt_unlock_buses();
245
246         status = xpt_add_periph(periph);
247
248         if (status != CAM_REQ_CMP)
249                 goto failure;
250
251         init_level++;   /* 3 */
252
253         status = periph_ctor(periph, arg);
254
255         if (status == CAM_REQ_CMP)
256                 init_level++; /* 4 */
257
258 failure:
259         switch (init_level) {
260         case 4:
261                 /* Initialized successfully */
262                 CAM_SIM_UNLOCK(sim);
263                 break;
264         case 3:
265         case 2:
266                 xpt_lock_buses();
267                 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
268                 xpt_unlock_buses();
269                 if (init_level == 3)
270                         xpt_remove_periph(periph);
271                 periph->path = NULL;
272                 /* FALLTHROUGH */
273         case 1:
274                 CAM_SIM_UNLOCK(sim);    /* sim was retrieved from path */
275                 xpt_free_path(path);
276                 kfree(periph, M_CAMPERIPH);
277                 /* FALLTHROUGH */
278         case 0:
279                 /* No cleanup to perform. */
280                 break;
281         default:
282                 panic("cam_periph_alloc: Unknown init level");
283         }
284         return(status);
285 }
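/*
 * Illustrative sketch (not part of the original file): cam_periph_alloc() is
 * normally called from a driver's async callback when the XPT announces a
 * new device.  The "xx" names are hypothetical and stand in for a driver's
 * constructor, invalidation, destructor, and start routines.
 */
#if 0
static void
xxasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct ccb_getdev *cgd = arg;
	cam_status status;

	if (code != AC_FOUND_DEVICE || cgd == NULL)
		return;
	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
				  xxstart, "xx", CAM_PERIPH_BIO,
				  cgd->ccb_h.path, xxasync,
				  AC_FOUND_DEVICE, cgd);
	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
		kprintf("xxasync: unable to attach, status %#x\n", status);
}
#endif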
286
287 /*
288  * Find a peripheral structure with the specified path, target, lun, 
289  * and (optionally) driver name.  If the name is NULL, this function will return
290  * the first peripheral driver that matches the specified path.
291  */
292 struct cam_periph *
293 cam_periph_find(struct cam_path *path, char *name)
294 {
295         struct periph_driver **p_drv;
296         struct cam_periph *periph;
297
298         xpt_lock_buses();
299         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
300                 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
301                         continue;
302
303                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
304                         if (xpt_path_comp(periph->path, path) == 0) {
305                                 xpt_unlock_buses();
306                                 return(periph);
307                         }
308                 }
309                 if (name != NULL) {
310                         xpt_unlock_buses();
311                         return(NULL);
312                 }
313         }
314         xpt_unlock_buses();
315         return(NULL);
316 }
317
318 cam_status
319 cam_periph_acquire(struct cam_periph *periph)
320 {
321         if (periph == NULL)
322                 return(CAM_REQ_CMP_ERR);
323
324         xpt_lock_buses();
325         periph->refcount++;
326         xpt_unlock_buses();
327
328         return(CAM_REQ_CMP);
329 }
330
331 /*
332  * Release the peripheral.  The XPT is not locked and the SIM may or may
333  * not be locked on entry.
334  *
335  * The last release on a peripheral marked invalid frees it.  In this
336  * case we must be sure to hold both the XPT lock and the SIM lock,
337  * requiring a bit of fancy footwork if the SIM lock already happens
338  * to be held.
339  */
340 void
341 cam_periph_release(struct cam_periph *periph)
342 {
343         struct cam_sim *sim;
344         int doun;
345
346         while (periph) {
347                 /*
348                  * First try the critical path case
349                  */
350                 sim = periph->sim;
351                 xpt_lock_buses();
352                 if ((periph->flags & CAM_PERIPH_INVALID) == 0 ||
353                     periph->refcount != 1) {
354                         --periph->refcount;
355                         xpt_unlock_buses();
356                         break;
357                 }
358
359                 /*
360                  * Otherwise we also need to free the peripheral and must
361                  * acquire the sim lock and xpt lock in the correct order
362                  * to do so.
363                  *
364                  * The condition must be re-checked after the locks have
365                  * been reacquired.
366                  */
367                 xpt_unlock_buses();
368                 doun = CAM_SIM_COND_LOCK(sim);
369                 xpt_lock_buses();
370                 --periph->refcount;
371                 if ((periph->flags & CAM_PERIPH_INVALID) &&
372                     periph->refcount == 0) {
373                         camperiphfree(periph);
374                 }
375                 xpt_unlock_buses();
376                 CAM_SIM_COND_UNLOCK(sim, doun);
377                 break;
378         }
379 }
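/*
 * Illustrative sketch (not part of the original file): the usual reference
 * pattern.  Code that stashes a periph pointer or sleeps while using it
 * takes a reference first; the matching release may free the structure if
 * the peripheral was invalidated in the meantime.
 */
#if 0
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);		/* periph was NULL or going away */
	/* ... safe to dereference periph here, even across a sleep ... */
	cam_periph_release(periph);	/* may be the final, freeing release */
#endif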
380
381 int
382 cam_periph_hold(struct cam_periph *periph, int flags)
383 {
384         int error;
385
386         sim_lock_assert_owned(periph->sim->lock);
387
388         /*
389          * Increment the reference count on the peripheral
390          * while we wait for our lock attempt to succeed
391          * to ensure the peripheral doesn't disappear out
392          * from under us while we sleep.
393          */
394
395         if (cam_periph_acquire(periph) != CAM_REQ_CMP)
396                 return (ENXIO);
397
398         while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
399                 periph->flags |= CAM_PERIPH_LOCK_WANTED;
400                 if ((error = sim_lock_sleep(periph, flags, "caplck", 0,
401                                             periph->sim->lock)) != 0) {
402                         cam_periph_release(periph);
403                         return (error);
404                 }
405         }
406
407         periph->flags |= CAM_PERIPH_LOCKED;
408         return (0);
409 }
410
411 void
412 cam_periph_unhold(struct cam_periph *periph, int unlock)
413 {
414         struct cam_sim *sim;
415
416         sim_lock_assert_owned(periph->sim->lock);
417         periph->flags &= ~CAM_PERIPH_LOCKED;
418         if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
419                 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
420                 wakeup(periph);
421         }
422         if (unlock) {
423                 sim = periph->sim;
424                 cam_periph_release(periph);
425                 /* periph may be garbage now */
426                 CAM_SIM_UNLOCK(sim);
427         } else {
428                 cam_periph_release(periph);
429         }
430 }
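/*
 * Illustrative sketch (not part of the original file): hold/unhold serializes
 * slow per-device operations such as open/close probing.  The sleep flags
 * (e.g. PCATCH) are passed through to sim_lock_sleep(); passing unlock=1 to
 * cam_periph_unhold() also drops the SIM lock, as noted above.
 */
#if 0
	CAM_SIM_LOCK(periph->sim);
	if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
		CAM_SIM_UNLOCK(periph->sim);
		return (error);
	}
	/* ... issue probe/media commands while holding the periph ... */
	cam_periph_unhold(periph, 1);	/* drops the hold and the SIM lock */
#endif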
431
432 /*
433  * Look for the next unit number that is not currently in use for this
434  * peripheral type starting at "newunit".  Also exclude unit numbers that
435  * are reserved for future "hardwiring" unless we already know that this
436  * is a potential wired device.  Only assume that the device is "wired" the
437  * first time through the loop since after that we'll be looking at unit
438  * numbers that did not match a wiring entry.
439  */
440 static u_int
441 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
442                   path_id_t pathid, target_id_t target, lun_id_t lun)
443 {
444         struct  cam_periph *periph;
445         char    *periph_name;
446         int     i, val, dunit;
447         const char *dname, *strval;
448
449         periph_name = p_drv->driver_name;
450         for (;;) {
451                 for (periph = TAILQ_FIRST(&p_drv->units);
452                      periph != NULL && periph->unit_number != newunit;
453                      periph = TAILQ_NEXT(periph, unit_links))
454                         ;
455
456                 if (periph != NULL && periph->unit_number == newunit) {
457                         if (wired != 0) {
458                                 xpt_print(periph->path, "Duplicate Wired "
459                                     "Device entry!\n");
460                                 xpt_print(periph->path, "Second device (%s "
461                                     "device at scbus%d target %d lun %d) will "
462                                     "not be wired\n", periph_name, pathid,
463                                     target, lun);
464                                 wired = 0;
465                         }
466                         ++newunit;
467                         continue;
468                 }
469                 if (wired)
470                         break;
471
472                 /*
473                  * Don't match entries like "da 4" as a wired down
474                  * device, but do match entries like "da 4 target 5"
475                  * or even "da 4 scbus 1". 
476                  */
477                 i = -1;
478                 while ((i = resource_locate(i, periph_name)) != -1) {
479                         dname = resource_query_name(i);
480                         dunit = resource_query_unit(i);
481                         /* if no "target" and no specific scbus, skip */
482                         if (resource_int_value(dname, dunit, "target", &val) &&
483                             (resource_string_value(dname, dunit, "at",&strval)||
484                              strcmp(strval, "scbus") == 0)) {
485                                 continue;
486                         }
487                         if (newunit == dunit)
488                                 break;
489                 }
490                 if (i == -1)
491                         break;
492                 ++newunit;
493         }
494         return (newunit);
495 }
496
497 static u_int
498 camperiphunit(struct periph_driver *p_drv,
499               struct cam_sim *sim, path_id_t pathid,
500               target_id_t target, lun_id_t lun)
501 {
502         u_int   unit;
503         int     hit, i, val, dunit;
504         const char *dname, *strval;
505         char    pathbuf[32], *periph_name;
506
507         unit = 0;
508
509         periph_name = p_drv->driver_name;
510         ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
511         i = -1;
512         for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
513                 dname = resource_query_name(i);
514                 dunit = resource_query_unit(i);
515                 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
516                         if (strcmp(strval, pathbuf) != 0)
517                                 continue;
518                         hit++;
519                 }
520                 if (resource_int_value(dname, dunit, "target", &val) == 0) {
521                         if (val != target)
522                                 continue;
523                         hit++;
524                 }
525                 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
526                         if (val != lun)
527                                 continue;
528                         hit++;
529                 }
530                 if (hit != 0) {
531                         unit = dunit;
532                         break;
533                 }
534         }
535
536         /*
537          * If no wired units are in the kernel config, do an automatic
538          * starting-unit selection.  We want usb mass storage out of the way
539          * so it doesn't steal low numbered da%d slots from ahci, sili,
540          * or other scsi attachments.
541          */
542         if (hit == 0 && sim) {
543                 if (strncmp(sim->sim_name, "umass", 4) == 0 && unit < 8)
544                         unit = 8;
545         }
546
547         /*
548          * Either start from 0 looking for the next unit or from
549          * the unit number given in the resource config.  This way,
550          * if we have wildcard matches, we don't return the same
551          * unit number twice.
552          */
553         unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
554                                  target, lun);
555
556         return (unit);
557 }
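/*
 * Illustrative sketch (not part of the original file): the resource lookups
 * above consume wiring entries so that a device at a fixed bus location
 * always claims the same unit number.  The exact numbers below are
 * hypothetical; syntax follows the usual kernel-config/hints conventions:
 *
 *	device	da4 at scbus1 target 5 lun 0		(kernel config)
 *
 *	hint.da.4.at="scbus1"				(loader hints)
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * An entry that names only "da 4" without an "at"/"target" qualifier is
 * deliberately not treated as wired (see camperiphnextunit() above).
 */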
558
559 void
560 cam_periph_invalidate(struct cam_periph *periph)
561 {
562         /*
563          * We only call this routine the first time a peripheral is
564          * invalidated.
565          */
566         if (((periph->flags & CAM_PERIPH_INVALID) == 0)
567          && (periph->periph_oninval != NULL))
568                 periph->periph_oninval(periph);
569
570         periph->flags |= CAM_PERIPH_INVALID;
571         periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
572
573         xpt_lock_buses();
574         if (periph->refcount == 0)
575                 camperiphfree(periph);
576         else if (periph->refcount < 0)
577                 kprintf("cam_invalidate_periph: refcount < 0!!\n");
578         xpt_unlock_buses();
579 }
580
581 static void
582 camperiphfree(struct cam_periph *periph)
583 {
584         struct periph_driver **p_drv;
585
586         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
587                 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
588                         break;
589         }
590
591         if (*p_drv == NULL) {
592                 kprintf("camperiphfree: attempt to free non-existent periph\n");
593                 return;
594         }
595
596         TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
597         (*p_drv)->generation++;
598         xpt_unlock_buses();
599
600         if (periph->periph_dtor != NULL)
601                 periph->periph_dtor(periph);
602         xpt_remove_periph(periph);
603
604         if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
605                 union ccb *ccb = xpt_alloc_ccb();
606                 void *arg;
607
608                 switch (periph->deferred_ac) {
609                 case AC_FOUND_DEVICE:
610                         ccb->ccb_h.func_code = XPT_GDEV_TYPE;
611                         xpt_setup_ccb(&ccb->ccb_h, periph->path, /*priority*/1);
612                         xpt_action(ccb);
613                         arg = ccb;
614                         break;
615                 case AC_PATH_REGISTERED:
616                         ccb->ccb_h.func_code = XPT_PATH_INQ;
617                         xpt_setup_ccb(&ccb->ccb_h, periph->path, /*priority*/1);
618                         xpt_action(ccb);
619                         arg = ccb;
620                         break;
621                 default:
622                         arg = NULL;
623                         break;
624                 }
625                 periph->deferred_callback(NULL, periph->deferred_ac,
626                                           periph->path, arg);
627                 xpt_free_ccb(&ccb->ccb_h);
628         }
629         xpt_free_path(periph->path);
630         kfree(periph, M_CAMPERIPH);
631         xpt_lock_buses();
632 }
633
634 /*
635  * We don't map user pointers into KVM, instead we use pbufs.
636  *
637  * This won't work on physical pointers(?OLD); for now it's
638  * up to the caller to check for that.  (XXX KDM -- should we do that here
639  * instead?)  This also only works for up to MAXPHYS memory.  Since we use
640  * buffers to map stuff in and out, we're limited to the buffer size.
641  */
642 int
643 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
644 {
645         buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
646         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
647         u_int32_t lengths[CAM_PERIPH_MAXMAPS];
648         int numbufs;
649         int error;
650         int i;
651         struct buf *bp;
652
653         switch(ccb->ccb_h.func_code) {
654         case XPT_DEV_MATCH:
655                 if (ccb->cdm.match_buf_len == 0) {
656                         kprintf("cam_periph_mapmem: invalid match buffer "
657                                "length 0\n");
658                         return(EINVAL);
659                 }
660                 if (ccb->cdm.pattern_buf_len > 0) {
661                         data_ptrs[0] = (void *)&ccb->cdm.patterns;
662                         lengths[0] = ccb->cdm.pattern_buf_len;
663                         mapinfo->dirs[0] = CAM_DIR_OUT;
664                         data_ptrs[1] = (void *)&ccb->cdm.matches;
665                         lengths[1] = ccb->cdm.match_buf_len;
666                         mapinfo->dirs[1] = CAM_DIR_IN;
667                         numbufs = 2;
668                 } else {
669                         data_ptrs[0] = (void *)&ccb->cdm.matches;
670                         lengths[0] = ccb->cdm.match_buf_len;
671                         mapinfo->dirs[0] = CAM_DIR_IN;
672                         numbufs = 1;
673                 }
674                 break;
675         case XPT_SCSI_IO:
676         case XPT_CONT_TARGET_IO:
677                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
678                         return(0);
679
680                 data_ptrs[0] = &ccb->csio.data_ptr;
681                 lengths[0] = ccb->csio.dxfer_len;
682                 mapinfo->dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
683                 numbufs = 1;
684                 break;
685         default:
686                 return(EINVAL);
687                 break; /* NOTREACHED */
688         }
689
690         /*
691          * Check the transfer length and permissions first, so we don't
692          * have to unmap any previously mapped buffers.
693          */
694         for (i = 0; i < numbufs; i++) {
695                 /*
696                  * It's kinda bogus, we need an R+W command.  For now the
697                  * buffer needs some sort of command.  Use BUF_CMD_WRITE
698                  * to indicate a write and BUF_CMD_READ to indicate R+W.
699                  */
700                 cmd[i] = BUF_CMD_WRITE;
701
702                 if (lengths[i] > MAXPHYS) {
703                         kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
704                                "which is greater than MAXPHYS(%d)\n",
705                                (long)(lengths[i] +
706                                (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
707                                MAXPHYS);
708                         return(E2BIG);
709                 }
710
711                 if (mapinfo->dirs[i] & CAM_DIR_OUT) {
712                         if (!useracc(*data_ptrs[i], lengths[i], 
713                                      VM_PROT_READ)) {
714                                 kprintf("cam_periph_mapmem: error, "
715                                         "address %p, length %lu isn't "
716                                         "user accessible for READ\n",
717                                         (void *)*data_ptrs[i],
718                                         (u_long)lengths[i]);
719                                 return(EACCES);
720                         }
721                 }
722
723                 if (mapinfo->dirs[i] & CAM_DIR_IN) {
724                         cmd[i] = BUF_CMD_READ;
725                         if (!useracc(*data_ptrs[i], lengths[i], 
726                                      VM_PROT_WRITE)) {
727                                 kprintf("cam_periph_mapmem: error, "
728                                         "address %p, length %lu isn't "
729                                         "user accessible for WRITE\n",
730                                         (void *)*data_ptrs[i],
731                                         (u_long)lengths[i]);
732
733                                 return(EACCES);
734                         }
735                 }
736
737         }
738
739         for (i = 0; i < numbufs; i++) {
740                 /*
741                  * Get the buffer.
742                  */
743                 bp = getpbuf_mem(NULL);
744
745                 /* save the original user pointer */
746                 mapinfo->saved_ptrs[i] = *data_ptrs[i];
747
748                 /* set the flags */
749                 bp->b_cmd = cmd[i];
750
751                 /*
752                  * Always bounce the I/O through kernel memory.
753                  */
754                 bp->b_bcount = lengths[i];
755                 if (mapinfo->dirs[i] & CAM_DIR_OUT) {
756                         error = copyin(*data_ptrs[i], bp->b_data, bp->b_bcount);
757                 } else {
758                         error = 0;
759                 }
760                 if (error) {
761                         relpbuf(bp, NULL);
762                         cam_periph_unmapbufs(mapinfo, data_ptrs, i);
763                         mapinfo->num_bufs_used -= i;
764                         return(error);
765                 }
766
767                 /* set our pointer to the new mapped area */
768                 *data_ptrs[i] = bp->b_data;
769
770                 mapinfo->bp[i] = bp;
771                 mapinfo->num_bufs_used++;
772         }
773
774         return(0);
775 }
776
777 /*
778  * Unmap memory segments mapped into kernel virtual address space by
779  * cam_periph_mapmem().
780  */
781 void
782 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
783 {
784         int numbufs;
785         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
786
787         if (mapinfo->num_bufs_used <= 0) {
788                 /* allow ourselves to be swapped once again */
789                 return;
790         }
791
792         switch (ccb->ccb_h.func_code) {
793         case XPT_DEV_MATCH:
794                 numbufs = min(mapinfo->num_bufs_used, 2);
795
796                 if (numbufs == 1) {
797                         data_ptrs[0] = (void *)&ccb->cdm.matches;
798                 } else {
799                         data_ptrs[0] = (void *)&ccb->cdm.patterns;
800                         data_ptrs[1] = (void *)&ccb->cdm.matches;
801                 }
802                 break;
803         case XPT_SCSI_IO:
804         case XPT_CONT_TARGET_IO:
805                 data_ptrs[0] = &ccb->csio.data_ptr;
806                 numbufs = min(mapinfo->num_bufs_used, 1);
807                 break;
808         default:
809                 /* allow ourselves to be swapped once again */
810                 return;
811                 break; /* NOTREACHED */ 
812         }
813         cam_periph_unmapbufs(mapinfo, data_ptrs, numbufs);
814 }
815
816 static void
817 cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
818                      u_int8_t ***data_ptrs, int numbufs)
819 {
820         struct buf *bp;
821         int i;
822
823         for (i = 0; i < numbufs; i++) {
824                 bp = mapinfo->bp[i];
825
826                 /* Set the user's pointer back to the original value */
827                 *data_ptrs[i] = mapinfo->saved_ptrs[i];
828
829                 if (mapinfo->dirs[i] & CAM_DIR_IN) {
830                         /* XXX return error */
831                         copyout(bp->b_data, *data_ptrs[i], bp->b_bcount);
832                 }
833                 relpbuf(bp, NULL);
834                 mapinfo->bp[i] = NULL;
835         }
836 }
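/*
 * Illustrative sketch (not part of the original file): a driver accepting a
 * user-constructed CCB (e.g. through a pass-through ioctl) brackets the
 * request with cam_periph_mapmem()/cam_periph_unmapmem() so that the user
 * data pointers are replaced with bounce buffers while the CCB is in flight.
 * "xxerror" is a hypothetical error callback.
 */
#if 0
	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));
	error = cam_periph_mapmem(ccb, &mapinfo);
	if (error == 0) {
		error = cam_periph_runccb(ccb, xxerror, /*camflags*/0,
					  /*sense_flags*/0, /*devstat*/NULL);
		cam_periph_unmapmem(ccb, &mapinfo);
	}
#endif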
837
838 union ccb *
839 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
840 {
841         struct ccb_hdr *ccb_h;
842
843         sim_lock_assert_owned(periph->sim->lock);
844         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));
845
846         while (SLIST_FIRST(&periph->ccb_list) == NULL) {
847                 if (periph->immediate_priority > priority)
848                         periph->immediate_priority = priority;
849                 xpt_schedule(periph, priority);
850                 if ((SLIST_FIRST(&periph->ccb_list) != NULL)
851                  && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
852                         break;
853                 sim_lock_sleep(&periph->ccb_list, 0, "cgticb", 0,
854                                periph->sim->lock);
855         }
856
857         ccb_h = SLIST_FIRST(&periph->ccb_list);
858         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
859         return ((union ccb *)ccb_h);
860 }
861
862 void
863 cam_periph_ccbwait(union ccb *ccb)
864 {
865         struct cam_sim *sim;
866
867         sim = xpt_path_sim(ccb->ccb_h.path);
868         while ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
869          || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)) {
870                 sim_lock_sleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0, sim->lock);
871         }
872 }
873
874 int
875 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
876                  int (*error_routine)(union ccb *ccb, 
877                                       cam_flags camflags,
878                                       u_int32_t sense_flags))
879 {
880         union ccb            *ccb;
881         int                  error;
882         int                  found;
883
884         error = found = 0;
885
886         switch(cmd){
887         case CAMGETPASSTHRU:
888                 ccb = cam_periph_getccb(periph, /* priority */ 1);
889                 xpt_setup_ccb(&ccb->ccb_h,
890                               ccb->ccb_h.path,
891                               /*priority*/1);
892                 ccb->ccb_h.func_code = XPT_GDEVLIST;
893
894                 /*
895                  * Basically, the point of this is that we go through
896                  * getting the list of devices, until we find a passthrough
897                  * device.  In the current version of the CAM code, the
898                  * only way to determine what type of device we're dealing
899                  * with is by its name.
900                  */
901                 while (found == 0) {
902                         ccb->cgdl.index = 0;
903                         ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
904                         while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
905
906                                 /* we want the next device in the list */
907                                 xpt_action(ccb);
908                                 if (strncmp(ccb->cgdl.periph_name, 
909                                     "pass", 4) == 0){
910                                         found = 1;
911                                         break;
912                                 }
913                         }
914                         if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
915                             (found == 0)) {
916                                 ccb->cgdl.periph_name[0] = '\0';
917                                 ccb->cgdl.unit_number = 0;
918                                 break;
919                         }
920                 }
921
922                 /* copy the result back out */  
923                 bcopy(ccb, addr, sizeof(union ccb));
924
925                 /* and release the ccb */
926                 xpt_release_ccb(ccb);
927
928                 break;
929         default:
930                 error = ENOTTY;
931                 break;
932         }
933         return(error);
934 }
935
936 int
937 cam_periph_runccb(union ccb *ccb,
938                   int (*error_routine)(union ccb *ccb,
939                                        cam_flags camflags,
940                                        u_int32_t sense_flags),
941                   cam_flags camflags, u_int32_t sense_flags,
942                   struct devstat *ds)
943 {
944         struct cam_sim *sim;
945         int error;
946  
947         error = 0;
948         sim = xpt_path_sim(ccb->ccb_h.path);
949         sim_lock_assert_owned(sim->lock);
950
951         /*
952          * If the user has supplied a stats structure, and if we understand
953          * this particular type of ccb, record the transaction start.
954          */
955         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
956                 devstat_start_transaction(ds);
957
958         xpt_action(ccb);
959  
960         do {
961                 cam_periph_ccbwait(ccb);
962                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
963                         error = 0;
964                 else if (error_routine != NULL)
965                         error = (*error_routine)(ccb, camflags, sense_flags);
966                 else
967                         error = 0;
968
969         } while (error == ERESTART);
970           
971         if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 
972                 cam_release_devq(ccb->ccb_h.path,
973                                  /* relsim_flags */0,
974                                  /* openings */0,
975                                  /* timeout */0,
976                                  /* getcount_only */ FALSE);
977
978         if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
979                 devstat_end_transaction(ds,
980                                         ccb->csio.dxfer_len,
981                                         ccb->csio.tag_action & 0xf,
982                                         ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
983                                         CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
984                                         (ccb->ccb_h.flags & CAM_DIR_OUT) ?
985                                         DEVSTAT_WRITE : 
986                                         DEVSTAT_READ);
987
988         return(error);
989 }
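/*
 * Illustrative sketch (not part of the original file): a synchronous command
 * issued from a peripheral driver.  The CCB comes from cam_periph_getccb(),
 * cam_periph_runccb() sleeps until it completes (retrying on ERESTART), and
 * xpt_release_ccb() returns it to the device.  "xxdone" and "xxerror" are
 * hypothetical driver routines; the scsi_test_unit_ready() argument order
 * follows scsi_all.h.
 */
#if 0
	union ccb *ccb;
	int error;

	ccb = cam_periph_getccb(periph, /*priority*/1);
	scsi_test_unit_ready(&ccb->csio,
			     /*retries*/4,
			     /*cbfcnp*/xxdone,
			     MSG_SIMPLE_Q_TAG,
			     SSD_FULL_SIZE,
			     /*timeout*/5000);
	error = cam_periph_runccb(ccb, xxerror, /*camflags*/0,
				  /*sense_flags*/0, /*devstat*/NULL);
	xpt_release_ccb(ccb);
#endif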
990
991 void
992 cam_freeze_devq(struct cam_path *path)
993 {
994         struct ccb_hdr *ccb_h;
995
996         ccb_h = &xpt_alloc_ccb()->ccb_h;
997         xpt_setup_ccb(ccb_h, path, /*priority*/1);
998         ccb_h->func_code = XPT_NOOP;
999         ccb_h->flags = CAM_DEV_QFREEZE;
1000         xpt_action((union ccb *)ccb_h);
1001
1002         xpt_free_ccb(ccb_h);
1003 }
1004
1005 u_int32_t
1006 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1007                  u_int32_t openings, u_int32_t timeout,
1008                  int getcount_only)
1009 {
1010         struct ccb_relsim *crs;
1011         uint32_t cnt;
1012
1013         crs = &xpt_alloc_ccb()->crs;
1014
1015         xpt_setup_ccb(&crs->ccb_h, path, /*priority*/1);
1016         crs->ccb_h.func_code = XPT_REL_SIMQ;
1017         crs->ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1018         crs->release_flags = relsim_flags;
1019         crs->openings = openings;
1020         crs->release_timeout = timeout;
1021         xpt_action((union ccb *)crs);
1022         cnt = crs->qfrozen_cnt;
1023
1024         xpt_free_ccb(&crs->ccb_h);
1025
1026         return cnt;
1027 }
1028
1029 static void
1030 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1031 {
1032         union ccb      *saved_ccb;
1033         cam_status      status;
1034         int             frozen;
1035         int             sense;
1036         struct scsi_start_stop_unit *scsi_cmd;
1037         u_int32_t       relsim_flags, timeout;
1038         u_int32_t       qfrozen_cnt;
1039         int             xpt_done_ccb;
1040
1041         xpt_done_ccb = FALSE;
1042         status = done_ccb->ccb_h.status;
1043         frozen = (status & CAM_DEV_QFRZN) != 0;
1044         sense  = (status & CAM_AUTOSNS_VALID) != 0;
1045         status &= CAM_STATUS_MASK;
1046
1047         timeout = 0;
1048         relsim_flags = 0;
1049         saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1050
1051         /* 
1052          * Unfreeze the queue once if it is already frozen.
1053          */
1054         if (frozen != 0) {
1055                 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
1056                                               /*relsim_flags*/0,
1057                                               /*openings*/0,
1058                                               /*timeout*/0,
1059                                               /*getcount_only*/0);
1060         }
1061
1062         switch (status) {
1063         case CAM_REQ_CMP:
1064         {
1065                 /*
1066                  * If we have successfully taken a device from the not
1067                  * ready to ready state, re-scan the device and re-get
1068                  * the inquiry information.  Many devices (mostly disks)
1069                  * don't properly report their inquiry information unless
1070                  * they are spun up.
1071                  *
1072                  * If we manually retrieved sense into a CCB and got
1073                  * something other than "NO SENSE" send the updated CCB
1074                  * back to the client via xpt_done() to be processed via
1075                  * the error recovery code again.
1076                  */
1077                 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
1078                         scsi_cmd = (struct scsi_start_stop_unit *)
1079                                         &done_ccb->csio.cdb_io.cdb_bytes;
1080
1081                         if (scsi_cmd->opcode == START_STOP_UNIT)
1082                                 xpt_async(AC_INQ_CHANGED,
1083                                           done_ccb->ccb_h.path, NULL);
1084                         if (scsi_cmd->opcode == REQUEST_SENSE) {
1085                                 u_int sense_key;
1086
1087                                 sense_key = saved_ccb->csio.sense_data.flags;
1088                                 sense_key &= SSD_KEY;
1089                                 if (sense_key != SSD_KEY_NO_SENSE) {
1090                                         saved_ccb->ccb_h.status |=
1091                                             CAM_AUTOSNS_VALID;
1092 #if 0
1093                                         xpt_print(saved_ccb->ccb_h.path,
1094                                             "Recovered Sense\n");
1095                                         scsi_sense_print(&saved_ccb->csio);
1096                                         cam_error_print(saved_ccb, CAM_ESF_ALL,
1097                                                         CAM_EPF_ALL);
1098 #endif
1099                                         xpt_done_ccb = TRUE;
1100                                 }
1101                         }
1102                 }
1103                 restore_ccb(&done_ccb->ccb_h);
1104
1105                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1106
1107                 if (xpt_done_ccb == FALSE)
1108                         xpt_action(done_ccb);
1109
1110                 break;
1111         }
1112         case CAM_SCSI_STATUS_ERROR:
1113                 scsi_cmd = (struct scsi_start_stop_unit *)
1114                                 &done_ccb->csio.cdb_io.cdb_bytes;
1115                 if (sense != 0) {
1116                         struct ccb_getdev *cgd;
1117                         struct scsi_sense_data *sense;
1118                         int    error_code, sense_key, asc, ascq;        
1119                         scsi_sense_action err_action;
1120
1121                         cgd = &xpt_alloc_ccb()->cgd;
1122                         sense = &done_ccb->csio.sense_data;
1123                         scsi_extract_sense(sense, &error_code, 
1124                                            &sense_key, &asc, &ascq);
1125
1126                         /*
1127                          * Grab the inquiry data for this device.
1128                          */
1129                         xpt_setup_ccb(&cgd->ccb_h, done_ccb->ccb_h.path,
1130                                       /*priority*/ 1);
1131                         cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1132                         xpt_action((union ccb *)cgd);
1133                         err_action = scsi_error_action(&done_ccb->csio,
1134                                                        &cgd->inq_data, 0);
1135                         xpt_free_ccb(&cgd->ccb_h);
1136                         cgd = NULL;     /* safety */
1137
1138                         /*
1139                          * If the error is "invalid field in CDB", 
1140                          * and the load/eject flag is set, turn the 
1141                          * flag off and try again.  This is just in 
1142                          * case the drive in question barfs on the 
1143                          * load eject flag.  The CAM code should set 
1144                          * the load/eject flag by default for 
1145                          * removable media.
1146                          */
1147
1148                         /* XXX KDM 
1149                          * Should we check to see what the specific
1150                          * scsi status is??  Or does it not matter
1151                          * since we already know that there was an
1152                          * error, and we know what the specific
1153                          * error code was, and we know what the
1154                          * opcode is..
1155                          */
1156                         if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1157                             ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1158                              (asc == 0x24) && (ascq == 0x00) &&
1159                              (done_ccb->ccb_h.retry_count > 0)) {
1160
1161                                 scsi_cmd->how &= ~SSS_LOEJ;
1162
1163                                 xpt_action(done_ccb);
1164
1165                         } else if ((done_ccb->ccb_h.retry_count > 1)
1166                                 && ((err_action & SS_MASK) != SS_FAIL)) {
1167
1168                                 /*
1169                                  * In this case, the error recovery
1170                                  * command failed, but we've got 
1171                                  * some retries left on it.  Give
1172                                  * it another try unless this is an
1173                                  * unretryable error.
1174                                  */
1175
1176                                 /* set the timeout to .5 sec */
1177                                 relsim_flags =
1178                                         RELSIM_RELEASE_AFTER_TIMEOUT;
1179                                 timeout = 500;
1180
1181                                 xpt_action(done_ccb);
1182
1183                                 break;
1184
1185                         } else {
1186                                 /* 
1187                                  * Perform the final retry with the original
1188                                  * CCB so that final error processing is
1189                                  * performed by the owner of the CCB.
1190                                  */
1191                                 restore_ccb(&done_ccb->ccb_h);
1192
1193                                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1194
1195                                 xpt_action(done_ccb);
1196                         }
1197                 } else {
1198                         /*
1199                          * Eh??  The command failed, but we don't
1200                          * have any sense.  What's up with that?
1201                          * Fire the CCB again to return it to the
1202                          * caller.
1203                          */
1204                         restore_ccb(&done_ccb->ccb_h);
1205
1206                         periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1207
1208                         xpt_action(done_ccb);
1209
1210                 }
1211                 break;
1212         default:
1213                 restore_ccb(&done_ccb->ccb_h);
1214
1215                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1216
1217                 xpt_action(done_ccb);
1218
1219                 break;
1220         }
1221
1222         /* decrement the retry count */
1223         /*
1224          * XXX This isn't appropriate in all cases.  Restructure,
1225          *     so that the retry count is only decremented on an
1226          *     actual retry.  Remember that the original ccb had its
1227          *     retry count dropped before entering recovery, so
1228          *     doing it again is a bug.
1229          */
1230         if (done_ccb->ccb_h.retry_count > 0)
1231                 done_ccb->ccb_h.retry_count--;
1232
1233         qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
1234                                       /*relsim_flags*/relsim_flags,
1235                                       /*openings*/0,
1236                                       /*timeout*/timeout,
1237                                       /*getcount_only*/0);
1238         if (xpt_done_ccb == TRUE)
1239                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
1240 }
1241
1242 /*
1243  * Generic Async Event handler.  Peripheral drivers usually
1244  * filter out the events that require personal attention,
1245  * and leave the rest to this function.
1246  */
1247 void
1248 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1249                  struct cam_path *path, void *arg)
1250 {
1251         switch (code) {
1252         case AC_LOST_DEVICE:
1253                 cam_periph_invalidate(periph);
1254                 break; 
1255         case AC_SENT_BDR:
1256         case AC_BUS_RESET:
1257         {
1258                 cam_periph_bus_settle(periph, scsi_delay);
1259                 break;
1260         }
1261         default:
1262                 break;
1263         }
1264 }
1265
1266 void
1267 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1268 {
1269         struct ccb_getdevstats *cgds;
1270
1271         cgds = &xpt_alloc_ccb()->cgds;
1272         xpt_setup_ccb(&cgds->ccb_h, periph->path, /*priority*/1);
1273         cgds->ccb_h.func_code = XPT_GDEV_STATS;
1274         xpt_action((union ccb *)cgds);
1275         cam_periph_freeze_after_event(periph, &cgds->last_reset, bus_settle);
1276         xpt_free_ccb(&cgds->ccb_h);
1277 }
1278
1279 void
1280 cam_periph_freeze_after_event(struct cam_periph *periph,
1281                               struct timeval* event_time, u_int duration_ms)
1282 {
1283         struct timeval delta;
1284         struct timeval duration_tv;
1285
1286         microuptime(&delta);
1287         timevalsub(&delta, event_time);
1288         duration_tv.tv_sec = duration_ms / 1000;
1289         duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1290         if (timevalcmp(&delta, &duration_tv, <)) {
1291                 timevalsub(&duration_tv, &delta);
1292
1293                 duration_ms = duration_tv.tv_sec * 1000;
1294                 duration_ms += duration_tv.tv_usec / 1000;
1295                 cam_freeze_devq(periph->path); 
1296                 cam_release_devq(periph->path,
1297                                 RELSIM_RELEASE_AFTER_TIMEOUT,
1298                                 /*reduction*/0,
1299                                 /*timeout*/duration_ms,
1300                                 /*getcount_only*/0);
1301         }
1302
1303 }
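/*
 * Illustrative numbers (not part of the original file): if the last reset in
 * *event_time happened 300ms ago and duration_ms is 1000, delta < duration,
 * so the devq is frozen and scheduled to self-release after the remaining
 * 700ms.  If the event is already older than duration_ms, nothing is done.
 */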
1304
1305 static int
1306 camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
1307                          u_int32_t sense_flags, union ccb *save_ccb,
1308                          int *openings, u_int32_t *relsim_flags,
1309                          u_int32_t *timeout)
1310 {
1311         int error;
1312
1313         switch (ccb->csio.scsi_status) {
1314         case SCSI_STATUS_OK:
1315         case SCSI_STATUS_COND_MET:
1316         case SCSI_STATUS_INTERMED:
1317         case SCSI_STATUS_INTERMED_COND_MET:
1318                 error = 0;
1319                 break;
1320         case SCSI_STATUS_CMD_TERMINATED:
1321         case SCSI_STATUS_CHECK_COND:
1322                 error = camperiphscsisenseerror(ccb,
1323                                                 camflags,
1324                                                 sense_flags,
1325                                                 save_ccb,
1326                                                 openings,
1327                                                 relsim_flags,
1328                                                 timeout);
1329                 break;
1330         case SCSI_STATUS_QUEUE_FULL:
1331         {
1332                 /* no decrement */
1333                 struct ccb_getdevstats *cgds;
1334
1335                 cgds = &xpt_alloc_ccb()->cgds;
1336
1337                 /*
1338                  * First off, find out what the current
1339                  * transaction counts are.
1340                  */
1341                 xpt_setup_ccb(&cgds->ccb_h, ccb->ccb_h.path, /*priority*/1);
1342                 cgds->ccb_h.func_code = XPT_GDEV_STATS;
1343                 xpt_action((union ccb *)cgds);
1344
1345                 /*
1346                  * If we were the only transaction active, treat
1347                  * the QUEUE FULL as if it were a BUSY condition.
1348                  */
1349                 if (cgds->dev_active != 0) {
1350                         int total_openings;
1351
1352                         /*
1353                          * Reduce the number of openings to
1354                          * be 1 less than the amount it took
1355                          * to get a queue full bounded by the
1356                          * minimum allowed tag count for this
1357                          * device.
1358                          */
1359                         total_openings = cgds->dev_active + cgds->dev_openings;
1360                         *openings = cgds->dev_active;
1361                         if (*openings < cgds->mintags)
1362                                 *openings = cgds->mintags;
1363                         if (*openings < total_openings) {
1364                                 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1365                         } else {
1366                                 /*
1367                                  * Some devices report queue full for
1368                                  * temporary resource shortages.  For
1369                                  * this reason, we allow a minimum
1370                                  * tag count to be entered via a
1371                                  * quirk entry to prevent the queue
1372                                  * count on these devices from falling
1373                                  * to a pessimistically low value.  We
1374                                  * still wait for the next successful
1375                                  * completion, however, before queueing
1376                                  * more transactions to the device.
1377                                  */
1378                                 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1379                         }
1380                         *timeout = 0;
1381                         error = ERESTART;
1382                         if (bootverbose) {
1383                                 xpt_print(ccb->ccb_h.path, "Queue Full\n");
1384                         }
1385                         xpt_free_ccb(&cgds->ccb_h);
1386                         break;
1387                 }
1388                 xpt_free_ccb(&cgds->ccb_h);
1389                 /* FALLTHROUGH */
1390         }
1391         case SCSI_STATUS_BUSY:
1392                 /*
1393                  * Restart the queue after either another
1394                  * command completes or a 1 second timeout.
1395                  */
1396                 if (bootverbose) {
1397                         xpt_print(ccb->ccb_h.path, "Device Busy\n");
1398                 }
1399                 if (ccb->ccb_h.retry_count > 0) {
1400                         ccb->ccb_h.retry_count--;
1401                         error = ERESTART;
1402                         *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1403                                       | RELSIM_RELEASE_AFTER_CMDCMPLT;
1404                         *timeout = 1000;
1405                 } else {
1406                         error = EIO;
1407                 }
1408                 break;
1409         case SCSI_STATUS_RESERV_CONFLICT:
1410                 xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
1411                 error = EIO;
1412                 break;
1413         default:
1414                 xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
1415                     ccb->csio.scsi_status);
1416                 error = EIO;
1417                 break;
1418         }
1419         return (error);
1420 }
1421
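/*
 * Handle CHECK CONDITION and COMMAND TERMINATED status.  When valid
 * autosense data is present it is passed to scsi_error_action() to select
 * a recovery action: no action, a plain retry, a START UNIT, or a TEST
 * UNIT READY poll.  Without autosense data a REQUEST SENSE is issued
 * instead (or a simple retry is attempted if autosense is disabled).
 * Recovery commands are built in place of the failed CCB after its
 * contents have been saved in *save_ccb, and ERESTART is returned so that
 * the caller resubmits the CCB.
 */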
1422 static int
1423 camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
1424                         u_int32_t sense_flags, union ccb *save_ccb,
1425                         int *openings, u_int32_t *relsim_flags,
1426                         u_int32_t *timeout)
1427 {
1428         struct cam_periph *periph;
1429         int error;
1430
1431         periph = xpt_path_periph(ccb->ccb_h.path);
1432         if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {
1433
1434                 /*
1435                  * If error recovery is already in progress, don't attempt
1436                  * to process this error, but requeue it unconditionally
1437                  * and attempt to process it once error recovery has
1438                  * completed.  This failed command is probably related to
1439                  * the error that caused the currently active error recovery
1440                  * action so our current recovery efforts should also
1441                  * address this command.  Be aware that the error recovery
1442                  * code assumes that only one recovery action is in progress
1443                  * on a particular peripheral instance at any given time
1444                  * (e.g. only one saved CCB for error recovery) so it is
1445                  * imperative that we don't violate this assumption.
1446                  */
1447                 error = ERESTART;
1448         } else {
1449                 scsi_sense_action err_action;
1450                 struct ccb_getdev *cgd;
1451                 const char *action_string;
1452                 union ccb* print_ccb;
1453
1454                 /* A description of the error recovery action performed */
1455                 action_string = NULL;
1456
1457                 /*
1458                  * The location of the original ccb
1459                  * for sense printing purposes.
1460                  */
1461                 print_ccb = ccb;
1462
1463                 /*
1464                  * Grab the inquiry data for this device.
1465                  */
1466                 cgd = &xpt_alloc_ccb()->cgd;
1467                 xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path, /*priority*/ 1);
1468                 cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1469                 xpt_action((union ccb *)cgd);
1470
1471                 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
1472                         err_action = scsi_error_action(&ccb->csio,
1473                                                        &cgd->inq_data,
1474                                                        sense_flags);
1475                 else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
1476                         err_action = SS_REQSENSE;
1477                 else
1478                         err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1479
1480                 error = err_action & SS_ERRMASK;
1481
1482                 /*
1483                  * If the recovery action will consume a retry,
1484                  * make sure we actually have retries available.
1485                  */
1486                 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1487                         if (ccb->ccb_h.retry_count > 0)
1488                                 ccb->ccb_h.retry_count--;
1489                         else {
1490                                 action_string = "Retries Exhausted";
1491                                 goto sense_error_done;
1492                         }
1493                 }
1494
1495                 if ((err_action & SS_MASK) >= SS_START) {
1496                         /*
1497                          * Do common portions of commands that
1498                          * use recovery CCBs.
1499                          */
1500                         if (save_ccb == NULL) {
1501                                 action_string = "No recovery CCB supplied";
1502                                 goto sense_error_done;
1503                         }
1504                         bcopy(ccb, save_ccb, sizeof(*save_ccb));
1505                         print_ccb = save_ccb;
1506                         periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1507                 }
1508
1509                 switch (err_action & SS_MASK) {
1510                 case SS_NOP:
1511                         action_string = "No Recovery Action Needed";
1512                         error = 0;
1513                         break;
1514                 case SS_RETRY:
1515                         action_string = "Retrying Command (per Sense Data)";
1516                         error = ERESTART;
1517                         break;
1518                 case SS_FAIL:
1519                         action_string = "Unretryable error";
1520                         break;
1521                 case SS_START:
1522                 {
1523                         int le;
1524
1525                         /*
1526                          * Send a start unit command to the device, and
1527                          * then retry the command.
1528                          */
1529                         action_string = "Attempting to Start Unit";
1530
1531                         /*
1532                          * Check for removable media and set
1533                          * load/eject flag appropriately.
1534                          */
1535                         if (SID_IS_REMOVABLE(&cgd->inq_data))
1536                                 le = TRUE;
1537                         else
1538                                 le = FALSE;
1539
1540                         scsi_start_stop(&ccb->csio,
1541                                         /*retries*/1,
1542                                         camperiphdone,
1543                                         MSG_SIMPLE_Q_TAG,
1544                                         /*start*/TRUE,
1545                                         /*load/eject*/le,
1546                                         /*immediate*/FALSE,
1547                                         SSD_FULL_SIZE,
1548                                         /*timeout*/50000);
1549                         break;
1550                 }
1551                 case SS_TUR:
1552                 {
1553                         /*
1554                          * Send a Test Unit Ready to the device.
1555                          * If the 'many' flag is set, we send 120
1556                          * test unit ready commands, one every half
1557                          * second.  Otherwise, we just send one TUR.
1558                          * We only want to do this if the retry
1559                          * count has not been exhausted.
1560                          */
1561                         int retries;
1562
1563                         if ((err_action & SSQ_MANY) != 0) {
1564                                 action_string = "Polling device for readiness";
1565                                 retries = 120;
1566                         } else {
1567                                 action_string = "Testing device for readiness";
1568                                 retries = 1;
1569                         }
1570                         scsi_test_unit_ready(&ccb->csio,
1571                                              retries,
1572                                              camperiphdone,
1573                                              MSG_SIMPLE_Q_TAG,
1574                                              SSD_FULL_SIZE,
1575                                              /*timeout*/5000);
1576
1577                         /*
1578                          * Accomplish our 500ms delay by deferring
1579                          * the release of our device queue appropriately.
1580                          */
1581                         *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1582                         *timeout = 500;
1583                         break;
1584                 }
1585                 case SS_REQSENSE:
1586                 {
1587                         /*
1588                          * Send a Request Sense to the device.  We
1589                          * assume that we are in a contingent allegiance
1590                          * condition so we do not tag this request.
1591                          */
1592                         scsi_request_sense(&ccb->csio, /*retries*/1,
1593                                            camperiphdone,
1594                                            &save_ccb->csio.sense_data,
1595                                            sizeof(save_ccb->csio.sense_data),
1596                                            CAM_TAG_ACTION_NONE,
1597                                            /*sense_len*/SSD_FULL_SIZE,
1598                                            /*timeout*/5000);
1599                         break;
1600                 }
1601                 default:
1602                         panic("Unhandled error action %x", err_action);
1603                 }
1604
1605                 if ((err_action & SS_MASK) >= SS_START) {
1606                         /*
1607                          * Drop the priority to 0 so that the recovery
1608                          * CCB is the first to execute.  Freeze the queue
1609                          * after this command is sent so that we can
1610                          * restore the old csio and have it queued in
1611                          * the proper order before we release normal
1612                          * transactions to the device.
1613                          */
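                        /*
                         * The recovery command completes through
                         * camperiphdone(), which uses saved_ccb_ptr to
                         * restore and requeue the original request once
                         * the recovery action has finished.
                         */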
1614                         ccb->ccb_h.pinfo.priority = 0;
1615                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1616                         ccb->ccb_h.saved_ccb_ptr = save_ccb;
1617                         error = ERESTART;
1618                 }
1619
1620 sense_error_done:
1621                 if ((err_action & SSQ_PRINT_SENSE) != 0 &&
1622                     (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
1623                         if ((ccb->ccb_h.flags & CAM_QUIET) == 0 ||
1624                             bootverbose) {
1625                                 cam_error_print(print_ccb,
1626                                                 CAM_ESF_ALL, CAM_EPF_ALL);
1627                                 xpt_print_path(ccb->ccb_h.path);
1628                         }
1629                         if (bootverbose)
1630                                 scsi_sense_print(&print_ccb->csio);
1631                         if ((ccb->ccb_h.flags & CAM_QUIET) == 0 ||
1632                             bootverbose) {
1633                                 kprintf("%s\n", action_string);
1634                         }
1635                 }
1636                 xpt_free_ccb(&cgd->ccb_h);
1637         }
1638         return (error);
1639 }
1640
1641 /*
1642  * Generic error handler.  Peripheral drivers usually filter
1643  * out the errors that they handle in a unique manner, then
1644  * call this function.
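 *
 * Returns 0 if the command succeeded, ERESTART if it has been resubmitted
 * (possibly transformed into a recovery command), or another errno for a
 * hard failure.  When the result is 0 or ERESTART, any device queue freeze
 * noted in the CCB status is released using the relsim parameters chosen
 * by the status and sense handlers.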
1645  */
1646 int
1647 cam_periph_error(union ccb *ccb, cam_flags camflags,
1648                  u_int32_t sense_flags, union ccb *save_ccb)
1649 {
1650         const char *action_string;
1651         cam_status  status;
1652         int         frozen;
1653         int         error, printed = 0;
1654         int         openings;
1655         u_int32_t   relsim_flags;
1656         u_int32_t   timeout = 0;
1657
1658         action_string = NULL;
1659         status = ccb->ccb_h.status;
1660         frozen = (status & CAM_DEV_QFRZN) != 0;
1661         status &= CAM_STATUS_MASK;
1662         openings = relsim_flags = 0;
1663
1664         switch (status) {
1665         case CAM_REQ_CMP:
1666                 error = 0;
1667                 break;
1668         case CAM_SCSI_STATUS_ERROR:
1669                 error = camperiphscsistatuserror(ccb,
1670                                                  camflags,
1671                                                  sense_flags,
1672                                                  save_ccb,
1673                                                  &openings,
1674                                                  &relsim_flags,
1675                                                  &timeout);
1676                 break;
1677         case CAM_AUTOSENSE_FAIL:
1678                 xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
1679                 error = EIO;    /* we have to kill the command */
1680                 break;
1681         case CAM_REQ_CMP_ERR:
1682                 if (bootverbose && printed == 0) {
1683                         xpt_print(ccb->ccb_h.path,
1684                             "Request completed with CAM_REQ_CMP_ERR\n");
1685                         printed++;
1686                 }
1687                 /* FALLTHROUGH */
1688         case CAM_CMD_TIMEOUT:
1689                 if (bootverbose && printed == 0) {
1690                         xpt_print(ccb->ccb_h.path, "Command timed out\n");
1691                         printed++;
1692                 }
1693                 /* FALLTHROUGH */
1694         case CAM_UNEXP_BUSFREE:
1695                 if (bootverbose && printed == 0) {
1696                         xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
1697                         printed++;
1698                 }
1699                 /* FALLTHROUGH */
1700         case CAM_UNCOR_PARITY:
1701                 if (bootverbose && printed == 0) {
1702                         xpt_print(ccb->ccb_h.path,
1703                             "Uncorrected Parity Error\n");
1704                         printed++;
1705                 }
1706                 /* FALLTHROUGH */
1707         case CAM_DATA_RUN_ERR:
1708                 if (bootverbose && printed == 0) {
1709                         xpt_print(ccb->ccb_h.path, "Data Overrun\n");
1710                         printed++;
1711                 }
1712                 error = EIO;    /* we have to kill the command */
1713                 /* decrement the number of retries */
1714                 if (ccb->ccb_h.retry_count > 0) {
1715                         ccb->ccb_h.retry_count--;
1716                         error = ERESTART;
1717                 } else {
1718                         action_string = "Retries Exhausted";
1719                         error = EIO;
1720                 }
1721                 break;
1722         case CAM_UA_ABORT:
1723         case CAM_UA_TERMIO:
1724         case CAM_MSG_REJECT_REC:
1725                 /* XXX Don't know that these are correct */
1726                 error = EIO;
1727                 break;
1728         case CAM_SEL_TIMEOUT:
1729         {
1730                 struct cam_path *newpath;
1731
1732                 if ((camflags & CAM_RETRY_SELTO) != 0) {
1733                         if (ccb->ccb_h.retry_count > 0) {
1734
1735                                 ccb->ccb_h.retry_count--;
1736                                 error = ERESTART;
1737                                 if (bootverbose && printed == 0) {
1738                                         xpt_print(ccb->ccb_h.path,
1739                                             "Selection Timeout\n");
1740                                         printed++;
1741                                 }
1742
1743                                 /*
1744                                  * Wait a bit to give the device
1745                                  * time to recover before we try again.
1746                                  */
1747                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1748                                 timeout = periph_selto_delay;
1749                                 break;
1750                         }
1751                 }
1752                 error = ENXIO;
1753                 /* Should we do more if we can't create the path?? */
1754                 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1755                                     xpt_path_path_id(ccb->ccb_h.path),
1756                                     xpt_path_target_id(ccb->ccb_h.path),
1757                                     CAM_LUN_WILDCARD) != CAM_REQ_CMP) 
1758                         break;
1759
1760                 /*
1761                  * Let peripheral drivers know that this device has gone
1762                  * away.
1763                  */
1764                 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1765                 xpt_free_path(newpath);
1766                 break;
1767         }
1768         case CAM_REQ_INVALID:
1769         case CAM_PATH_INVALID:
1770         case CAM_DEV_NOT_THERE:
1771         case CAM_NO_HBA:
1772         case CAM_PROVIDE_FAIL:
1773         case CAM_REQ_TOO_BIG:
1774         case CAM_LUN_INVALID:
1775         case CAM_TID_INVALID:
1776                 error = EINVAL;
1777                 break;
1778         case CAM_SCSI_BUS_RESET:
1779         case CAM_BDR_SENT:
1780                 /*
1781                  * Commands that repeatedly time out and cause these
1782                  * kinds of error recovery actions should return
1783                  * CAM_CMD_TIMEOUT, which allows us to safely assume
1784                  * that this command was an innocent bystander to
1785                  * these events and should be unconditionally
1786                  * retried.
1787                  */
1788                 if (bootverbose && printed == 0) {
1789                         xpt_print_path(ccb->ccb_h.path);
1790                         if (status == CAM_BDR_SENT)
1791                                 kprintf("Bus Device Reset sent\n");
1792                         else
1793                                 kprintf("Bus Reset issued\n");
1794                         printed++;
1795                 }
1796                 /* FALLTHROUGH */
1797         case CAM_REQUEUE_REQ:
1798                 /* Unconditional requeue */
1799                 error = ERESTART;
1800                 if (bootverbose && printed == 0) {
1801                         xpt_print(ccb->ccb_h.path, "Request Requeued\n");
1802                         printed++;
1803                 }
1804                 break;
1805         case CAM_RESRC_UNAVAIL:
1806                 /* Wait a bit for the resource shortage to abate. */
1807                 timeout = periph_noresrc_delay;
1808                 /* FALLTHROUGH */
1809         case CAM_BUSY:
1810                 if (timeout == 0) {
1811                         /* Wait a bit for the busy condition to abate. */
1812                         timeout = periph_busy_delay;
1813                 }
1814                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1815                 /* FALLTHROUGH */
1816         default:
1817                 /* decrement the number of retries */
1818                 if (ccb->ccb_h.retry_count > 0) {
1819                         ccb->ccb_h.retry_count--;
1820                         error = ERESTART;
1821                         if (bootverbose && printed == 0) {
1822                                 xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
1823                                     status);
1824                                 printed++;
1825                         }
1826                 } else {
1827                         error = EIO;
1828                         action_string = "Retries Exhausted";
1829                 }
1830                 break;
1831         }
1832
1833         /* Attempt a retry */
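        /*
         * CAM_DEV_QFRZN is cleared before the CCB is reused so that the
         * freeze it recorded is not released a second time; if the device
         * queue was frozen, it is released below via cam_release_devq()
         * with the relsim_flags, openings and timeout values chosen above.
         */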
1834         if (error == ERESTART || error == 0) {  
1835                 if (frozen != 0)
1836                         ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1837
1838                 if (error == ERESTART) {
1839                         action_string = "Retrying Command";
1840                         xpt_action(ccb);
1841                 }
1842                 
1843                 if (frozen != 0)
1844                         cam_release_devq(ccb->ccb_h.path,
1845                                          relsim_flags,
1846                                          openings,
1847                                          timeout,
1848                                          /*getcount_only*/0);
1849         }
1850
1851         /*
1852          * If we have an error and are booting verbosely, whine
1853          * *unless* this was a non-retryable selection timeout.
1854          */
1855         if (error != 0 && bootverbose && (sense_flags & SF_NO_PRINT) == 0 &&
1856             !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
1857
1858
1859                 if (action_string == NULL)
1860                         action_string = "Unretryable Error";
1861                 if (error != ERESTART) {
1862                         xpt_print(ccb->ccb_h.path, "error %d\n", error);
1863                 }
1864                 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
1865         }
1866
1867         return (error);
1868 }