1/* $FreeBSD: src/sys/dev/isp/isp_freebsd.c,v 1.32.2.20 2002/10/11 18:49:25 mjacob Exp $ */
2/* $DragonFly: src/sys/dev/disk/isp/isp_freebsd.c,v 1.8 2004/03/15 01:10:43 dillon Exp $ */
3/*
4 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
5 *
6 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29#include "isp_freebsd.h"
30#include <sys/unistd.h>
31#include <sys/kthread.h>
32#include <machine/stdarg.h> /* for use by isp_prt below */
33#include <sys/conf.h>
34#include <sys/ioccom.h>
35#include "isp_ioctl.h"
36
37
38static d_ioctl_t ispioctl;
39static void isp_intr_enable(void *);
40static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
41static void isp_poll(struct cam_sim *);
42static timeout_t isp_watchdog;
43static void isp_kthread(void *);
44static void isp_action(struct cam_sim *, union ccb *);
45
46
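/*
 * Character device switch for the isp<unit> control node created in
 * isp_attach(); only ioctl is wired up, all other entry points are
 * stubs.
 */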
47#define ISP_CDEV_MAJOR 248
48static struct cdevsw isp_cdevsw = {
49 /* name */ "isp",
50 /* maj */ ISP_CDEV_MAJOR,
51 /* flags */ D_TAPE,
52 /* port */ NULL,
53 /* autoq */ 0,
54
55 /* open */ nullopen,
56 /* close */ nullclose,
57 /* read */ noread,
58 /* write */ nowrite,
59 /* ioctl */ ispioctl,
60 /* poll */ nopoll,
61 /* mmap */ nommap,
62 /* strategy */ nostrategy,
63 /* dump */ nodump,
64 /* psize */ nopsize
65};
66
67static struct ispsoftc *isplist = NULL;
68
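/*
 * Platform attach: allocate a CAM devq and SIM per channel, register
 * the bus(es) with XPT, arm an AC_LOST_DEVICE async callback, create
 * the control device node, and, for fibre channel, start a worker
 * kthread that tracks loop state.
 */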
69void
70isp_attach(struct ispsoftc *isp)
71{
72 int primary, secondary;
73 struct ccb_setasync csa;
74 struct cam_devq *devq;
75 struct cam_sim *sim;
76 struct cam_path *path;
77
78 /*
79 * Establish (in case of 12X0) which bus is the primary.
80 */
81
82 primary = 0;
83 secondary = 1;
84
85 /*
86 * Create the device queue for our SIM(s).
87 */
88 devq = cam_simq_alloc(isp->isp_maxcmds);
89 if (devq == NULL) {
90 return;
91 }
92
93 /*
94 * Construct our SIM entry.
95 */
96 ISPLOCK_2_CAMLOCK(isp);
97 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
98 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
99 cam_simq_release(devq); /* leaves 1 ref due to cam_sim_alloc */
100 if (sim == NULL) {
101 CAMLOCK_2_ISPLOCK(isp);
102 return;
103 }
104 CAMLOCK_2_ISPLOCK(isp);
105
106 isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
107 isp->isp_osinfo.ehook.ich_arg = isp;
108 ISPLOCK_2_CAMLOCK(isp);
109 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
110 cam_sim_free(sim);
111 CAMLOCK_2_ISPLOCK(isp);
112 isp_prt(isp, ISP_LOGERR,
113 "could not establish interrupt enable hook");
114 return;
115 }
116
117 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
118 cam_sim_free(sim);
119 CAMLOCK_2_ISPLOCK(isp);
120 return;
121 }
122
123 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
124 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
125 xpt_bus_deregister(cam_sim_path(sim));
126 cam_sim_free(sim);
127 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
128 CAMLOCK_2_ISPLOCK(isp);
129 return;
130 }
131
132 xpt_setup_ccb(&csa.ccb_h, path, 5);
133 csa.ccb_h.func_code = XPT_SASYNC_CB;
134 csa.event_enable = AC_LOST_DEVICE;
135 csa.callback = isp_cam_async;
136 csa.callback_arg = sim;
137 xpt_action((union ccb *)&csa);
138 CAMLOCK_2_ISPLOCK(isp);
139 isp->isp_sim = sim;
140 isp->isp_path = path;
141 /*
142 * Create a kernel thread for fibre channel instances. We
143 * don't have dual channel FC cards.
144 */
145 if (IS_FC(isp)) {
146 ISPLOCK_2_CAMLOCK(isp);
147 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kthread,
148 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) {
149 xpt_bus_deregister(cam_sim_path(sim));
150 cam_sim_free(sim);
151 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
152 CAMLOCK_2_ISPLOCK(isp);
153 isp_prt(isp, ISP_LOGERR, "could not create kthread");
154 return;
155 }
156 CAMLOCK_2_ISPLOCK(isp);
157 }
158
159
160 /*
161 * If we have a second channel, construct SIM entry for that.
162 */
163 if (IS_DUALBUS(isp)) {
164 ISPLOCK_2_CAMLOCK(isp);
165 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
166 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
167 if (sim == NULL) {
168 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
169 xpt_free_path(isp->isp_path);
170 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
171 return;
172 }
173 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
174 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
175 xpt_free_path(isp->isp_path);
176 cam_sim_free(sim);
177 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
178 CAMLOCK_2_ISPLOCK(isp);
179 return;
180 }
181
182 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
183 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
184 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
185 xpt_free_path(isp->isp_path);
186 xpt_bus_deregister(cam_sim_path(sim));
187 cam_sim_free(sim);
188 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
189 CAMLOCK_2_ISPLOCK(isp);
190 return;
191 }
192
193 xpt_setup_ccb(&csa.ccb_h, path, 5);
194 csa.ccb_h.func_code = XPT_SASYNC_CB;
195 csa.event_enable = AC_LOST_DEVICE;
196 csa.callback = isp_cam_async;
197 csa.callback_arg = sim;
198 xpt_action((union ccb *)&csa);
199 CAMLOCK_2_ISPLOCK(isp);
200 isp->isp_sim2 = sim;
201 isp->isp_path2 = path;
202 }
203 /*
204 * Create device nodes
205 */
206 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
207 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
208
209 if (isp->isp_role != ISP_ROLE_NONE) {
210 isp->isp_state = ISP_RUNSTATE;
211 }
212 if (isplist == NULL) {
213 isplist = isp;
214 } else {
215 struct ispsoftc *tmp = isplist;
216 while (tmp->isp_osinfo.next) {
217 tmp = tmp->isp_osinfo.next;
218 }
219 tmp->isp_osinfo.next = isp;
220 }
221
222}
223
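/*
 * Freeze the SIM queue the first time the loop drops; subsequent
 * callers only record the loopdown reason. The freeze is undone in
 * isp_kthread() once good loop state is seen again.
 */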
224static INLINE void
225isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
226{
227 if (isp->isp_osinfo.simqfrozen == 0) {
228 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
229 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
230 ISPLOCK_2_CAMLOCK(isp);
231 xpt_freeze_simq(isp->isp_sim, 1);
232 CAMLOCK_2_ISPLOCK(isp);
233 } else {
234 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
235 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
236 }
237}
238
239static int
240ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, d_thread_t *td)
241{
242 struct ispsoftc *isp;
243 int retval = ENOTTY;
244
245 isp = isplist;
246 while (isp) {
247 if (minor(dev) == device_get_unit(isp->isp_dev)) {
248 break;
249 }
250 isp = isp->isp_osinfo.next;
251 }
252 if (isp == NULL)
253 return (ENXIO);
254
255 switch (cmd) {
256#ifdef ISP_FW_CRASH_DUMP
257 case ISP_GET_FW_CRASH_DUMP:
258 {
259 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
260 size_t sz;
261
262 retval = 0;
263 if (IS_2200(isp))
264 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
265 else
266 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
267 ISP_LOCK(isp);
268 if (ptr && *ptr) {
269 void *uaddr = *((void **) addr);
270 if (copyout(ptr, uaddr, sz)) {
271 retval = EFAULT;
272 } else {
273 *ptr = 0;
274 }
275 } else {
276 retval = ENXIO;
277 }
278 ISP_UNLOCK(isp);
279 break;
280 }
281
282 case ISP_FORCE_CRASH_DUMP:
283 ISP_LOCK(isp);
284 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
285 isp_fw_dump(isp);
286 isp_reinit(isp);
287 ISP_UNLOCK(isp);
288 retval = 0;
289 break;
290#endif
291 case ISP_SDBLEV:
292 {
293 int olddblev = isp->isp_dblev;
294 isp->isp_dblev = *(int *)addr;
295 *(int *)addr = olddblev;
296 retval = 0;
297 break;
298 }
299 case ISP_RESETHBA:
300 ISP_LOCK(isp);
301 isp_reinit(isp);
302 ISP_UNLOCK(isp);
303 retval = 0;
304 break;
305 case ISP_RESCAN:
306 if (IS_FC(isp)) {
307 ISP_LOCK(isp);
308 if (isp_fc_runstate(isp, 5 * 1000000)) {
309 retval = EIO;
310 } else {
311 retval = 0;
312 }
313 ISP_UNLOCK(isp);
314 }
315 break;
316 case ISP_FC_LIP:
317 if (IS_FC(isp)) {
318 ISP_LOCK(isp);
319 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
320 retval = EIO;
321 } else {
322 retval = 0;
323 }
324 ISP_UNLOCK(isp);
325 }
326 break;
327 case ISP_FC_GETDINFO:
328 {
329 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
330 struct lportdb *lp;
331
332 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
333 retval = EINVAL;
334 break;
335 }
336 ISP_LOCK(isp);
337 lp = &FCPARAM(isp)->portdb[ifc->loopid];
338 if (lp->valid) {
339 ifc->loopid = lp->loopid;
340 ifc->portid = lp->portid;
341 ifc->node_wwn = lp->node_wwn;
342 ifc->port_wwn = lp->port_wwn;
343 retval = 0;
344 } else {
345 retval = ENODEV;
346 }
347 ISP_UNLOCK(isp);
348 break;
349 }
350 case ISP_GET_STATS:
351 {
352 isp_stats_t *sp = (isp_stats_t *) addr;
353
354 MEMZERO(sp, sizeof (*sp));
355 sp->isp_stat_version = ISP_STATS_VERSION;
356 sp->isp_type = isp->isp_type;
357 sp->isp_revision = isp->isp_revision;
358 ISP_LOCK(isp);
359 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
360 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
361 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
362 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
363 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
364 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
365 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
366 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
367 ISP_UNLOCK(isp);
368 retval = 0;
369 break;
370 }
371 case ISP_CLR_STATS:
372 ISP_LOCK(isp);
373 isp->isp_intcnt = 0;
374 isp->isp_intbogus = 0;
375 isp->isp_intmboxc = 0;
376 isp->isp_intoasync = 0;
377 isp->isp_rsltccmplt = 0;
378 isp->isp_fphccmplt = 0;
379 isp->isp_rscchiwater = 0;
380 isp->isp_fpcchiwater = 0;
381 ISP_UNLOCK(isp);
382 retval = 0;
383 break;
384 case ISP_FC_GETHINFO:
385 {
386 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
387 MEMZERO(hba, sizeof (*hba));
388 ISP_LOCK(isp);
389 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
390 hba->fc_scsi_supported = 1;
391 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
392 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
393 hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
394 hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
395 ISP_UNLOCK(isp);
396 retval = 0;
397 break;
398 }
399 case ISP_GET_FC_PARAM:
400 {
401 struct isp_fc_param *f = (struct isp_fc_param *) addr;
402
403 if (!IS_FC(isp)) {
404 retval = EINVAL;
405 break;
406 }
407 f->parameter = 0;
408 if (strcmp(f->param_name, "framelength") == 0) {
409 f->parameter = FCPARAM(isp)->isp_maxfrmlen;
410 retval = 0;
411 break;
412 }
413 if (strcmp(f->param_name, "exec_throttle") == 0) {
414 f->parameter = FCPARAM(isp)->isp_execthrottle;
415 retval = 0;
416 break;
417 }
418 if (strcmp(f->param_name, "fullduplex") == 0) {
419 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
420 f->parameter = 1;
421 retval = 0;
422 break;
423 }
424 if (strcmp(f->param_name, "loopid") == 0) {
425 f->parameter = FCPARAM(isp)->isp_loopid;
426 retval = 0;
427 break;
428 }
429 retval = EINVAL;
430 break;
431 }
432 case ISP_SET_FC_PARAM:
433 {
434 struct isp_fc_param *f = (struct isp_fc_param *) addr;
435 u_int32_t param = f->parameter;
436
437 if (!IS_FC(isp)) {
438 retval = EINVAL;
439 break;
440 }
441 f->parameter = 0;
442 if (strcmp(f->param_name, "framelength") == 0) {
443 if (param != 512 && param != 1024 && param != 2048) {
444 retval = EINVAL;
445 break;
446 }
447 FCPARAM(isp)->isp_maxfrmlen = param;
448 retval = 0;
449 break;
450 }
451 if (strcmp(f->param_name, "exec_throttle") == 0) {
452 if (param < 16 || param > 255) {
453 retval = EINVAL;
454 break;
455 }
456 FCPARAM(isp)->isp_execthrottle = param;
457 retval = 0;
458 break;
459 }
460 if (strcmp(f->param_name, "fullduplex") == 0) {
461 if (param != 0 && param != 1) {
462 retval = EINVAL;
463 break;
464 }
465 if (param) {
466 FCPARAM(isp)->isp_fwoptions |=
467 ICBOPT_FULL_DUPLEX;
468 } else {
469 FCPARAM(isp)->isp_fwoptions &=
470 ~ICBOPT_FULL_DUPLEX;
471 }
472 retval = 0;
473 break;
474 }
475 if (strcmp(f->param_name, "loopid") == 0) {
476 if (param < 0 || param > 125) {
477 retval = EINVAL;
478 break;
479 }
480 FCPARAM(isp)->isp_loopid = param;
481 retval = 0;
482 break;
483 }
484 retval = EINVAL;
485 break;
486 }
487 default:
488 break;
489 }
490 return (retval);
491}
492
493static void
494isp_intr_enable(void *arg)
495{
496 struct ispsoftc *isp = arg;
497 if (isp->isp_role != ISP_ROLE_NONE) {
498 ENABLE_INTS(isp);
499 }
500 /* Release our hook so that the boot can continue. */
501 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
502}
503
504/*
505 * Put the target mode functions here, because some are inlines
506 */
507
508#ifdef ISP_TARGET_MODE
509
510static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
511static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
512static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
513static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
514static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
515static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
516static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
517static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
518static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
519static cam_status
520create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
521static void destroy_lun_state(struct ispsoftc *, tstate_t *);
522static void isp_en_lun(struct ispsoftc *, union ccb *);
523static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
524static timeout_t isp_refire_putback_atio;
525static void isp_complete_ctio(union ccb *);
526static void isp_target_putback_atio(union ccb *);
527static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
528static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
529static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
530static int isp_handle_platform_ctio(struct ispsoftc *, void *);
531static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
532static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
533
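/*
 * Lun state bookkeeping: tstate_t entries hang off singly linked
 * chains in isp_osinfo.lun_hash[], indexed by LUN_HASH_FUNC().
 * get_lun_statep() takes a hold reference that rls_lun_statep()
 * releases; destroy_lun_state() will not free a held entry.
 */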
534static INLINE int
535is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
536{
537 tstate_t *tptr;
538 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
539 if (tptr == NULL) {
540 return (0);
541 }
542 do {
543 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
544 return (1);
545 }
546 } while ((tptr = tptr->next) != NULL);
547 return (0);
548}
549
550static INLINE int
551are_any_luns_enabled(struct ispsoftc *isp, int port)
552{
553 int lo, hi;
554 if (IS_DUALBUS(isp)) {
555 lo = (port * (LUN_HASH_SIZE >> 1));
556 hi = lo + (LUN_HASH_SIZE >> 1);
557 } else {
558 lo = 0;
559 hi = LUN_HASH_SIZE;
560 }
561 for (; lo < hi; lo++) {
562 if (isp->isp_osinfo.lun_hash[lo]) {
563 return (1);
564 }
565 }
566 return (0);
567}
568
569static INLINE tstate_t *
570get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
571{
572 tstate_t *tptr = NULL;
573
574 if (lun == CAM_LUN_WILDCARD) {
575 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
576 tptr = &isp->isp_osinfo.tsdflt[bus];
577 tptr->hold++;
578 return (tptr);
579 }
580 } else {
581 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
582 if (tptr == NULL) {
583 return (NULL);
584 }
585 }
586
587 do {
588 if (tptr->lun == lun && tptr->bus == bus) {
589 tptr->hold++;
590 return (tptr);
591 }
592 } while ((tptr = tptr->next) != NULL);
593 return (tptr);
594}
595
596static __inline void
597rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
598{
599 if (tptr->hold)
600 tptr->hold--;
601}
602
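/*
 * The following helpers fake up a semaphore and a condition variable
 * with tsleep()/wakeup(): the psema/vsema pair serializes lun
 * enable/disable requests per bus via TM_BUSY/TM_WANTED, while the
 * cv pair waits for the request status the interrupt path posts in
 * isp_osinfo.rstatus[bus].
 */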
603static __inline int
604isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
605{
606 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
607 isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
608 if (tsleep(&isp->isp_osinfo.tmflags[bus], PCATCH, "i0", 0)) {
609 return (-1);
610 }
611 isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
612 }
613 return (0);
614}
615
616static __inline int
617isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
618{
619 if (tsleep(&isp->isp_osinfo.rstatus[bus], 0, "qt1", timo)) {
620 return (-1);
621 }
622 return (0);
623}
624
625static __inline void
626isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
627{
628 isp->isp_osinfo.rstatus[bus] = status;
629 wakeup(&isp->isp_osinfo.rstatus[bus]);
630}
631
632static __inline void
633isp_vsema_rqe(struct ispsoftc *isp, int bus)
634{
635 if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
636 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
637 wakeup(&isp->isp_osinfo.tmflags[bus]);
638 }
639 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
640}
641
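/*
 * Find the ATIO private data slot with a matching tag by linear
 * scan; callers pass a tag of zero to claim a free (zeroed) slot.
 */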
642static __inline atio_private_data_t *
643isp_get_atpd(struct ispsoftc *isp, int tag)
644{
645 atio_private_data_t *atp;
646 for (atp = isp->isp_osinfo.atpdp;
647 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
648 if (atp->tag == tag)
649 return (atp);
650 }
651 return (NULL);
652}
653
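/*
 * Allocate and hash in a tstate_t for a newly enabled lun. The new
 * entry is returned with hold set to 1 so it cannot be torn down
 * underneath the caller.
 */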
654static cam_status
655create_lun_state(struct ispsoftc *isp, int bus,
656 struct cam_path *path, tstate_t **rslt)
657{
658 cam_status status;
659 lun_id_t lun;
660 int hfx;
661 tstate_t *tptr, *new;
662
663 lun = xpt_path_lun_id(path);
664 if (lun < 0) {
665 return (CAM_LUN_INVALID);
666 }
667 if (is_lun_enabled(isp, bus, lun)) {
668 return (CAM_LUN_ALRDY_ENA);
669 }
670 new = malloc(sizeof (tstate_t), M_DEVBUF, M_WAITOK | M_ZERO);
671 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
672 xpt_path_target_id(path), xpt_path_lun_id(path));
673 if (status != CAM_REQ_CMP) {
674 free(new, M_DEVBUF);
675 return (status);
676 }
677 new->bus = bus;
678 new->lun = lun;
679 SLIST_INIT(&new->atios);
680 SLIST_INIT(&new->inots);
681 new->hold = 1;
682
683 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
684 tptr = isp->isp_osinfo.lun_hash[hfx];
685 if (tptr == NULL) {
686 isp->isp_osinfo.lun_hash[hfx] = new;
687 } else {
688 while (tptr->next)
689 tptr = tptr->next;
690 tptr->next = new;
691 }
692 *rslt = new;
693 return (CAM_REQ_CMP);
694}
695
696static INLINE void
697destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
698{
699 int hfx;
700 tstate_t *lw, *pw;
701
702 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
703 if (tptr->hold) {
704 return;
705 }
706 pw = isp->isp_osinfo.lun_hash[hfx];
707 if (pw == NULL) {
708 return;
709 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
710 isp->isp_osinfo.lun_hash[hfx] = pw->next;
711 } else {
712 lw = pw;
713 pw = lw->next;
714 while (pw) {
715 if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
716 lw->next = pw->next;
717 break;
718 }
719 lw = pw;
720 pw = pw->next;
721 }
722 if (pw == NULL) {
723 return;
724 }
725 }
726 free(tptr, M_DEVBUF);
727}
728
729/*
730 * we enter with our locks held.
731 */
732static void
733isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
734{
735 const char lfmt[] = "Lun now %sabled for target mode on channel %d";
736 struct ccb_en_lun *cel = &ccb->cel;
737 tstate_t *tptr;
738 u_int16_t rstat;
739 int bus, cmd, av, wildcard;
740 lun_id_t lun;
741 target_id_t tgt;
742
743
744 bus = XS_CHANNEL(ccb) & 0x1;
745 tgt = ccb->ccb_h.target_id;
746 lun = ccb->ccb_h.target_lun;
747
748 /*
749 * Do some sanity checking first.
750 */
751
752 if ((lun != CAM_LUN_WILDCARD) &&
753 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
754 ccb->ccb_h.status = CAM_LUN_INVALID;
755 return;
756 }
757
758 if (IS_SCSI(isp)) {
759 sdparam *sdp = isp->isp_param;
760 sdp += bus;
761 if (tgt != CAM_TARGET_WILDCARD &&
762 tgt != sdp->isp_initiator_id) {
763 ccb->ccb_h.status = CAM_TID_INVALID;
764 return;
765 }
766 } else {
767 if (tgt != CAM_TARGET_WILDCARD &&
768 tgt != FCPARAM(isp)->isp_iid) {
769 ccb->ccb_h.status = CAM_TID_INVALID;
770 return;
771 }
772 /*
773 * This is as good a place as any to check f/w capabilities.
774 */
775 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
776 isp_prt(isp, ISP_LOGERR,
777 "firmware does not support target mode");
778 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
779 return;
780 }
781 /*
782 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
783 * XXX: dork with our already fragile enable/disable code.
784 */
785 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
786 isp_prt(isp, ISP_LOGERR,
787 "firmware not SCCLUN capable");
788 }
789 }
790
791 if (tgt == CAM_TARGET_WILDCARD) {
792 if (lun == CAM_LUN_WILDCARD) {
793 wildcard = 1;
794 } else {
795 ccb->ccb_h.status = CAM_LUN_INVALID;
796 return;
797 }
798 } else {
799 wildcard = 0;
800 }
801
802 /*
803 * Next check to see whether this is a target/lun wildcard action.
804 *
805 * If so, we know that we can accept commands for luns that haven't
806 * been enabled yet and send them upstream. Otherwise, we have to
807 * handle them locally (if we see them at all).
808 */
809
810 if (wildcard) {
811 tptr = &isp->isp_osinfo.tsdflt[bus];
812 if (cel->enable) {
813 if (isp->isp_osinfo.tmflags[bus] &
814 TM_WILDCARD_ENABLED) {
815 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
816 return;
817 }
818 ccb->ccb_h.status =
819 xpt_create_path(&tptr->owner, NULL,
820 xpt_path_path_id(ccb->ccb_h.path),
821 xpt_path_target_id(ccb->ccb_h.path),
822 xpt_path_lun_id(ccb->ccb_h.path));
823 if (ccb->ccb_h.status != CAM_REQ_CMP) {
824 return;
825 }
826 SLIST_INIT(&tptr->atios);
827 SLIST_INIT(&tptr->inots);
828 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
829 } else {
830 if ((isp->isp_osinfo.tmflags[bus] &
831 TM_WILDCARD_ENABLED) == 0) {
832 ccb->ccb_h.status = CAM_REQ_CMP;
833 return;
834 }
835 if (tptr->hold) {
836 ccb->ccb_h.status = CAM_SCSI_BUSY;
837 return;
838 }
839 xpt_free_path(tptr->owner);
840 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
841 }
842 }
843
844 /*
845 * Now check to see whether this bus needs to be
846 * enabled/disabled with respect to target mode.
847 */
848 av = bus << 31;
849 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
850 av |= ENABLE_TARGET_FLAG;
851 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
852 if (av) {
853 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
854 if (wildcard) {
855 isp->isp_osinfo.tmflags[bus] &=
856 ~TM_WILDCARD_ENABLED;
857 xpt_free_path(tptr->owner);
858 }
859 return;
860 }
861 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
862 isp_prt(isp, ISP_LOGINFO,
863 "Target Mode enabled on channel %d", bus);
864 } else if (cel->enable == 0 &&
865 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
866 if (are_any_luns_enabled(isp, bus)) {
867 ccb->ccb_h.status = CAM_SCSI_BUSY;
868 return;
869 }
870 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
871 if (av) {
872 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
873 return;
874 }
875 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
876 isp_prt(isp, ISP_LOGINFO,
877 "Target Mode disabled on channel %d", bus);
878 }
879
880 if (wildcard) {
881 ccb->ccb_h.status = CAM_REQ_CMP;
882 return;
883 }
884
885 if (cel->enable) {
886 ccb->ccb_h.status =
887 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
888 if (ccb->ccb_h.status != CAM_REQ_CMP) {
889 return;
890 }
891 } else {
892 tptr = get_lun_statep(isp, bus, lun);
893 if (tptr == NULL) {
894 ccb->ccb_h.status = CAM_LUN_INVALID;
895 return;
896 }
897 }
898
899 if (isp_psema_sig_rqe(isp, bus)) {
900 rls_lun_statep(isp, tptr);
901 if (cel->enable)
902 destroy_lun_state(isp, tptr);
903 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
904 return;
905 }
906
907 if (cel->enable) {
908 u_int32_t seq = isp->isp_osinfo.rollinfo++;
909 int c, n, ulun = lun;
910
911 cmd = RQSTYPE_ENABLE_LUN;
912 c = DFLT_CMND_CNT;
913 n = DFLT_INOT_CNT;
914 if (IS_FC(isp) && lun != 0) {
915 cmd = RQSTYPE_MODIFY_LUN;
916 n = 0;
917 /*
918 * For SCC firmware, we only deal with setting
919 * (enabling or modifying) lun 0.
920 */
921 ulun = 0;
922 }
923 rstat = LUN_ERR;
924 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
925 xpt_print_path(ccb->ccb_h.path);
926 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
927 goto out;
928 }
929 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
930 xpt_print_path(ccb->ccb_h.path);
931 isp_prt(isp, ISP_LOGERR,
932 "wait for ENABLE/MODIFY LUN timed out");
933 goto out;
934 }
935 rstat = isp->isp_osinfo.rstatus[bus];
936 if (rstat != LUN_OK) {
937 xpt_print_path(ccb->ccb_h.path);
938 isp_prt(isp, ISP_LOGERR,
939 "ENABLE/MODIFY LUN returned 0x%x", rstat);
940 goto out;
941 }
942 } else {
943 int c, n, ulun = lun;
944 u_int32_t seq;
945
946 rstat = LUN_ERR;
947 seq = isp->isp_osinfo.rollinfo++;
948 cmd = -RQSTYPE_MODIFY_LUN;
949
950 c = DFLT_CMND_CNT;
951 n = DFLT_INOT_CNT;
952 if (IS_FC(isp) && lun != 0) {
953 n = 0;
954 /*
955 * For SCC firmware, we only deal with setting
956 * (enabling or modifying) lun 0.
957 */
958 ulun = 0;
959 }
960 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
961 xpt_print_path(ccb->ccb_h.path);
962 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
963 goto out;
964 }
965 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
966 xpt_print_path(ccb->ccb_h.path);
967 isp_prt(isp, ISP_LOGERR,
968 "wait for MODIFY LUN timed out");
969 goto out;
970 }
971 rstat = isp->isp_osinfo.rstatus[bus];
972 if (rstat != LUN_OK) {
973 xpt_print_path(ccb->ccb_h.path);
974 isp_prt(isp, ISP_LOGERR,
975 "MODIFY LUN returned 0x%x", rstat);
976 goto out;
977 }
978 if (IS_FC(isp) && lun) {
979 goto out;
980 }
981
982 seq = isp->isp_osinfo.rollinfo++;
983
984 rstat = LUN_ERR;
985 cmd = -RQSTYPE_ENABLE_LUN;
986 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
987 xpt_print_path(ccb->ccb_h.path);
988 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
989 goto out;
990 }
991 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
992 xpt_print_path(ccb->ccb_h.path);
993 isp_prt(isp, ISP_LOGERR,
994 "wait for DISABLE LUN timed out");
995 goto out;
996 }
997 rstat = isp->isp_osinfo.rstatus[bus];
998 if (rstat != LUN_OK) {
999 xpt_print_path(ccb->ccb_h.path);
1000 isp_prt(isp, ISP_LOGWARN,
1001 "DISABLE LUN returned 0x%x", rstat);
1002 goto out;
1003 }
1004 if (are_any_luns_enabled(isp, bus) == 0) {
1005 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1006 if (av) {
1007 isp_prt(isp, ISP_LOGWARN,
1008 "disable target mode on channel %d failed",
1009 bus);
1010 goto out;
1011 }
1012 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1013 xpt_print_path(ccb->ccb_h.path);
1014 isp_prt(isp, ISP_LOGINFO,
1015 "Target Mode disabled on channel %d", bus);
1016 }
1017 }
1018
1019out:
1020 isp_vsema_rqe(isp, bus);
1021
1022 if (rstat != LUN_OK) {
1023 xpt_print_path(ccb->ccb_h.path);
1024 isp_prt(isp, ISP_LOGWARN,
1025 "lun %sable failed", (cel->enable) ? "en" : "dis");
1026 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1027 rls_lun_statep(isp, tptr);
1028 if (cel->enable)
1029 destroy_lun_state(isp, tptr);
1030 } else {
1031 xpt_print_path(ccb->ccb_h.path);
1032 isp_prt(isp, ISP_LOGINFO, lfmt,
1033 (cel->enable) ? "en" : "dis", bus);
1034 rls_lun_statep(isp, tptr);
1035 if (cel->enable == 0) {
1036 destroy_lun_state(isp, tptr);
1037 }
1038 ccb->ccb_h.status = CAM_REQ_CMP;
1039 }
1040}
1041
1042static cam_status
1043isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1044{
1045 tstate_t *tptr;
1046 struct ccb_hdr_slist *lp;
1047 struct ccb_hdr *curelm;
1048 int found;
1049 union ccb *accb = ccb->cab.abort_ccb;
1050
1051 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1052 if (IS_FC(isp) && (accb->ccb_h.target_id !=
1053 ((fcparam *) isp->isp_param)->isp_loopid)) {
1054 return (CAM_PATH_INVALID);
1055 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1056 ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1057 return (CAM_PATH_INVALID);
1058 }
1059 }
1060 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1061 if (tptr == NULL) {
1062 return (CAM_PATH_INVALID);
1063 }
1064 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1065 lp = &tptr->atios;
1066 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1067 lp = &tptr->inots;
1068 } else {
1069 rls_lun_statep(isp, tptr);
1070 return (CAM_UA_ABORT);
1071 }
1072 curelm = SLIST_FIRST(lp);
1073 found = 0;
1074 if (curelm == &accb->ccb_h) {
1075 found = 1;
1076 SLIST_REMOVE_HEAD(lp, sim_links.sle);
1077 } else {
1078 while(curelm != NULL) {
1079 struct ccb_hdr *nextelm;
1080
1081 nextelm = SLIST_NEXT(curelm, sim_links.sle);
1082 if (nextelm == &accb->ccb_h) {
1083 found = 1;
1084 SLIST_NEXT(curelm, sim_links.sle) =
1085 SLIST_NEXT(nextelm, sim_links.sle);
1086 break;
1087 }
1088 curelm = nextelm;
1089 }
1090 }
1091 rls_lun_statep(isp, tptr);
1092 if (found) {
1093 accb->ccb_h.status = CAM_REQ_ABORTED;
1094 return (CAM_REQ_CMP);
1095 }
1096 return(CAM_PATH_INVALID);
1097}
1098
1099static cam_status
1100isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1101{
1102 void *qe;
1103 struct ccb_scsiio *cso = &ccb->csio;
1104 u_int16_t *hp, save_handle;
1105 u_int16_t nxti, optr;
1106 u_int8_t local[QENTRY_LEN];
1107
1108
1109 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1110 xpt_print_path(ccb->ccb_h.path);
1111 printf("Request Queue Overflow in isp_target_start_ctio\n");
1112 return (CAM_RESRC_UNAVAIL);
1113 }
1114 bzero(local, QENTRY_LEN);
1115
1116 /*
1117 * We're either moving data or completing a command here.
1118 */
1119
1120 if (IS_FC(isp)) {
1121 atio_private_data_t *atp;
1122 ct2_entry_t *cto = (ct2_entry_t *) local;
1123
1124 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1125 cto->ct_header.rqs_entry_count = 1;
1126 cto->ct_iid = cso->init_id;
1127 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1128 cto->ct_lun = ccb->ccb_h.target_lun;
1129 }
1130
1131 atp = isp_get_atpd(isp, cso->tag_id);
1132 if (atp == NULL) {
1133 isp_prt(isp, ISP_LOGERR,
1134 "cannot find private data adjunct for tag %x",
1135 cso->tag_id);
1136 return (-1);
1137 }
1138
1139 cto->ct_rxid = cso->tag_id;
1140 if (cso->dxfer_len == 0) {
1141 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1142 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1143 cto->ct_flags |= CT2_SENDSTATUS;
1144 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1145 cto->ct_resid =
1146 atp->orig_datalen - atp->bytes_xfered;
1147 if (cto->ct_resid < 0) {
1148 cto->rsp.m1.ct_scsi_status |=
1149 CT2_DATA_OVER;
1150 } else if (cto->ct_resid > 0) {
1151 cto->rsp.m1.ct_scsi_status |=
1152 CT2_DATA_UNDER;
1153 }
1154 }
1155 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1156 int m = min(cso->sense_len, MAXRESPLEN);
1157 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1158 cto->rsp.m1.ct_senselen = m;
1159 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1160 }
1161 } else {
1162 cto->ct_flags |= CT2_FLAG_MODE0;
1163 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1164 cto->ct_flags |= CT2_DATA_IN;
1165 } else {
1166 cto->ct_flags |= CT2_DATA_OUT;
1167 }
1168 cto->ct_reloff = atp->bytes_xfered;
1169 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1170 cto->ct_flags |= CT2_SENDSTATUS;
1171 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1172 cto->ct_resid =
1173 atp->orig_datalen -
1174 (atp->bytes_xfered + cso->dxfer_len);
1175 if (cto->ct_resid < 0) {
1176 cto->rsp.m0.ct_scsi_status |=
1177 CT2_DATA_OVER;
1178 } else if (cto->ct_resid > 0) {
1179 cto->rsp.m0.ct_scsi_status |=
1180 CT2_DATA_UNDER;
1181 }
1182 } else {
1183 atp->last_xframt = cso->dxfer_len;
1184 }
1185 /*
1186 * If we're sending data and status back together,
1187 * we can't also send back sense data as well.
1188 */
1189 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1190 }
1191
1192 if (cto->ct_flags & CT2_SENDSTATUS) {
1193 isp_prt(isp, ISP_LOGTDEBUG0,
1194 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1195 cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1196 cso->dxfer_len, cto->ct_resid);
1197 cto->ct_flags |= CT2_CCINCR;
1198 atp->state = ATPD_STATE_LAST_CTIO;
1199 } else
1200 atp->state = ATPD_STATE_CTIO;
1201 cto->ct_timeout = 10;
1202 hp = &cto->ct_syshandle;
1203 } else {
1204 ct_entry_t *cto = (ct_entry_t *) local;
1205
1206 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1207 cto->ct_header.rqs_entry_count = 1;
1208 cto->ct_iid = cso->init_id;
1209 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1210 cto->ct_tgt = ccb->ccb_h.target_id;
1211 cto->ct_lun = ccb->ccb_h.target_lun;
1212 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1213 if (AT_HAS_TAG(cso->tag_id)) {
1214 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1215 cto->ct_flags |= CT_TQAE;
1216 }
1217 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1218 cto->ct_flags |= CT_NODISC;
1219 }
1220 if (cso->dxfer_len == 0) {
1221 cto->ct_flags |= CT_NO_DATA;
1222 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1223 cto->ct_flags |= CT_DATA_IN;
1224 } else {
1225 cto->ct_flags |= CT_DATA_OUT;
1226 }
1227 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1228 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1229 cto->ct_scsi_status = cso->scsi_status;
1230 cto->ct_resid = cso->resid;
1231 isp_prt(isp, ISP_LOGTDEBUG0,
1232 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1233 cto->ct_fwhandle, cso->scsi_status, cso->resid,
1234 cso->tag_id);
1235 }
1236 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1237 cto->ct_timeout = 10;
1238 hp = &cto->ct_syshandle;
1239 }
1240
1241 if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1242 xpt_print_path(ccb->ccb_h.path);
1243 printf("No XFLIST pointers for isp_target_start_ctio\n");
1244 return (CAM_RESRC_UNAVAIL);
1245 }
1246
1247
1248 /*
1249 * Call the dma setup routines for this entry (and any subsequent
1250 * CTIOs) if there's data to move, and then tell the f/w it's got
1251 * new things to play with. As with isp_start's usage of DMA setup,
1252 * any swizzling is done in the machine dependent layer. Because
1253 * of this, we put the request onto the queue area first in native
1254 * format.
1255 */
1256
1257 save_handle = *hp;
1258
1259 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1260 case CMD_QUEUED:
1261 ISP_ADD_REQUEST(isp, nxti);
1262 return (CAM_REQ_INPROG);
1263
1264 case CMD_EAGAIN:
1265 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1266 isp_destroy_handle(isp, save_handle);
1267 return (CAM_RESRC_UNAVAIL);
1268
1269 default:
1270 isp_destroy_handle(isp, save_handle);
1271 return (XS_ERR(ccb));
1272 }
1273}
1274
1275static void
1276isp_refire_putback_atio(void *arg)
1277{
1278 int s = splcam();
1279 isp_target_putback_atio(arg);
1280 splx(s);
1281}
1282
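/*
 * Give the ATIO resource back to the firmware so it can accept the
 * next command; on request queue overflow the attempt is retried
 * later from isp_refire_putback_atio().
 */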
1283static void
1284isp_target_putback_atio(union ccb *ccb)
1285{
1286 struct ispsoftc *isp;
1287 struct ccb_scsiio *cso;
1288 u_int16_t nxti, optr;
1289 void *qe;
1290
1291 isp = XS_ISP(ccb);
1292
1293 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1294 (void) timeout(isp_refire_putback_atio, ccb, 10);
1295 isp_prt(isp, ISP_LOGWARN,
1296 "isp_target_putback_atio: Request Queue Overflow");
1297 return;
1298 }
1299 bzero(qe, QENTRY_LEN);
1300 cso = &ccb->csio;
1301 if (IS_FC(isp)) {
1302 at2_entry_t local, *at = &local;
1303 MEMZERO(at, sizeof (at2_entry_t));
1304 at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1305 at->at_header.rqs_entry_count = 1;
1306 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1307 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1308 } else {
1309 at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1310 }
1311 at->at_status = CT_OK;
1312 at->at_rxid = cso->tag_id;
1313 at->at_iid = cso->ccb_h.target_id;
1314 isp_put_atio2(isp, at, qe);
1315 } else {
1316 at_entry_t local, *at = &local;
1317 MEMZERO(at, sizeof (at_entry_t));
1318 at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1319 at->at_header.rqs_entry_count = 1;
1320 at->at_iid = cso->init_id;
1321 at->at_iid |= XS_CHANNEL(ccb) << 7;
1322 at->at_tgt = cso->ccb_h.target_id;
1323 at->at_lun = cso->ccb_h.target_lun;
1324 at->at_status = CT_OK;
1325 at->at_tag_val = AT_GET_TAG(cso->tag_id);
1326 at->at_handle = AT_GET_HANDLE(cso->tag_id);
1327 isp_put_atio(isp, at, qe);
1328 }
1329 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1330 ISP_ADD_REQUEST(isp, nxti);
1331 isp_complete_ctio(ccb);
1332}
1333
1334static void
1335isp_complete_ctio(union ccb *ccb)
1336{
1337 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1338 ccb->ccb_h.status |= CAM_REQ_CMP;
1339 }
1340 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1341 xpt_done(ccb);
1342}
1343
1344/*
1345 * Handle ATIO stuff that the generic code can't.
1346 * This means handling CDBs.
1347 */
1348
1349static int
1350isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1351{
1352 tstate_t *tptr;
1353 int status, bus, iswildcard;
1354 struct ccb_accept_tio *atiop;
1355
1356 /*
1357 * The firmware status (except for the QLTM_SVALID bit)
1358 * indicates why this ATIO was sent to us.
1359 *
1360 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1361 *
1362 * If the DISCONNECTS DISABLED bit is set in the flags field,
1363 * we're still connected on the SCSI bus.
1364 */
1365 status = aep->at_status;
1366 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1367 /*
1368 * Bus Phase Sequence error. We should have sense data
1369 * suggested by the f/w. I'm not sure quite yet what
1370 * to do about this for CAM.
1371 */
1372 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1373 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1374 return (0);
1375 }
1376 if ((status & ~QLTM_SVALID) != AT_CDB) {
1377 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1378 status);
1379 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1380 return (0);
1381 }
1382
1383 bus = GET_BUS_VAL(aep->at_iid);
1384 tptr = get_lun_statep(isp, bus, aep->at_lun);
1385 if (tptr == NULL) {
1386 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1387 iswildcard = 1;
1388 } else {
1389 iswildcard = 0;
1390 }
1391
1392 if (tptr == NULL) {
1393 /*
1394 * Because we can't autofeed sense data back with
1395 * a command for parallel SCSI, we can't give back
1396 * a CHECK CONDITION. We'll give back a BUSY status
1397 * instead. This works out okay because the only
1398 * time we should, in fact, get this, is in the
1399 * case that somebody configured us without the
1400 * blackhole driver, so they get what they deserve.
1401 */
1402 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1403 return (0);
1404 }
1405
1406 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1407 if (atiop == NULL) {
1408 /*
1409 * Because we can't autofeed sense data back with
1410 * a command for parallel SCSI, we can't give back
1411 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1412 * instead. This works out okay because the only time we
1413 * should, in fact, get this, is in the case that we've
1414 * run out of ATIOS.
1415 */
1416 xpt_print_path(tptr->owner);
1417 isp_prt(isp, ISP_LOGWARN,
1418 "no ATIOS for lun %d from initiator %d on channel %d",
1419 aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1420 if (aep->at_flags & AT_TQAE)
1421 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1422 else
1423 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1424 rls_lun_statep(isp, tptr);
1425 return (0);
1426 }
1427 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1428 if (iswildcard) {
1429 atiop->ccb_h.target_id = aep->at_tgt;
1430 atiop->ccb_h.target_lun = aep->at_lun;
1431 }
1432 if (aep->at_flags & AT_NODISC) {
1433 atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1434 } else {
1435 atiop->ccb_h.flags = 0;
1436 }
1437
1438 if (status & QLTM_SVALID) {
1439 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1440 atiop->sense_len = amt;
1441 MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1442 } else {
1443 atiop->sense_len = 0;
1444 }
1445
1446 atiop->init_id = GET_IID_VAL(aep->at_iid);
1447 atiop->cdb_len = aep->at_cdblen;
1448 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1449 atiop->ccb_h.status = CAM_CDB_RECVD;
1450 /*
1451 * Construct a tag 'id' based upon tag value (which may be 0..255)
1452 * and the handle (which we have to preserve).
1453 */
1454 AT_MAKE_TAGID(atiop->tag_id, aep);
1455 if (aep->at_flags & AT_TQAE) {
1456 atiop->tag_action = aep->at_tag_type;
1457 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1458 }
1459 xpt_done((union ccb*)atiop);
1460 isp_prt(isp, ISP_LOGTDEBUG0,
1461 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1462 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1463 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1464 aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1465 "nondisc" : "disconnecting");
1466 rls_lun_statep(isp, tptr);
1467 return (0);
1468}
1469
1470static int
1471isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1472{
1473 lun_id_t lun;
1474 tstate_t *tptr;
1475 struct ccb_accept_tio *atiop;
1476 atio_private_data_t *atp;
1477
1478 /*
1479 * The firmware status (except for the QLTM_SVALID bit)
1480 * indicates why this ATIO was sent to us.
1481 *
1482 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1483 */
1484 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1485 isp_prt(isp, ISP_LOGWARN,
1486 "bogus atio (0x%x) leaked to platform", aep->at_status);
1487 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1488 return (0);
1489 }
1490
1491 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1492 lun = aep->at_scclun;
1493 } else {
1494 lun = aep->at_lun;
1495 }
1496 tptr = get_lun_statep(isp, 0, lun);
1497 if (tptr == NULL) {
1498 isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1499 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1500 }
1501
1502 if (tptr == NULL) {
1503 /*
1504 * What we'd like to know is whether or not we have a listener
1505 * upstream that really hasn't configured yet. If we do, then
1506 * we can give a more sensible reply here. If not, then we can
1507 * reject this out of hand.
1508 *
1509 * Choices for what to send were
1510 *
1511 * Not Ready, Unit Not Self-Configured Yet
1512 * (0x2,0x3e,0x00)
1513 *
1514 * for the former and
1515 *
1516 * Illegal Request, Logical Unit Not Supported
1517 * (0x5,0x25,0x00)
1518 *
1519 * for the latter.
1520 *
1521 * We used to decide whether there was at least one listener
1522 * based upon whether the black hole driver was configured.
1523 * However, recent config(8) changes have made this hard to do
1524 * at this time.
1525 *
1526 */
1527 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1528 return (0);
1529 }
1530
1531 atp = isp_get_atpd(isp, 0);
1532 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1533 if (atiop == NULL || atp == NULL) {
1534 /*
1535 * Because we can't autofeed sense data back with
1536 * a command for parallel SCSI, we can't give back
1537 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1538 * instead. This works out okay because the only time we
1539 * should, in fact, get this, is in the case that we've
1540 * run out of ATIOS.
1541 */
1542 xpt_print_path(tptr->owner);
1543 isp_prt(isp, ISP_LOGWARN,
1544 "no %s for lun %d from initiator %d",
1545 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1546 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1547 rls_lun_statep(isp, tptr);
1548 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1549 return (0);
1550 }
1551 atp->state = ATPD_STATE_ATIO;
1552 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1553 tptr->atio_count--;
1554 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1555 lun, tptr->atio_count);
1556
1557 if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1558 atiop->ccb_h.target_id =
1559 ((fcparam *)isp->isp_param)->isp_loopid;
1560 atiop->ccb_h.target_lun = lun;
1561 }
1562 /*
1563 * We don't get 'suggested' sense data as we do with SCSI cards.
1564 */
1565 atiop->sense_len = 0;
1566
1567 atiop->init_id = aep->at_iid;
1568 atiop->cdb_len = ATIO2_CDBLEN;
1569 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1570 atiop->ccb_h.status = CAM_CDB_RECVD;
1571 atiop->tag_id = aep->at_rxid;
1572 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1573 case ATIO2_TC_ATTR_SIMPLEQ:
1574 atiop->tag_action = MSG_SIMPLE_Q_TAG;
1575 break;
1576 case ATIO2_TC_ATTR_HEADOFQ:
1577 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1578 break;
1579 case ATIO2_TC_ATTR_ORDERED:
1580 atiop->tag_action = MSG_ORDERED_Q_TAG;
1581 break;
1582 case ATIO2_TC_ATTR_ACAQ: /* ?? */
1583 case ATIO2_TC_ATTR_UNTAGGED:
1584 default:
1585 atiop->tag_action = 0;
1586 break;
1587 }
1588 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1589
1590 atp->tag = atiop->tag_id;
1591 atp->lun = lun;
1592 atp->orig_datalen = aep->at_datalen;
1593 atp->last_xframt = 0;
1594 atp->bytes_xfered = 0;
1595 atp->state = ATPD_STATE_CAM;
1596 xpt_done((union ccb*)atiop);
1597
1598 isp_prt(isp, ISP_LOGTDEBUG0,
1599 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1600 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1601 lun, aep->at_taskflags, aep->at_datalen);
1602 rls_lun_statep(isp, tptr);
1603 return (0);
1604}
1605
1606static int
1607isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1608{
1609 union ccb *ccb;
1610 int sentstatus, ok, notify_cam, resid = 0;
1611 u_int16_t tval;
1612
1613 /*
1614 * CTIO and CTIO2 are close enough....
1615 */
1616
1617 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1618 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1619 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1620
1621 if (IS_FC(isp)) {
1622 ct2_entry_t *ct = arg;
1623 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1624 if (atp == NULL) {
1625 isp_prt(isp, ISP_LOGERR,
1626 "cannot find adjunct for %x after I/O",
1627 ct->ct_rxid);
1628 return (0);
1629 }
1630 sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1631 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1632 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1633 ccb->ccb_h.status |= CAM_SENT_SENSE;
1634 }
1635 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1636 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1637 resid = ct->ct_resid;
1638 atp->bytes_xfered += (atp->last_xframt - resid);
1639 atp->last_xframt = 0;
1640 }
1641 if (sentstatus || !ok) {
1642 atp->tag = 0;
1643 }
1644 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1645 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1646 ct->ct_rxid, ct->ct_status, ct->ct_flags,
1647 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1648 resid, sentstatus? "FIN" : "MID");
1649 tval = ct->ct_rxid;
1650
1651 /* XXX: should really come after isp_complete_ctio */
1652 atp->state = ATPD_STATE_PDON;
1653 } else {
1654 ct_entry_t *ct = arg;
1655 sentstatus = ct->ct_flags & CT_SENDSTATUS;
1656 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1657 /*
1658 * We *ought* to be able to get back to the original ATIO
1659 * here, but for some reason this gets lost. It's just as
1660 * well because it's squirrelled away as part of periph
1661 * private data.
1662 *
1663 * We can live without it as long as we continue to use
1664 * the auto-replenish feature for CTIOs.
1665 */
1666 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1667 if (ct->ct_status & QLTM_SVALID) {
1668 char *sp = (char *)ct;
1669 sp += CTIO_SENSE_OFFSET;
1670 ccb->csio.sense_len =
1671 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1672 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1673 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1674 }
1675 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1676 resid = ct->ct_resid;
1677 }
1678 isp_prt(isp, ISP_LOGTDEBUG0,
1679 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1680 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1681 ct->ct_status, ct->ct_flags, resid,
1682 sentstatus? "FIN" : "MID");
1683 tval = ct->ct_fwhandle;
1684 }
1685 ccb->csio.resid += resid;
1686
1687 /*
1688 * We're here either because intermediate data transfers are done
1689 * and/or the final status CTIO (which may have joined with a
1690 * Data Transfer) is done.
1691 *
1692 * In any case, for this platform, the upper layers figure out
1693 * what to do next, so all we do here is collect status and
1694 * pass information along. Any DMA handles have already been
1695 * freed.
1696 */
1697 if (notify_cam == 0) {
1698 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
1699 return (0);
1700 }
1701
1702 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1703 (sentstatus)? " FINAL " : "MIDTERM ", tval);
1704
1705 if (!ok) {
1706 isp_target_putback_atio(ccb);
1707 } else {
1708 isp_complete_ctio(ccb);
1709
1710 }
1711 return (0);
1712}
1713
1714static int
1715isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1716{
1717 return (0); /* XXXX */
1718}
1719
1720static int
1721isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1722{
1723
1724 switch (inp->in_status) {
1725 case IN_PORT_LOGOUT:
1726 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1727 inp->in_iid);
1728 break;
1729 case IN_PORT_CHANGED:
1730 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1731 inp->in_iid);
1732 break;
1733 case IN_GLOBAL_LOGO:
1734 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1735 break;
1736 case IN_ABORT_TASK:
1737 {
1738 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1739 struct ccb_immed_notify *inot = NULL;
1740
1741 if (atp) {
1742 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1743 if (tptr) {
1744 inot = (struct ccb_immed_notify *)
1745 SLIST_FIRST(&tptr->inots);
1746 if (inot) {
1747 SLIST_REMOVE_HEAD(&tptr->inots,
1748 sim_links.sle);
1749 }
1750 }
1751 isp_prt(isp, ISP_LOGWARN,
1752 "abort task RX_ID %x IID %d state %d",
1753 inp->in_seqid, inp->in_iid, atp->state);
1754 } else {
1755 isp_prt(isp, ISP_LOGWARN,
1756 "abort task RX_ID %x from iid %d, state unknown",
1757 inp->in_seqid, inp->in_iid);
1758 }
1759 if (inot) {
1760 inot->initiator_id = inp->in_iid;
1761 inot->sense_len = 0;
1762 inot->message_args[0] = MSG_ABORT_TAG;
1763 inot->message_args[1] = inp->in_seqid & 0xff;
1764 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1765 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1766 xpt_done((union ccb *)inot);
1767 }
1768 break;
1769 }
1770 default:
1771 break;
1772 }
1773 return (0);
1774}
1775#endif
1776
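/*
 * CAM async event callback. For AC_LOST_DEVICE on parallel SCSI the
 * target's goal flags are pushed back toward safe defaults and an
 * ISPCTL_UPDATE_PARAMS is issued so negotiation starts over.
 */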
1777static void
1778isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1779{
1780 struct cam_sim *sim;
1781 struct ispsoftc *isp;
1782
1783 sim = (struct cam_sim *)cbarg;
1784 isp = (struct ispsoftc *) cam_sim_softc(sim);
1785 switch (code) {
1786 case AC_LOST_DEVICE:
1787 if (IS_SCSI(isp)) {
1788 u_int16_t oflags, nflags;
1789 sdparam *sdp = isp->isp_param;
1790 int tgt;
1791
1792 tgt = xpt_path_target_id(path);
1793 if (tgt >= 0) {
1794 sdp += cam_sim_bus(sim);
1795 ISP_LOCK(isp);
1796 nflags = sdp->isp_devparam[tgt].nvrm_flags;
1797#ifndef ISP_TARGET_MODE
1798 nflags &= DPARM_SAFE_DFLT;
1799 if (isp->isp_loaded_fw) {
1800 nflags |= DPARM_NARROW | DPARM_ASYNC;
1801 }
1802#else
1803 nflags = DPARM_DEFAULT;
1804#endif
1805 oflags = sdp->isp_devparam[tgt].goal_flags;
1806 sdp->isp_devparam[tgt].goal_flags = nflags;
1807 sdp->isp_devparam[tgt].dev_update = 1;
1808 isp->isp_update |= (1 << cam_sim_bus(sim));
1809 (void) isp_control(isp,
1810 ISPCTL_UPDATE_PARAMS, NULL);
1811 sdp->isp_devparam[tgt].goal_flags = oflags;
1812 ISP_UNLOCK(isp);
1813 }
1814 }
1815 break;
1816 default:
1817 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1818 break;
1819 }
1820}
1821
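/*
 * CAM polling entry point: sample the interrupt status registers and
 * run the interrupt handler by hand if anything is pending.
 */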
1822static void
1823isp_poll(struct cam_sim *sim)
1824{
1825 struct ispsoftc *isp = cam_sim_softc(sim);
1826 u_int16_t isr, sema, mbox;
1827
1828 ISP_LOCK(isp);
1829 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1830 isp_intr(isp, isr, sema, mbox);
1831 }
1832 ISP_UNLOCK(isp);
1833}
1834
1835
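/*
 * Per-command timeout handler armed when an XPT_SCSI_IO is queued.
 * A timed-out command first gets a grace period (a SYNC_ALL marker
 * is queued to flush the firmware); if it is still outstanding on
 * the next firing it is aborted and completed as CAM_CMD_TIMEOUT.
 */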
1836static void
1837isp_watchdog(void *arg)
1838{
1839 XS_T *xs = arg;
1840 struct ispsoftc *isp = XS_ISP(xs);
1841 u_int32_t handle;
1842 int iok;
1843
1844 /*
1845 * We've decided this command is dead. Make sure we're not trying
1846 * to kill a command that's already dead by getting its handle
1847 * and seeing whether it's still alive.
1848 */
1849 ISP_LOCK(isp);
1850 iok = isp->isp_osinfo.intsok;
1851 isp->isp_osinfo.intsok = 0;
1852 handle = isp_find_handle(isp, xs);
1853 if (handle) {
1854 u_int16_t isr, sema, mbox;
1855
1856 if (XS_CMD_DONE_P(xs)) {
1857 isp_prt(isp, ISP_LOGDEBUG1,
1858 "watchdog found done cmd (handle 0x%x)", handle);
1859 ISP_UNLOCK(isp);
1860 return;
1861 }
1862
1863 if (XS_CMD_WDOG_P(xs)) {
1864 isp_prt(isp, ISP_LOGDEBUG2,
1865 "recursive watchdog (handle 0x%x)", handle);
1866 ISP_UNLOCK(isp);
1867 return;
1868 }
1869
1870 XS_CMD_S_WDOG(xs);
1871 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1872 isp_intr(isp, isr, sema, mbox);
1873 }
1874 if (XS_CMD_DONE_P(xs)) {
1875 isp_prt(isp, ISP_LOGDEBUG2,
1876 "watchdog cleanup for handle 0x%x", handle);
1877 xpt_done((union ccb *) xs);
1878 } else if (XS_CMD_GRACE_P(xs)) {
1879 /*
1880 * Make sure the command is *really* dead before we
1881 * release the handle (and DMA resources) for reuse.
1882 */
1883 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1884
1885 /*
1886 * After this point, the command is really dead.
1887 */
1888 if (XS_XFRLEN(xs)) {
1889 ISP_DMAFREE(isp, xs, handle);
1890 }
1891 isp_destroy_handle(isp, handle);
1892 xpt_print_path(xs->ccb_h.path);
1893 isp_prt(isp, ISP_LOGWARN,
1894 "watchdog timeout for handle 0x%x", handle);
1895 XS_SETERR(xs, CAM_CMD_TIMEOUT);
1896 XS_CMD_C_WDOG(xs);
1897 isp_done(xs);
1898 } else {
1899 u_int16_t nxti, optr;
1900 ispreq_t local, *mp= &local, *qe;
1901
1902 XS_CMD_C_WDOG(xs);
1903 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1904 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1905 ISP_UNLOCK(isp);
1906 return;
1907 }
1908 XS_CMD_S_GRACE(xs);
1909 MEMZERO((void *) mp, sizeof (*mp));
1910 mp->req_header.rqs_entry_count = 1;
1911 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1912 mp->req_modifier = SYNC_ALL;
1913 mp->req_target = XS_CHANNEL(xs) << 7;
1914 isp_put_request(isp, mp, qe);
1915 ISP_ADD_REQUEST(isp, nxti);
1916 }
1917 } else {
1918 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1919 }
1920 isp->isp_osinfo.intsok = iok;
1921 ISP_UNLOCK(isp);
1922}
1923
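/*
 * Fibre channel worker thread, woken when loop state needs to be
 * re-evaluated (e.g. on CMD_RQLATER in isp_action()); it loops on
 * isp_fc_runstate() and releases any loopdown SIM queue freeze once
 * the loop is usable.
 */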
1924static void
1925isp_kthread(void *arg)
1926{
1927 struct ispsoftc *isp = arg;
1928 int s;
1929
1930 s = splcam();
1931 isp->isp_osinfo.intsok = 1;
1932
1933 /*
1934 * The first loop handles the case where we have not yet
1935 * gotten good fibre channel state.
1936 */
1937 for (;;) {
1938 int wasfrozen;
1939
1940 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1941 while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1942 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1943 if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1944 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1945 if (FCPARAM(isp)->loop_seen_once == 0 ||
1946 isp->isp_osinfo.ktmature == 0) {
1947 break;
1948 }
1949 }
1950 tsleep(isp_kthread, 0, "isp_fcthrd", hz);
1951
1952 }
1953
1954 /*
1955 * Even if we didn't get good loop state we may be
1956 * unfreezing the SIMQ so that we can kill off
1957 * commands (if we've never seen loop before, for example).
1958 */
1959 isp->isp_osinfo.ktmature = 1;
1960 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1961 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1962 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1963 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
1964 ISPLOCK_2_CAMLOCK(isp);
1965 xpt_release_simq(isp->isp_sim, 1);
1966 CAMLOCK_2_ISPLOCK(isp);
1967 }
1968 tsleep(&isp->isp_osinfo.kthread, 0, "isp_fc_worker", 0);
1969 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
1970 }
1971}
1972
1973static void
1974isp_action(struct cam_sim *sim, union ccb *ccb)
1975{
1976 int bus, tgt, error;
1977 struct ispsoftc *isp;
1978 struct ccb_trans_settings *cts;
1979
1980 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1981
1982 isp = (struct ispsoftc *)cam_sim_softc(sim);
1983 ccb->ccb_h.sim_priv.entries[0].field = 0;
1984 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
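	/*
	 * Note that the chip is brought to RUNSTATE lazily, below, when the
	 * first XPT_SCSI_IO arrives and we find it isn't already running.
	 */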
1985 if (isp->isp_state != ISP_RUNSTATE &&
1986 ccb->ccb_h.func_code == XPT_SCSI_IO) {
1987 CAMLOCK_2_ISPLOCK(isp);
1988 isp_init(isp);
1989 if (isp->isp_state != ISP_INITSTATE) {
1990 ISP_UNLOCK(isp);
1991 /*
1992 * Lie. Say it was a selection timeout.
1993 */
1994 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1995 xpt_freeze_devq(ccb->ccb_h.path, 1);
1996 xpt_done(ccb);
1997 return;
1998 }
1999 isp->isp_state = ISP_RUNSTATE;
2000 ISPLOCK_2_CAMLOCK(isp);
2001 }
2002 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2003
2004
2005 switch (ccb->ccb_h.func_code) {
2006 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2007 /*
2008 * Do a couple of preliminary checks...
2009 */
2010 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2011 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2012 ccb->ccb_h.status = CAM_REQ_INVALID;
2013 xpt_done(ccb);
2014 break;
2015 }
2016 }
2017#ifdef DIAGNOSTIC
2018 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2019 ccb->ccb_h.status = CAM_PATH_INVALID;
2020 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2021 ccb->ccb_h.status = CAM_PATH_INVALID;
2022 }
2023 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2024 isp_prt(isp, ISP_LOGERR,
2025 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2026 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2027 xpt_done(ccb);
2028 break;
2029 }
2030#endif
2031 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2032 CAMLOCK_2_ISPLOCK(isp);
2033 error = isp_start((XS_T *) ccb);
2034 switch (error) {
2035 case CMD_QUEUED:
2036 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2037 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2038 u_int64_t ticks = (u_int64_t) hz;
2039 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2040 ticks = 60 * 1000 * ticks;
2041 else
2042 ticks = ccb->ccb_h.timeout * hz;
2043 ticks = ((ticks + 999) / 1000) + hz + hz;
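				/*
				 * e.g. with hz = 100 and the CAM default
				 * timeout this works out to
				 * ((60 * 1000 * 100 + 999) / 1000) + 200 =
				 * 6200 ticks, i.e. about 62 seconds of slack
				 * before the watchdog first fires.
				 */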
2044 if (ticks >= 0x80000000) {
2045 isp_prt(isp, ISP_LOGERR,
2046 "timeout overflow");
2047 ticks = 0x7fffffff;
2048 }
2049 ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2050 (caddr_t)ccb, (int)ticks);
2051 } else {
2052 callout_handle_init(&ccb->ccb_h.timeout_ch);
2053 }
2054 ISPLOCK_2_CAMLOCK(isp);
2055 break;
2056 case CMD_RQLATER:
2057 /*
2058 * This can only happen for Fibre Channel
2059 */
2060 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2061 if (FCPARAM(isp)->loop_seen_once == 0 &&
2062 isp->isp_osinfo.ktmature) {
2063 ISPLOCK_2_CAMLOCK(isp);
2064 XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2065 xpt_done(ccb);
2066 break;
2067 }
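			/*
			 * Otherwise kick the FC worker thread to re-evaluate
			 * loop state, freeze the simq for the loop-down case,
			 * and hand the CCB back to CAM to be requeued once
			 * things settle.
			 */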
41c20dac 2068 wakeup(&isp->isp_osinfo.kthread);
2069 isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2070 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2071 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2072 ISPLOCK_2_CAMLOCK(isp);
2073 xpt_done(ccb);
2074 break;
2075 case CMD_EAGAIN:
2076 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2077 ISPLOCK_2_CAMLOCK(isp);
2078 xpt_done(ccb);
2079 break;
2080 case CMD_COMPLETE:
2081 isp_done((struct ccb_scsiio *) ccb);
2082 ISPLOCK_2_CAMLOCK(isp);
2083 break;
2084 default:
2085 isp_prt(isp, ISP_LOGERR,
2086 "What's this? 0x%x at %d in file %s",
2087 error, __LINE__, __FILE__);
2088 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2089 xpt_done(ccb);
2090 ISPLOCK_2_CAMLOCK(isp);
2091 }
2092 break;
2093
2094#ifdef ISP_TARGET_MODE
2095 case XPT_EN_LUN: /* Enable LUN as a target */
2096 {
2097 int iok;
2098 CAMLOCK_2_ISPLOCK(isp);
2099 iok = isp->isp_osinfo.intsok;
2100 isp->isp_osinfo.intsok = 0;
2101 isp_en_lun(isp, ccb);
2102 isp->isp_osinfo.intsok = iok;
2103 ISPLOCK_2_CAMLOCK(isp);
2104 xpt_done(ccb);
2105 break;
2106 }
2107 case XPT_NOTIFY_ACK: /* recycle notify ack */
2108 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
2109 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2110 {
2111 tstate_t *tptr =
2112 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2113 if (tptr == NULL) {
2114 ccb->ccb_h.status = CAM_LUN_INVALID;
2115 xpt_done(ccb);
2116 break;
2117 }
2118 ccb->ccb_h.sim_priv.entries[0].field = 0;
2119 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2120 ccb->ccb_h.flags = 0;
2121
2122 CAMLOCK_2_ISPLOCK(isp);
2123 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2124 /*
2125 * Note that the command itself may not be done-
2126 * it may not even have had the first CTIO sent.
2127 */
2128 tptr->atio_count++;
2129 isp_prt(isp, ISP_LOGTDEBUG0,
2130 "Put FREE ATIO2, lun %d, count now %d",
2131 ccb->ccb_h.target_lun, tptr->atio_count);
2132 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2133 sim_links.sle);
2134 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2135 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2136 sim_links.sle);
2137 } else {
2138 ;
2139 }
2140 rls_lun_statep(isp, tptr);
2141 ccb->ccb_h.status = CAM_REQ_INPROG;
2142 ISPLOCK_2_CAMLOCK(isp);
2143 break;
2144 }
2145 case XPT_CONT_TARGET_IO:
2146 {
2147 CAMLOCK_2_ISPLOCK(isp);
2148 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2149 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2150 isp_prt(isp, ISP_LOGWARN,
2151 "XPT_CONT_TARGET_IO: status 0x%x",
2152 ccb->ccb_h.status);
2153 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2154 ISPLOCK_2_CAMLOCK(isp);
2155 xpt_done(ccb);
2156 } else {
2157 ISPLOCK_2_CAMLOCK(isp);
2158 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2159 }
2160 break;
2161 }
2162#endif
2163 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2164
2165 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2166 tgt = ccb->ccb_h.target_id;
2167 tgt |= (bus << 16);
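		/*
		 * Pack the bus number into the upper 16 bits alongside the
		 * target id so isp_control() gets both in a single int -
		 * the same bus << 16 | target encoding that is decoded for
		 * ISPASYNC_NEW_TGT_PARAMS below.
		 */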
2168
2169 CAMLOCK_2_ISPLOCK(isp);
2170 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2171 ISPLOCK_2_CAMLOCK(isp);
2172 if (error) {
2173 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2174 } else {
2175 ccb->ccb_h.status = CAM_REQ_CMP;
2176 }
2177 xpt_done(ccb);
2178 break;
2179 case XPT_ABORT: /* Abort the specified CCB */
2180 {
2181 union ccb *accb = ccb->cab.abort_ccb;
2182 CAMLOCK_2_ISPLOCK(isp);
2183 switch (accb->ccb_h.func_code) {
2184#ifdef ISP_TARGET_MODE
2185 case XPT_ACCEPT_TARGET_IO:
2186 case XPT_IMMED_NOTIFY:
2187 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2188 break;
2189 case XPT_CONT_TARGET_IO:
2190 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2191 ccb->ccb_h.status = CAM_UA_ABORT;
2192 break;
2193#endif
2194 case XPT_SCSI_IO:
2195 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2196 if (error) {
2197 ccb->ccb_h.status = CAM_UA_ABORT;
2198 } else {
2199 ccb->ccb_h.status = CAM_REQ_CMP;
2200 }
2201 break;
2202 default:
2203 ccb->ccb_h.status = CAM_REQ_INVALID;
2204 break;
2205 }
2206 ISPLOCK_2_CAMLOCK(isp);
2207 xpt_done(ccb);
2208 break;
2209 }
2210#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
2211 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2212 cts = &ccb->cts;
2213 if (!IS_CURRENT_SETTINGS(cts)) {
2214 ccb->ccb_h.status = CAM_REQ_INVALID;
2215 xpt_done(ccb);
2216 break;
2217 }
2218 tgt = cts->ccb_h.target_id;
2219 CAMLOCK_2_ISPLOCK(isp);
2220 if (IS_SCSI(isp)) {
2221 sdparam *sdp = isp->isp_param;
2222 u_int16_t *dptr;
2223
2224 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2225
2226 sdp += bus;
2227 /*
2228 * We always update (internally) from goal_flags
2229 * so any request to change settings just gets
2230 * vectored to that location.
2231 */
2232 dptr = &sdp->isp_devparam[tgt].goal_flags;
2233
2234 /*
2235 * Note that these operations affect the
 2236			 * goal flags (goal_flags)- not
2237 * the current state flags. Then we mark
2238 * things so that the next operation to
2239 * this HBA will cause the update to occur.
2240 */
2241 if (cts->valid & CCB_TRANS_DISC_VALID) {
2242 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2243 *dptr |= DPARM_DISC;
2244 } else {
2245 *dptr &= ~DPARM_DISC;
2246 }
2247 }
2248 if (cts->valid & CCB_TRANS_TQ_VALID) {
2249 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2250 *dptr |= DPARM_TQING;
2251 } else {
2252 *dptr &= ~DPARM_TQING;
2253 }
2254 }
2255 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2256 switch (cts->bus_width) {
2257 case MSG_EXT_WDTR_BUS_16_BIT:
2258 *dptr |= DPARM_WIDE;
2259 break;
2260 default:
2261 *dptr &= ~DPARM_WIDE;
2262 }
2263 }
2264 /*
2265 * Any SYNC RATE of nonzero and SYNC_OFFSET
2266 * of nonzero will cause us to go to the
2267 * selected (from NVRAM) maximum value for
2268 * this device. At a later point, we'll
2269 * allow finer control.
2270 */
2271 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2272 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2273 (cts->sync_offset > 0)) {
2274 *dptr |= DPARM_SYNC;
2275 } else {
2276 *dptr &= ~DPARM_SYNC;
2277 }
2278 *dptr |= DPARM_SAFE_DFLT;
2279 isp_prt(isp, ISP_LOGDEBUG0,
2280 "SET bus %d targ %d to flags %x off %x per %x",
2281 bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2282 sdp->isp_devparam[tgt].goal_offset,
2283 sdp->isp_devparam[tgt].goal_period);
2284 sdp->isp_devparam[tgt].dev_update = 1;
2285 isp->isp_update |= (1 << bus);
2286 }
2287 ISPLOCK_2_CAMLOCK(isp);
2288 ccb->ccb_h.status = CAM_REQ_CMP;
2289 xpt_done(ccb);
2290 break;
2291 case XPT_GET_TRAN_SETTINGS:
2292 cts = &ccb->cts;
2293 tgt = cts->ccb_h.target_id;
2294 CAMLOCK_2_ISPLOCK(isp);
2295 if (IS_FC(isp)) {
2296 /*
2297 * a lot of normal SCSI things don't make sense.
2298 */
2299 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2300 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2301 /*
2302 * How do you measure the width of a high
2303 * speed serial bus? Well, in bytes.
2304 *
2305 * Offset and period make no sense, though, so we set
2306 * (above) a 'base' transfer speed to be gigabit.
2307 */
2308 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2309 } else {
2310 sdparam *sdp = isp->isp_param;
2311 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2312 u_int16_t dval, pval, oval;
2313
2314 sdp += bus;
2315
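			/*
			 * 'Current' settings are refreshed from the chip via
			 * ISPCTL_UPDATE_PARAMS and reported from the actv_*
			 * fields; otherwise we report the nvrm_* values taken
			 * from NVRAM.
			 */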
2316 if (IS_CURRENT_SETTINGS(cts)) {
2317 sdp->isp_devparam[tgt].dev_refresh = 1;
2318 isp->isp_update |= (1 << bus);
2319 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2320 NULL);
2321 dval = sdp->isp_devparam[tgt].actv_flags;
2322 oval = sdp->isp_devparam[tgt].actv_offset;
2323 pval = sdp->isp_devparam[tgt].actv_period;
2324 } else {
2325 dval = sdp->isp_devparam[tgt].nvrm_flags;
2326 oval = sdp->isp_devparam[tgt].nvrm_offset;
2327 pval = sdp->isp_devparam[tgt].nvrm_period;
2328 }
2329
2330 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2331
2332 if (dval & DPARM_DISC) {
2333 cts->flags |= CCB_TRANS_DISC_ENB;
2334 }
2335 if (dval & DPARM_TQING) {
2336 cts->flags |= CCB_TRANS_TAG_ENB;
2337 }
2338 if (dval & DPARM_WIDE) {
2339 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2340 } else {
2341 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2342 }
2343 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2344 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2345
2346 if ((dval & DPARM_SYNC) && oval != 0) {
2347 cts->sync_period = pval;
2348 cts->sync_offset = oval;
2349 cts->valid |=
2350 CCB_TRANS_SYNC_RATE_VALID |
2351 CCB_TRANS_SYNC_OFFSET_VALID;
2352 }
2353 isp_prt(isp, ISP_LOGDEBUG0,
2354 "GET %s bus %d targ %d to flags %x off %x per %x",
2355 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2356 bus, tgt, dval, oval, pval);
2357 }
2358 ISPLOCK_2_CAMLOCK(isp);
2359 ccb->ccb_h.status = CAM_REQ_CMP;
2360 xpt_done(ccb);
2361 break;
2362
2363 case XPT_CALC_GEOMETRY:
2364 {
2365 struct ccb_calc_geometry *ccg;
2366 u_int32_t secs_per_cylinder;
2367 u_int32_t size_mb;
2368
2369 ccg = &ccb->ccg;
2370 if (ccg->block_size == 0) {
2371 isp_prt(isp, ISP_LOGERR,
2372 "%d.%d XPT_CALC_GEOMETRY block size 0?",
2373 ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2374 ccb->ccb_h.status = CAM_REQ_INVALID;
2375 xpt_done(ccb);
2376 break;
2377 }
2378 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2379 if (size_mb > 1024) {
2380 ccg->heads = 255;
2381 ccg->secs_per_track = 63;
2382 } else {
2383 ccg->heads = 64;
2384 ccg->secs_per_track = 32;
2385 }
2386 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2387 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
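		/*
		 * e.g. a 4GB volume of 512 byte blocks: volume_size is
		 * 8388608 blocks, size_mb = 8388608 / (1048576 / 512) = 4096,
		 * so we report 255 heads and 63 sectors/track, giving
		 * 8388608 / 16065 = 522 cylinders.
		 */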
2388 ccb->ccb_h.status = CAM_REQ_CMP;
2389 xpt_done(ccb);
2390 break;
2391 }
2392 case XPT_RESET_BUS: /* Reset the specified bus */
2393 bus = cam_sim_bus(sim);
2394 CAMLOCK_2_ISPLOCK(isp);
2395 error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2396 ISPLOCK_2_CAMLOCK(isp);
2397 if (error)
2398 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2399 else {
2400 if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2401 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2402 else if (isp->isp_path != NULL)
2403 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2404 ccb->ccb_h.status = CAM_REQ_CMP;
2405 }
2406 xpt_done(ccb);
2407 break;
2408
2409 case XPT_TERM_IO: /* Terminate the I/O process */
2410 ccb->ccb_h.status = CAM_REQ_INVALID;
2411 xpt_done(ccb);
2412 break;
2413
2414 case XPT_PATH_INQ: /* Path routing inquiry */
2415 {
2416 struct ccb_pathinq *cpi = &ccb->cpi;
2417
2418 cpi->version_num = 1;
2419#ifdef ISP_TARGET_MODE
2420 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2421#else
2422 cpi->target_sprt = 0;
2423#endif
2424 cpi->hba_eng_cnt = 0;
2425 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2426 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2427 cpi->bus_id = cam_sim_bus(sim);
2428 if (IS_FC(isp)) {
2429 cpi->hba_misc = PIM_NOBUSRESET;
2430 /*
2431 * Because our loop ID can shift from time to time,
2432 * make our initiator ID out of range of our bus.
2433 */
2434 cpi->initiator_id = cpi->max_target + 1;
2435
2436 /*
2437 * Set base transfer capabilities for Fibre Channel.
2438 * Technically not correct because we don't know
2439 * what media we're running on top of- but we'll
2440 * look good if we always say 100MB/s.
2441 */
2442 if (FCPARAM(isp)->isp_gbspeed == 2)
2443 cpi->base_transfer_speed = 200000;
2444 else
2445 cpi->base_transfer_speed = 100000;
2446 cpi->hba_inquiry = PI_TAG_ABLE;
2447 } else {
2448 sdparam *sdp = isp->isp_param;
2449 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2450 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2451 cpi->hba_misc = 0;
2452 cpi->initiator_id = sdp->isp_initiator_id;
2453 cpi->base_transfer_speed = 3300;
2454 }
2455 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2456 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2457 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2458 cpi->unit_number = cam_sim_unit(sim);
2459 cpi->ccb_h.status = CAM_REQ_CMP;
2460 xpt_done(ccb);
2461 break;
2462 }
2463 default:
2464 ccb->ccb_h.status = CAM_REQ_INVALID;
2465 xpt_done(ccb);
2466 break;
2467 }
2468}
2469
2470#define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2471void
2472isp_done(struct ccb_scsiio *sccb)
2473{
2474 struct ispsoftc *isp = XS_ISP(sccb);
2475
2476 if (XS_NOERR(sccb))
2477 XS_SETERR(sccb, CAM_REQ_CMP);
2478
2479 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2480 (sccb->scsi_status != SCSI_STATUS_OK)) {
2481 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2482 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2483 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2484 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2485 } else {
2486 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2487 }
2488 }
2489
2490 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2491 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2492 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2493 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2494 xpt_freeze_devq(sccb->ccb_h.path, 1);
2495 isp_prt(isp, ISP_LOGDEBUG0,
2496 "freeze devq %d.%d cam sts %x scsi sts %x",
2497 sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2498 sccb->ccb_h.status, sccb->scsi_status);
2499 }
2500 }
2501
2502 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2503 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2504 xpt_print_path(sccb->ccb_h.path);
2505 isp_prt(isp, ISP_LOGINFO,
2506 "cam completion status 0x%x", sccb->ccb_h.status);
2507 }
2508
2509 XS_CMD_S_DONE(sccb);
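	/*
	 * If the watchdog is currently running for this command, leave the
	 * final xpt_done() to it (it checks XS_CMD_DONE_P); otherwise cancel
	 * the timeout and complete the CCB here.
	 */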
2510 if (XS_CMD_WDOG_P(sccb) == 0) {
2511 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2512 if (XS_CMD_GRACE_P(sccb)) {
2513 isp_prt(isp, ISP_LOGDEBUG2,
2514 "finished command on borrowed time");
2515 }
2516 XS_CMD_S_CLEAR(sccb);
2517 ISPLOCK_2_CAMLOCK(isp);
2518 xpt_done((union ccb *) sccb);
2519 CAMLOCK_2_ISPLOCK(isp);
2520 }
2521}
2522
2523int
2524isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2525{
2526 int bus, rv = 0;
2527 switch (cmd) {
2528 case ISPASYNC_NEW_TGT_PARAMS:
2529 {
2530 int flags, tgt;
2531 sdparam *sdp = isp->isp_param;
2532 struct ccb_trans_settings cts;
2533 struct cam_path *tmppath;
2534
2535 bzero(&cts, sizeof (struct ccb_trans_settings));
2536
2537 tgt = *((int *)arg);
2538 bus = (tgt >> 16) & 0xffff;
2539 tgt &= 0xffff;
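		/*
		 * The argument packs bus and target into a single int: bus in
		 * the upper 16 bits, target id in the lower 16.
		 */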
2540 sdp += bus;
2541 ISPLOCK_2_CAMLOCK(isp);
2542 if (xpt_create_path(&tmppath, NULL,
2543 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2544 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2545 CAMLOCK_2_ISPLOCK(isp);
2546 isp_prt(isp, ISP_LOGWARN,
2547 "isp_async cannot make temp path for %d.%d",
2548 tgt, bus);
2549 rv = -1;
2550 break;
2551 }
2552 CAMLOCK_2_ISPLOCK(isp);
2553 flags = sdp->isp_devparam[tgt].actv_flags;
2554 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2555 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2556 if (flags & DPARM_DISC) {
2557 cts.flags |= CCB_TRANS_DISC_ENB;
2558 }
2559 if (flags & DPARM_TQING) {
2560 cts.flags |= CCB_TRANS_TAG_ENB;
2561 }
2562 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2563 cts.bus_width = (flags & DPARM_WIDE)?
 2564		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2565 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2566 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2567 if (flags & DPARM_SYNC) {
2568 cts.valid |=
2569 CCB_TRANS_SYNC_RATE_VALID |
2570 CCB_TRANS_SYNC_OFFSET_VALID;
2571 }
2572 isp_prt(isp, ISP_LOGDEBUG2,
2573 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2574 bus, tgt, sdp->isp_devparam[tgt].actv_period,
2575 sdp->isp_devparam[tgt].actv_offset, flags);
2576 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2577 ISPLOCK_2_CAMLOCK(isp);
2578 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2579 xpt_free_path(tmppath);
2580 CAMLOCK_2_ISPLOCK(isp);
2581 break;
2582 }
2583 case ISPASYNC_BUS_RESET:
2584 bus = *((int *)arg);
2585 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2586 bus);
2587 if (bus > 0 && isp->isp_path2) {
2588 ISPLOCK_2_CAMLOCK(isp);
2589 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2590 CAMLOCK_2_ISPLOCK(isp);
2591 } else if (isp->isp_path) {
2592 ISPLOCK_2_CAMLOCK(isp);
2593 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2594 CAMLOCK_2_ISPLOCK(isp);
2595 }
2596 break;
2597 case ISPASYNC_LIP:
2598 if (isp->isp_path) {
2599 isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2600 }
2601 isp_prt(isp, ISP_LOGINFO, "LIP Received");
2602 break;
2603 case ISPASYNC_LOOP_RESET:
2604 if (isp->isp_path) {
2605 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2606 }
2607 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2608 break;
2609 case ISPASYNC_LOOP_DOWN:
2610 if (isp->isp_path) {
2611 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2612 }
2613 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2614 break;
2615 case ISPASYNC_LOOP_UP:
2616 /*
2617 * Now we just note that Loop has come up. We don't
2618 * actually do anything because we're waiting for a
2619 * Change Notify before activating the FC cleanup
2620 * thread to look at the state of the loop again.
2621 */
2622 isp_prt(isp, ISP_LOGINFO, "Loop UP");
2623 break;
2624 case ISPASYNC_PROMENADE:
2625 {
2626 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2627 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2628 static const char *roles[4] = {
2629 "(none)", "Target", "Initiator", "Target/Initiator"
2630 };
2631 fcparam *fcp = isp->isp_param;
2632 int tgt = *((int *) arg);
2633 struct lportdb *lp = &fcp->portdb[tgt];
2634
2635 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2636 roles[lp->roles & 0x3],
2637 (lp->valid)? "Arrived" : "Departed",
2638 (u_int32_t) (lp->port_wwn >> 32),
2639 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2640 (u_int32_t) (lp->node_wwn >> 32),
2641 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2642
2643 break;
2644 }
2645 case ISPASYNC_CHANGE_NOTIFY:
2646 if (arg == ISPASYNC_CHANGE_PDB) {
2647 isp_prt(isp, ISP_LOGINFO,
2648 "Port Database Changed");
2649 } else if (arg == ISPASYNC_CHANGE_SNS) {
2650 isp_prt(isp, ISP_LOGINFO,
2651 "Name Server Database Changed");
2652 }
41c20dac 2653 wakeup(&isp->isp_osinfo.kthread);
2654 break;
2655 case ISPASYNC_FABRIC_DEV:
2656 {
2657 int target, base, lim;
2658 fcparam *fcp = isp->isp_param;
2659 struct lportdb *lp = NULL;
2660 struct lportdb *clp = (struct lportdb *) arg;
2661 char *pt;
2662
2663 switch (clp->port_type) {
2664 case 1:
2665 pt = " N_Port";
2666 break;
2667 case 2:
2668 pt = " NL_Port";
2669 break;
2670 case 3:
2671 pt = "F/NL_Port";
2672 break;
2673 case 0x7f:
2674 pt = " Nx_Port";
2675 break;
2676 case 0x81:
2677 pt = " F_port";
2678 break;
2679 case 0x82:
2680 pt = " FL_Port";
2681 break;
2682 case 0x84:
2683 pt = " E_port";
2684 break;
2685 default:
2686 pt = " ";
2687 break;
2688 }
2689
2690 isp_prt(isp, ISP_LOGINFO,
2691 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2692
2693 /*
2694 * If we don't have an initiator role we bail.
2695 *
2696 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2697 */
2698
2699 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2700 break;
2701 }
2702
2703 /*
2704 * Is this entry for us? If so, we bail.
2705 */
2706
2707 if (fcp->isp_portid == clp->portid) {
2708 break;
2709 }
2710
2711 /*
2712 * Else, the default policy is to find room for it in
2713 * our local port database. Later, when we execute
2714 * the call to isp_pdb_sync either this newly arrived
2715 * or already logged in device will be (re)announced.
2716 */
2717
2718 if (fcp->isp_topo == TOPO_FL_PORT)
2719 base = FC_SNS_ID+1;
2720 else
2721 base = 0;
2722
2723 if (fcp->isp_topo == TOPO_N_PORT)
2724 lim = 1;
2725 else
2726 lim = MAX_FC_TARG;
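		/*
		 * On a public loop (FL_Port) we start scanning just above
		 * FC_SNS_ID, leaving the lower slots for the fabric's
		 * reserved addresses; a point-to-point (N_Port) topology has
		 * only one other device, so only slot 0 is considered.
		 */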
2727
2728 /*
2729 * Is it already in our list?
2730 */
2731 for (target = base; target < lim; target++) {
2732 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2733 continue;
2734 }
2735 lp = &fcp->portdb[target];
2736 if (lp->port_wwn == clp->port_wwn &&
2737 lp->node_wwn == clp->node_wwn) {
2738 lp->fabric_dev = 1;
2739 break;
2740 }
2741 }
2742 if (target < lim) {
2743 break;
2744 }
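		/*
		 * Not seen before - find a free slot in the local portdb.
		 */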
2745 for (target = base; target < lim; target++) {
2746 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2747 continue;
2748 }
2749 lp = &fcp->portdb[target];
2750 if (lp->port_wwn == 0) {
2751 break;
2752 }
2753 }
2754 if (target == lim) {
2755 isp_prt(isp, ISP_LOGWARN,
2756 "out of space for fabric devices");
2757 break;
2758 }
2759 lp->port_type = clp->port_type;
2760 lp->fc4_type = clp->fc4_type;
2761 lp->node_wwn = clp->node_wwn;
2762 lp->port_wwn = clp->port_wwn;
2763 lp->portid = clp->portid;
2764 lp->fabric_dev = 1;
2765 break;
2766 }
2767#ifdef ISP_TARGET_MODE
2768 case ISPASYNC_TARGET_MESSAGE:
2769 {
2770 tmd_msg_t *mp = arg;
2771 isp_prt(isp, ISP_LOGALL,
2772 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2773 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2774 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2775 mp->nt_msg[0]);
2776 break;
2777 }
2778 case ISPASYNC_TARGET_EVENT:
2779 {
2780 tmd_event_t *ep = arg;
2781 isp_prt(isp, ISP_LOGALL,
2782 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2783 break;
2784 }
2785 case ISPASYNC_TARGET_ACTION:
2786 switch (((isphdr_t *)arg)->rqs_entry_type) {
2787 default:
2788 isp_prt(isp, ISP_LOGWARN,
2789 "event 0x%x for unhandled target action",
2790 ((isphdr_t *)arg)->rqs_entry_type);
2791 break;
2792 case RQSTYPE_NOTIFY:
2793 if (IS_SCSI(isp)) {
2794 rv = isp_handle_platform_notify_scsi(isp,
2795 (in_entry_t *) arg);
2796 } else {
2797 rv = isp_handle_platform_notify_fc(isp,
2798 (in_fcentry_t *) arg);
2799 }
2800 break;
2801 case RQSTYPE_ATIO:
2802 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2803 break;
2804 case RQSTYPE_ATIO2:
2805 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2806 break;
2807 case RQSTYPE_CTIO2:
2808 case RQSTYPE_CTIO:
2809 rv = isp_handle_platform_ctio(isp, arg);
2810 break;
2811 case RQSTYPE_ENABLE_LUN:
2812 case RQSTYPE_MODIFY_LUN:
2813 if (IS_DUALBUS(isp)) {
2814 bus =
2815 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
2816 } else {
2817 bus = 0;
2818 }
2819 isp_cv_signal_rqe(isp, bus,
2820 ((lun_entry_t *)arg)->le_status);
2821 break;
2822 }
2823 break;
2824#endif
2825 case ISPASYNC_FW_CRASH:
2826 {
2827 u_int16_t mbox1, mbox6;
2828 mbox1 = ISP_READ(isp, OUTMAILBOX1);
2829 if (IS_DUALBUS(isp)) {
2830 mbox6 = ISP_READ(isp, OUTMAILBOX6);
2831 } else {
2832 mbox6 = 0;
2833 }
2834 isp_prt(isp, ISP_LOGERR,
2835 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
2836 mbox6, mbox1);
2837#ifdef ISP_FW_CRASH_DUMP
2838 /*
2839 * XXX: really need a thread to do this right.
2840 */
2841 if (IS_FC(isp)) {
2842 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
2843 FCPARAM(isp)->isp_loopstate = LOOP_NIL;
2844 isp_freeze_loopdown(isp, "f/w crash");
2845 isp_fw_dump(isp);
2846 }
2847 isp_reinit(isp);
2848 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
2849#endif
2850 break;
2851 }
2852 case ISPASYNC_UNHANDLED_RESPONSE:
2853 break;
2854 default:
2855 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2856 break;
2857 }
2858 return (rv);
2859}
2860
2861
2862/*
2863 * Locks are held before coming here.
2864 */
2865void
2866isp_uninit(struct ispsoftc *isp)
2867{
2868 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2869 DISABLE_INTS(isp);
2870}
2871
2872void
2873isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2874{
e2565a42 2875 __va_list ap;
2876 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2877 return;
2878 }
2879 printf("%s: ", device_get_nameunit(isp->isp_dev));
e2565a42 2880 __va_start(ap, fmt);
984263bc 2881 vprintf(fmt, ap);
e2565a42 2882 __va_end(ap);
2883 printf("\n");
2884}