2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
30 * $FreeBSD: head/sys/dev/mpr/mpr_sas.c 331422 2018-03-23 13:52:26Z ken $
33 /* Communications core for Avago Technologies (LSI) MPT3 */
35 /* TODO Move headers to mprvar */
36 #include <sys/types.h>
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
44 #include <sys/malloc.h>
46 #include <sys/sysctl.h>
47 #include <sys/endian.h>
48 #include <sys/queue.h>
49 #include <sys/kthread.h>
50 #include <sys/taskqueue.h>
52 #include <sys/eventhandler.h>
56 #include <machine/stdarg.h>
58 #include <bus/cam/cam.h>
59 #include <bus/cam/cam_ccb.h>
60 #include <bus/cam/cam_debug.h>
61 #include <bus/cam/cam_sim.h>
62 #include <bus/cam/cam_xpt_sim.h>
63 #include <bus/cam/cam_xpt_periph.h>
64 #include <bus/cam/cam_periph.h>
65 #include <bus/cam/scsi/scsi_all.h>
66 #include <bus/cam/scsi/scsi_message.h>
67 #if __FreeBSD_version >= 900026
68 #include <bus/cam/scsi/smp_all.h>
71 #if 0 /* XXX swildner NVMe support */
72 #include <dev/nvme/nvme.h>
75 #include <dev/raid/mpr/mpi/mpi2_type.h>
76 #include <dev/raid/mpr/mpi/mpi2.h>
77 #include <dev/raid/mpr/mpi/mpi2_ioc.h>
78 #include <dev/raid/mpr/mpi/mpi2_sas.h>
79 #include <dev/raid/mpr/mpi/mpi2_pci.h>
80 #include <dev/raid/mpr/mpi/mpi2_cnfg.h>
81 #include <dev/raid/mpr/mpi/mpi2_init.h>
82 #include <dev/raid/mpr/mpi/mpi2_tool.h>
83 #include <dev/raid/mpr/mpr_ioctl.h>
84 #include <dev/raid/mpr/mprvar.h>
85 #include <dev/raid/mpr/mpr_table.h>
86 #include <dev/raid/mpr/mpr_sas.h>
88 #define MPRSAS_DISCOVERY_TIMEOUT 20
89 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
92 * static array to check SCSI OpCode for EEDP protection bits
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
116 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
118 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
120 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
121 static void mprsas_poll(struct cam_sim *sim);
122 static void mprsas_scsiio_timeout(void *data);
123 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
124 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
125 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
126 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
127 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
128 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
129 struct mpr_command *cm);
130 void mprsas_rescan_callback(struct cam_periph *, union ccb *);
131 static void mprsas_async(void *callback_arg, uint32_t code,
132 struct cam_path *path, void *arg);
133 #if (__FreeBSD_version < 901503) || \
134 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136 struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138 union ccb *done_ccb);
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142 struct mpr_command *cm);
144 #if __FreeBSD_version >= 900026
145 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
146 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
148 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
149 #endif //FreeBSD_version >= 900026
151 struct mprsas_target *
152 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155 struct mprsas_target *target;
158 for (i = start; i < sassc->maxtargets; i++) {
159 target = &sassc->targets[i];
160 if (target->handle == handle)
167 /* we need to freeze the simq during attach and diag reset, to avoid failing
168 * commands before device handles have been found by discovery. Since
169 * discovery involves reading config pages and possibly sending commands,
170 * discovery actions may continue even after we receive the end of discovery
171 * event, so refcount discovery actions instead of assuming we can unfreeze
172 * the simq when we get the event.
175 mprsas_startup_increment(struct mprsas_softc *sassc)
177 MPR_FUNCTRACE(sassc->sc);
179 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
180 if (sassc->startup_refcount++ == 0) {
181 /* just starting, freeze the simq */
182 mpr_dprint(sassc->sc, MPR_INIT,
183 "%s freezing simq\n", __func__);
184 #if (__FreeBSD_version >= 1000039) || \
185 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
188 xpt_freeze_simq(sassc->sim, 1);
190 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
191 sassc->startup_refcount);
196 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
199 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
200 xpt_release_simq(sassc->sim, 1);
201 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
206 mprsas_startup_decrement(struct mprsas_softc *sassc)
208 MPR_FUNCTRACE(sassc->sc);
210 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
211 if (--sassc->startup_refcount == 0) {
212 /* finished all discovery-related actions, release
213 * the simq and rescan for the latest topology.
215 mpr_dprint(sassc->sc, MPR_INIT,
216 "%s releasing simq\n", __func__);
217 sassc->flags &= ~MPRSAS_IN_STARTUP;
218 xpt_release_simq(sassc->sim, 1);
219 #if (__FreeBSD_version >= 1000039) || \
220 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
223 mprsas_rescan_target(sassc->sc, NULL);
226 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
227 sassc->startup_refcount);
231 /* The firmware requires us to stop sending commands when we're doing task
232 * management, so refcount the TMs and keep the simq frozen when any are in
236 mprsas_alloc_tm(struct mpr_softc *sc)
238 struct mpr_command *tm;
241 tm = mpr_alloc_high_priority_command(sc);
246 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
248 int target_id = 0xFFFFFFFF;
255 * For TM's the devq is frozen for the device. Unfreeze it here and
256 * free the resources used for freezing the devq. Must clear the
257 * INRESET flag as well or scsi I/O will not work.
259 if (tm->cm_targ != NULL) {
260 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
261 target_id = tm->cm_targ->tid;
264 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
266 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
267 xpt_free_path(tm->cm_ccb->ccb_h.path);
268 xpt_free_ccb(tm->cm_ccb);
271 mpr_free_high_priority_command(sc, tm);
275 mprsas_rescan_callback(struct cam_periph *periph, union ccb *ccb)
277 if (ccb->ccb_h.status != CAM_REQ_CMP)
278 kprintf("cam_scan_callback: failure status = %x\n",
281 xpt_free_path(ccb->ccb_h.path);
286 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
288 struct mprsas_softc *sassc = sc->sassc;
290 target_id_t targetid;
294 pathid = cam_sim_path(sassc->sim);
296 targetid = CAM_TARGET_WILDCARD;
298 targetid = targ - sassc->targets;
301 * Allocate a CCB and schedule a rescan.
303 ccb = xpt_alloc_ccb();
305 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
309 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, targetid,
310 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
311 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
316 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5); /* 5 = low priority */
318 /* XXX Hardwired to scan the bus for now */
319 ccb->ccb_h.func_code = XPT_SCAN_BUS;
320 ccb->ccb_h.cbfcnp = mprsas_rescan_callback;
321 ccb->crcn.flags = CAM_FLAG_NONE;
323 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
328 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
338 /* No need to be in here if debugging isn't enabled */
339 if ((cm->cm_sc->mpr_debug & level) == 0)
342 sbuf_new(&sb, str, sizeof(str), 0);
346 if (cm->cm_ccb != NULL) {
347 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
349 sbuf_cat(&sb, path_str);
350 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
351 scsi_command_string(&cm->cm_ccb->csio, &sb);
352 sbuf_printf(&sb, "length %d ",
353 cm->cm_ccb->csio.dxfer_len);
356 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
357 cam_sim_name(cm->cm_sc->sassc->sim),
358 cam_sim_unit(cm->cm_sc->sassc->sim),
359 cam_sim_bus(cm->cm_sc->sassc->sim),
360 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
364 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
365 sbuf_vprintf(&sb, fmt, ap);
367 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
373 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
375 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
376 struct mprsas_target *targ;
381 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
382 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
386 /* XXX retry the remove after the diag reset completes? */
387 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
388 "0x%04x\n", __func__, handle);
389 mprsas_free_tm(sc, tm);
393 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
394 MPI2_IOCSTATUS_SUCCESS) {
395 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
396 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
399 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
400 le32toh(reply->TerminationCount));
401 mpr_free_reply(sc, tm->cm_reply_data);
402 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
404 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
408 * Don't clear target if remove fails because things will get confusing.
409 * Leave the devname and sasaddr intact so that we know to avoid reusing
410 * this target id if possible, and so we can assign the same target id
411 * to this device if it comes back in the future.
413 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
414 MPI2_IOCSTATUS_SUCCESS) {
417 targ->encl_handle = 0x0;
418 targ->encl_level_valid = 0x0;
419 targ->encl_level = 0x0;
420 targ->connector_name[0] = ' ';
421 targ->connector_name[1] = ' ';
422 targ->connector_name[2] = ' ';
423 targ->connector_name[3] = ' ';
424 targ->encl_slot = 0x0;
425 targ->exp_dev_handle = 0x0;
427 targ->linkrate = 0x0;
430 targ->scsi_req_desc_type = 0;
433 mprsas_free_tm(sc, tm);
438 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
439 * Otherwise Volume Delete is same as Bare Drive Removal.
442 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
444 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
445 struct mpr_softc *sc;
446 struct mpr_command *cm;
447 struct mprsas_target *targ = NULL;
449 MPR_FUNCTRACE(sassc->sc);
452 targ = mprsas_find_target_by_handle(sassc, 0, handle);
454 /* FIXME: what is the action? */
455 /* We don't know about this device? */
456 mpr_dprint(sc, MPR_ERROR,
457 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
461 targ->flags |= MPRSAS_TARGET_INREMOVAL;
463 cm = mprsas_alloc_tm(sc);
465 mpr_dprint(sc, MPR_ERROR,
466 "%s: command alloc failure\n", __func__);
470 mprsas_rescan_target(sc, targ);
472 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
473 req->DevHandle = targ->handle;
474 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
475 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
477 /* SAS Hard Link Reset / SATA Link Reset */
478 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
482 cm->cm_desc.HighPriority.RequestFlags =
483 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
484 cm->cm_complete = mprsas_remove_volume;
485 cm->cm_complete_data = (void *)(uintptr_t)handle;
487 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
488 __func__, targ->tid);
489 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
491 mpr_map_command(sc, cm);
495 * The firmware performs debounce on the link to avoid transient link errors
496 * and false removals. When it does decide that link has been lost and a
497 * device needs to go away, it expects that the host will perform a target reset
498 * and then an op remove. The reset has the side-effect of aborting any
499 * outstanding requests for the device, which is required for the op-remove to
500 * succeed. It's not clear if the host should check for the device coming back
501 * alive after the reset.
504 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
506 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
507 struct mpr_softc *sc;
508 struct mpr_command *cm;
509 struct mprsas_target *targ = NULL;
511 MPR_FUNCTRACE(sassc->sc);
515 targ = mprsas_find_target_by_handle(sassc, 0, handle);
517 /* FIXME: what is the action? */
518 /* We don't know about this device? */
519 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
524 targ->flags |= MPRSAS_TARGET_INREMOVAL;
526 cm = mprsas_alloc_tm(sc);
528 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
533 mprsas_rescan_target(sc, targ);
535 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
536 memset(req, 0, sizeof(*req));
537 req->DevHandle = htole16(targ->handle);
538 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
539 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
541 /* SAS Hard Link Reset / SATA Link Reset */
542 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
546 cm->cm_desc.HighPriority.RequestFlags =
547 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
548 cm->cm_complete = mprsas_remove_device;
549 cm->cm_complete_data = (void *)(uintptr_t)handle;
551 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
552 __func__, targ->tid);
553 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
555 mpr_map_command(sc, cm);
559 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
561 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
562 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
563 struct mprsas_target *targ;
564 struct mpr_command *next_cm;
569 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
570 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
574 * Currently there should be no way we can hit this case. It only
575 * happens when we have a failure to allocate chain frames, and
576 * task management commands don't have S/G lists.
578 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
579 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
580 "handle %#04x! This should not happen!\n", __func__,
581 tm->cm_flags, handle);
585 /* XXX retry the remove after the diag reset completes? */
586 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
587 "0x%04x\n", __func__, handle);
588 mprsas_free_tm(sc, tm);
592 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
593 MPI2_IOCSTATUS_SUCCESS) {
594 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
595 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
598 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
599 le32toh(reply->TerminationCount));
600 mpr_free_reply(sc, tm->cm_reply_data);
601 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
603 /* Reuse the existing command */
604 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
605 memset(req, 0, sizeof(*req));
606 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
607 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
608 req->DevHandle = htole16(handle);
610 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
611 tm->cm_complete = mprsas_remove_complete;
612 tm->cm_complete_data = (void *)(uintptr_t)handle;
614 mpr_map_command(sc, tm);
616 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
618 if (targ->encl_level_valid) {
619 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
620 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
621 targ->connector_name);
623 TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
626 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
627 ccb = tm->cm_complete_data;
628 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
629 mprsas_scsiio_complete(sc, tm);
634 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
636 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
638 struct mprsas_target *targ;
639 struct mprsas_lun *lun;
643 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
644 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
647 * Currently there should be no way we can hit this case. It only
648 * happens when we have a failure to allocate chain frames, and
649 * task management commands don't have S/G lists.
651 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
652 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
653 "handle %#04x! This should not happen!\n", __func__,
654 tm->cm_flags, handle);
655 mprsas_free_tm(sc, tm);
660 /* most likely a chip reset */
661 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
662 "0x%04x\n", __func__, handle);
663 mprsas_free_tm(sc, tm);
667 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
668 __func__, handle, le16toh(reply->IOCStatus));
671 * Don't clear target if remove fails because things will get confusing.
672 * Leave the devname and sasaddr intact so that we know to avoid reusing
673 * this target id if possible, and so we can assign the same target id
674 * to this device if it comes back in the future.
676 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
677 MPI2_IOCSTATUS_SUCCESS) {
680 targ->encl_handle = 0x0;
681 targ->encl_level_valid = 0x0;
682 targ->encl_level = 0x0;
683 targ->connector_name[0] = ' ';
684 targ->connector_name[1] = ' ';
685 targ->connector_name[2] = ' ';
686 targ->connector_name[3] = ' ';
687 targ->encl_slot = 0x0;
688 targ->exp_dev_handle = 0x0;
690 targ->linkrate = 0x0;
693 targ->scsi_req_desc_type = 0;
695 while (!SLIST_EMPTY(&targ->luns)) {
696 lun = SLIST_FIRST(&targ->luns);
697 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
702 mprsas_free_tm(sc, tm);
706 mprsas_register_events(struct mpr_softc *sc)
711 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
712 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
713 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
714 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
715 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
716 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
717 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
718 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
719 setbit(events, MPI2_EVENT_IR_VOLUME);
720 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
721 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
722 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
723 setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
724 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
725 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
726 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
727 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
728 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
729 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
733 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
734 &sc->sassc->mprsas_eh);
740 mpr_attach_sas(struct mpr_softc *sc)
742 struct mprsas_softc *sassc;
744 int unit, error = 0, reqs;
747 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
749 sassc = kmalloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
751 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
752 "Cannot allocate SAS subsystem memory\n");
757 * XXX MaxTargets could change during a reinit. Since we don't
758 * resize the targets[] array during such an event, cache the value
759 * of MaxTargets here so that we don't get into trouble later. This
760 * should move into the reinit logic.
762 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
763 sassc->targets = kmalloc(sizeof(struct mprsas_target) *
764 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
765 if (!sassc->targets) {
766 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
767 "Cannot allocate SAS target memory\n");
774 reqs = sc->num_reqs - sc->num_prireqs - 1;
775 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
776 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
781 unit = device_get_unit(sc->mpr_dev);
782 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
783 unit, &sc->mpr_lock, reqs, reqs, sassc->devq);
784 cam_simq_release(sassc->devq);
785 if (sassc->sim == NULL) {
786 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
791 TAILQ_INIT(&sassc->ev_queue);
793 /* Initialize taskqueue for Event Handling */
794 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
795 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
796 taskqueue_thread_enqueue, &sassc->ev_tq);
797 taskqueue_start_threads(&sassc->ev_tq, 1, TDPRI_KERN_DAEMON,
799 device_get_nameunit(sc->mpr_dev));
804 * XXX There should be a bus for every port on the adapter, but since
805 * we're just going to fake the topology for now, we'll pretend that
806 * everything is just a target on a single bus.
808 if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
809 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
810 "Error %d registering SCSI bus\n", error);
816 * Assume that discovery events will start right away.
818 * Hold off boot until discovery is complete.
820 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
821 sc->sassc->startup_refcount = 0;
822 mprsas_startup_increment(sassc);
824 callout_init_mp(&sassc->discovery_callout);
827 * Register for async events so we can determine the EEDP
828 * capabilities of devices.
830 status = xpt_create_path(&sassc->path, /*periph*/NULL,
831 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
833 if (status != CAM_REQ_CMP) {
834 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
835 "Error %#x creating sim path\n", status);
840 #if (__FreeBSD_version >= 1000006) || \
841 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
842 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
844 event = AC_FOUND_DEVICE;
848 * Prior to the CAM locking improvements, we can't call
849 * xpt_register_async() with a particular path specified.
851 * If a path isn't specified, xpt_register_async() will
852 * generate a wildcard path and acquire the XPT lock while
853 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
854 * It will then drop the XPT lock once that is done.
856 * If a path is specified for xpt_register_async(), it will
857 * not acquire and drop the XPT lock around the call to
858 * xpt_action(). xpt_action() asserts that the caller
859 * holds the SIM lock, so the SIM lock has to be held when
860 * calling xpt_register_async() when the path is specified.
862 * But xpt_register_async calls xpt_for_all_devices(),
863 * which calls xptbustraverse(), which will acquire each
864 * SIM lock. When it traverses our particular bus, it will
865 * necessarily acquire the SIM lock, which will lead to a
866 * recursive lock acquisition.
868 * The CAM locking changes fix this problem by acquiring
869 * the XPT topology lock around bus traversal in
870 * xptbustraverse(), so the caller can hold the SIM lock
871 * and it does not cause a recursive lock acquisition.
873 * These __FreeBSD_version values are approximate, especially
874 * for stable/10, which is two months later than the actual
878 #if (__FreeBSD_version < 1000703) || \
879 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
881 status = xpt_register_async(event, mprsas_async, sc,
885 status = xpt_register_async(event, mprsas_async, sc,
889 if (status != CAM_REQ_CMP) {
890 mpr_dprint(sc, MPR_ERROR,
891 "Error %#x registering async handler for "
892 "AC_ADVINFO_CHANGED events\n", status);
893 xpt_free_path(sassc->path);
897 if (status != CAM_REQ_CMP) {
899 * EEDP use is the exception, not the rule.
900 * Warn the user, but do not fail to attach.
902 mpr_printf(sc, "EEDP capabilities disabled.\n");
907 mprsas_register_events(sc);
912 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
917 mpr_detach_sas(struct mpr_softc *sc)
919 struct mprsas_softc *sassc;
920 struct mprsas_lun *lun, *lun_tmp;
921 struct mprsas_target *targ;
926 if (sc->sassc == NULL)
930 mpr_deregister_events(sc, sassc->mprsas_eh);
933 * Drain and free the event handling taskqueue with the lock
934 * unheld so that any parallel processing tasks drain properly
935 * without deadlocking.
937 if (sassc->ev_tq != NULL)
938 taskqueue_free(sassc->ev_tq);
940 /* Make sure CAM doesn't wedge if we had to bail out early. */
943 while (sassc->startup_refcount != 0)
944 mprsas_startup_decrement(sassc);
946 /* Deregister our async handler */
947 if (sassc->path != NULL) {
948 xpt_register_async(0, mprsas_async, sc, sassc->path);
949 xpt_free_path(sassc->path);
953 if (sassc->flags & MPRSAS_IN_STARTUP)
954 xpt_release_simq(sassc->sim, 1);
956 if (sassc->sim != NULL) {
957 xpt_bus_deregister(cam_sim_path(sassc->sim));
958 cam_sim_free(sassc->sim);
963 for (i = 0; i < sassc->maxtargets; i++) {
964 targ = &sassc->targets[i];
965 SLIST_FOREACH_MUTABLE(lun, &targ->luns, lun_link, lun_tmp) {
969 kfree(sassc->targets, M_MPR);
977 mprsas_discovery_end(struct mprsas_softc *sassc)
979 struct mpr_softc *sc = sassc->sc;
983 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
984 callout_stop(&sassc->discovery_callout);
987 * After discovery has completed, check the mapping table for any
988 * missing devices and update their missing counts. Only do this once
989 * whenever the driver is initialized so that missing counts aren't
990 * updated unnecessarily. Note that just because discovery has
991 * completed doesn't mean that events have been processed yet. The
992 * check_devices function is a callout timer that checks if ALL devices
993 * are missing. If so, it will wait a little longer for events to
994 * complete and keep resetting itself until some device in the mapping
995 * table is not missing, meaning that event processing has started.
997 if (sc->track_mapping_events) {
998 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
999 "completed. Check for missing devices in the mapping "
1001 callout_reset(&sc->device_check_callout,
1002 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
1008 mprsas_action(struct cam_sim *sim, union ccb *ccb)
1010 struct mprsas_softc *sassc;
1012 sassc = cam_sim_softc(sim);
1014 MPR_FUNCTRACE(sassc->sc);
1015 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1016 ccb->ccb_h.func_code);
1017 KKASSERT(lockowned(&sassc->sc->mpr_lock));
1019 switch (ccb->ccb_h.func_code) {
1022 struct ccb_pathinq *cpi = &ccb->cpi;
1023 struct mpr_softc *sc = sassc->sc;
1025 cpi->version_num = 1;
1026 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1027 cpi->target_sprt = 0;
1028 #if (__FreeBSD_version >= 1000039) || \
1029 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1030 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1031 #elif defined(__DragonFly__)
1032 cpi->hba_misc = PIM_NOBUSRESET;
1034 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1036 cpi->hba_eng_cnt = 0;
1037 cpi->max_target = sassc->maxtargets - 1;
1041 * initiator_id is set here to an ID outside the set of valid
1042 * target IDs (including volumes).
1044 cpi->initiator_id = sassc->maxtargets;
1045 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1046 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1047 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1048 cpi->unit_number = cam_sim_unit(sim);
1049 cpi->bus_id = cam_sim_bus(sim);
1051 * XXXSLM-I think this needs to change based on config page or
1052 * something instead of hardcoded to 150000.
1054 cpi->base_transfer_speed = 150000;
1055 cpi->transport = XPORT_SAS;
1056 cpi->transport_version = 0;
1057 cpi->protocol = PROTO_SCSI;
1058 cpi->protocol_version = SCSI_REV_SPC;
1059 cpi->maxio = sc->maxio;
1060 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1063 case XPT_GET_TRAN_SETTINGS:
1065 struct ccb_trans_settings *cts;
1066 struct ccb_trans_settings_sas *sas;
1067 struct ccb_trans_settings_scsi *scsi;
1068 struct mprsas_target *targ;
1071 sas = &cts->xport_specific.sas;
1072 scsi = &cts->proto_specific.scsi;
1074 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1075 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1076 cts->ccb_h.target_id));
1077 targ = &sassc->targets[cts->ccb_h.target_id];
1078 if (targ->handle == 0x0) {
1079 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1083 cts->protocol_version = SCSI_REV_SPC2;
1084 cts->transport = XPORT_SAS;
1085 cts->transport_version = 0;
1087 sas->valid = CTS_SAS_VALID_SPEED;
1088 switch (targ->linkrate) {
1090 sas->bitrate = 150000;
1093 sas->bitrate = 300000;
1096 sas->bitrate = 600000;
1099 sas->bitrate = 1200000;
1105 cts->protocol = PROTO_SCSI;
1106 scsi->valid = CTS_SCSI_VALID_TQ;
1107 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1109 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1112 case XPT_CALC_GEOMETRY:
1113 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1114 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1117 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1119 mprsas_action_resetdev(sassc, ccb);
1124 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1125 "for abort or reset\n");
1126 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1129 mprsas_action_scsiio(sassc, ccb);
1131 #if __FreeBSD_version >= 900026
1133 mprsas_action_smpio(sassc, ccb);
1137 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1145 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1146 target_id_t target_id, lun_id_t lun_id)
1148 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1149 struct cam_path *path;
1151 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1152 ac_code, target_id, (uintmax_t)lun_id);
1154 if (xpt_create_path(&path, NULL,
1155 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1156 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1161 xpt_async(ac_code, path, NULL);
1162 xpt_free_path(path);
1166 mprsas_complete_all_commands(struct mpr_softc *sc)
1168 struct mpr_command *cm;
1173 KKASSERT(lockowned(&sc->mpr_lock));
1175 /* complete all commands with a NULL reply */
1176 for (i = 1; i < sc->num_reqs; i++) {
1177 cm = &sc->commands[i];
1178 if (cm->cm_state == MPR_CM_STATE_FREE)
1181 cm->cm_state = MPR_CM_STATE_BUSY;
1182 cm->cm_reply = NULL;
1185 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1186 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1188 if (cm->cm_complete != NULL) {
1189 mprsas_log_command(cm, MPR_RECOVERY,
1190 "completing cm %p state %x ccb %p for diag reset\n",
1191 cm, cm->cm_state, cm->cm_ccb);
1192 cm->cm_complete(sc, cm);
1194 } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1195 mprsas_log_command(cm, MPR_RECOVERY,
1196 "waking up cm %p state %x ccb %p for diag reset\n",
1197 cm, cm->cm_state, cm->cm_ccb);
1202 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1203 /* this should never happen, but if it does, log */
1204 mprsas_log_command(cm, MPR_RECOVERY,
1205 "cm %p state %x flags 0x%x ccb %p during diag "
1206 "reset\n", cm, cm->cm_state, cm->cm_flags,
1211 sc->io_cmds_active = 0;
1215 mprsas_handle_reinit(struct mpr_softc *sc)
1219 /* Go back into startup mode and freeze the simq, so that CAM
1220 * doesn't send any commands until after we've rediscovered all
1221 * targets and found the proper device handles for them.
1223 * After the reset, portenable will trigger discovery, and after all
1224 * discovery-related activities have finished, the simq will be
1227 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1228 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1229 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1230 mprsas_startup_increment(sc->sassc);
1232 /* notify CAM of a bus reset */
1233 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1236 /* complete and cleanup after all outstanding commands */
1237 mprsas_complete_all_commands(sc);
1239 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1240 __func__, sc->sassc->startup_refcount);
1242 /* zero all the target handles, since they may change after the
1243 * reset, and we have to rediscover all the targets and use the new
1246 for (i = 0; i < sc->sassc->maxtargets; i++) {
1247 if (sc->sassc->targets[i].outstanding != 0)
1248 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1249 i, sc->sassc->targets[i].outstanding);
1250 sc->sassc->targets[i].handle = 0x0;
1251 sc->sassc->targets[i].exp_dev_handle = 0x0;
1252 sc->sassc->targets[i].outstanding = 0;
1253 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1257 mprsas_tm_timeout(void *data)
1259 struct mpr_command *tm = data;
1260 struct mpr_softc *sc = tm->cm_sc;
1262 KKASSERT(lockowned(&sc->mpr_lock));
1264 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1267 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1268 ("command not inqueue\n"));
1270 tm->cm_state = MPR_CM_STATE_BUSY;
1275 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1277 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1278 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1279 unsigned int cm_count = 0;
1280 struct mpr_command *cm;
1281 struct mprsas_target *targ;
1283 callout_stop(&tm->cm_callout);
1285 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1286 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1290 * Currently there should be no way we can hit this case. It only
1291 * happens when we have a failure to allocate chain frames, and
1292 * task management commands don't have S/G lists.
1294 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1295 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1296 "%s: cm_flags = %#x for LUN reset! "
1297 "This should not happen!\n", __func__, tm->cm_flags);
1298 mprsas_free_tm(sc, tm);
1302 if (reply == NULL) {
1303 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1305 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1306 /* this completion was due to a reset, just cleanup */
1307 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1308 "reset, ignoring NULL LUN reset reply\n");
1310 mprsas_free_tm(sc, tm);
1313 /* we should have gotten a reply. */
1314 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1315 "LUN reset attempt, resetting controller\n");
1321 mpr_dprint(sc, MPR_RECOVERY,
1322 "logical unit reset status 0x%x code 0x%x count %u\n",
1323 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1324 le32toh(reply->TerminationCount));
1327 * See if there are any outstanding commands for this LUN.
1328 * This could be made more efficient by using a per-LU data
1329 * structure of some sort.
1331 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1332 if (cm->cm_lun == tm->cm_lun)
1336 if (cm_count == 0) {
1337 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1338 "Finished recovery after LUN reset for target %u\n",
1341 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1345 * We've finished recovery for this logical unit. check and
1346 * see if some other logical unit has a timedout command
1347 * that needs to be processed.
1349 cm = TAILQ_FIRST(&targ->timedout_commands);
1351 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1352 "More commands to abort for target %u\n", targ->tid);
1353 mprsas_send_abort(sc, tm, cm);
1356 mprsas_free_tm(sc, tm);
1359 /* if we still have commands for this LUN, the reset
1360 * effectively failed, regardless of the status reported.
1361 * Escalate to a target reset.
1363 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1364 "logical unit reset complete for target %u, but still "
1365 "have %u command(s), sending target reset\n", targ->tid,
1367 mprsas_send_reset(sc, tm,
1368 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1373 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1375 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1376 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1377 struct mprsas_target *targ;
1379 callout_stop(&tm->cm_callout);
1381 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1382 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1386 * Currently there should be no way we can hit this case. It only
1387 * happens when we have a failure to allocate chain frames, and
1388 * task management commands don't have S/G lists.
1390 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1391 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1392 "reset! This should not happen!\n", __func__, tm->cm_flags);
1393 mprsas_free_tm(sc, tm);
1397 if (reply == NULL) {
1398 mpr_dprint(sc, MPR_RECOVERY,
1399 "NULL target reset reply for tm %p TaskMID %u\n",
1400 tm, le16toh(req->TaskMID));
1401 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1402 /* this completion was due to a reset, just cleanup */
1403 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1404 "reset, ignoring NULL target reset reply\n");
1406 mprsas_free_tm(sc, tm);
1409 /* we should have gotten a reply. */
1410 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1411 "target reset attempt, resetting controller\n");
1417 mpr_dprint(sc, MPR_RECOVERY,
1418 "target reset status 0x%x code 0x%x count %u\n",
1419 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1420 le32toh(reply->TerminationCount));
1422 if (targ->outstanding == 0) {
1424 * We've finished recovery for this target and all
1425 * of its logical units.
1427 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1428 "Finished reset recovery for target %u\n", targ->tid);
1430 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1434 mprsas_free_tm(sc, tm);
1437 * After a target reset, if this target still has
1438 * outstanding commands, the reset effectively failed,
1439 * regardless of the status reported. escalate.
1441 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1442 "Target reset complete for target %u, but still have %u "
1443 "command(s), resetting controller\n", targ->tid,
1449 #define MPR_RESET_TIMEOUT 30
1452 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1454 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1455 struct mprsas_target *target;
1458 target = tm->cm_targ;
1459 if (target->handle == 0) {
1460 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1461 "%d\n", __func__, target->tid);
1465 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1466 req->DevHandle = htole16(target->handle);
1467 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1468 req->TaskType = type;
1470 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1471 /* XXX Need to handle invalid LUNs */
1472 MPR_SET_LUN(req->LUN, tm->cm_lun);
1473 tm->cm_targ->logical_unit_resets++;
1474 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1475 "Sending logical unit reset to target %u lun %d\n",
1476 target->tid, tm->cm_lun);
1477 tm->cm_complete = mprsas_logical_unit_reset_complete;
1478 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1479 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1481 * Target reset method =
1482 * SAS Hard Link Reset / SATA Link Reset
1484 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1485 tm->cm_targ->target_resets++;
1486 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1487 "Sending target reset to target %u\n", target->tid);
1488 tm->cm_complete = mprsas_target_reset_complete;
1489 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1492 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1496 if (target->encl_level_valid) {
1497 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1498 "At enclosure level %d, slot %d, connector name (%4s)\n",
1499 target->encl_level, target->encl_slot,
1500 target->connector_name);
1504 tm->cm_desc.HighPriority.RequestFlags =
1505 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1506 tm->cm_complete_data = (void *)tm;
1508 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1509 mprsas_tm_timeout, tm);
1511 err = mpr_map_command(sc, tm);
1513 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1514 "error %d sending reset type %u\n", err, type);
1521 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1523 struct mpr_command *cm;
1524 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1525 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1526 struct mprsas_target *targ;
1528 callout_stop(&tm->cm_callout);
1530 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1531 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1535 * Currently there should be no way we can hit this case. It only
1536 * happens when we have a failure to allocate chain frames, and
1537 * task management commands don't have S/G lists.
1539 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1540 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1541 "cm_flags = %#x for abort %p TaskMID %u!\n",
1542 tm->cm_flags, tm, le16toh(req->TaskMID));
1543 mprsas_free_tm(sc, tm);
1547 if (reply == NULL) {
1548 mpr_dprint(sc, MPR_RECOVERY,
1549 "NULL abort reply for tm %p TaskMID %u\n",
1550 tm, le16toh(req->TaskMID));
1551 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1552 /* this completion was due to a reset, just cleanup */
1553 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1554 "reset, ignoring NULL abort reply\n");
1556 mprsas_free_tm(sc, tm);
1558 /* we should have gotten a reply. */
1559 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1560 "abort attempt, resetting controller\n");
1566 mpr_dprint(sc, MPR_RECOVERY,
1567 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1568 le16toh(req->TaskMID),
1569 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1570 le32toh(reply->TerminationCount));
1572 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1575 * if there are no more timedout commands, we're done with
1576 * error recovery for this target.
1578 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1579 "Finished abort recovery for target %u\n", targ->tid);
1581 mprsas_free_tm(sc, tm);
1582 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1583 /* abort success, but we have more timedout commands to abort */
1584 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1585 "Continuing abort recovery for target %u\n", targ->tid);
1586 mprsas_send_abort(sc, tm, cm);
1589 * we didn't get a command completion, so the abort
1590 * failed as far as we're concerned. escalate.
1592 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1593 "Abort failed for target %u, sending logical unit reset\n",
1596 mprsas_send_reset(sc, tm,
1597 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1601 #define MPR_ABORT_TIMEOUT 5
1604 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1605 struct mpr_command *cm)
1607 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1608 struct mprsas_target *targ;
1612 if (targ->handle == 0) {
1613 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1614 "%s null devhandle for target_id %d\n",
1615 __func__, cm->cm_ccb->ccb_h.target_id);
1619 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1620 "Aborting command %p\n", cm);
1622 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1623 req->DevHandle = htole16(targ->handle);
1624 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1625 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1627 /* XXX Need to handle invalid LUNs */
1628 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1630 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1633 tm->cm_desc.HighPriority.RequestFlags =
1634 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1635 tm->cm_complete = mprsas_abort_complete;
1636 tm->cm_complete_data = (void *)tm;
1637 tm->cm_targ = cm->cm_targ;
1638 tm->cm_lun = cm->cm_lun;
1640 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1641 mprsas_tm_timeout, tm);
1645 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1647 err = mpr_map_command(sc, tm);
1649 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1650 "error %d sending abort for cm %p SMID %u\n",
1651 err, cm, req->TaskMID);
1656 mprsas_scsiio_timeout(void *data)
1658 #if 0 /* XXX swildner: sbintime */
1659 sbintime_t elapsed, now;
1662 struct mpr_softc *sc;
1663 struct mpr_command *cm;
1664 struct mprsas_target *targ;
1666 cm = (struct mpr_command *)data;
1669 #if 0 /* XXX swildner: sbintime */
1674 KKASSERT(lockowned(&sc->mpr_lock));
1676 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1679 * Run the interrupt handler to make sure it's not pending. This
1680 * isn't perfect because the command could have already completed
1681 * and been re-used, though this is unlikely.
1683 mpr_intr_locked(sc);
1684 if (cm->cm_state != MPR_CM_STATE_INQUEUE) {
1685 mprsas_log_command(cm, MPR_XINFO,
1686 "SCSI command %p almost timed out\n", cm);
1690 if (cm->cm_ccb == NULL) {
1691 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1698 #if 0 /* XXX swildner: sbintime */
1699 elapsed = now - ccb->ccb_h.qos.sim_data;
1700 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1701 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1702 targ->tid, targ->handle, ccb->ccb_h.timeout,
1703 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1705 if (targ->encl_level_valid) {
1706 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1707 "At enclosure level %d, slot %d, connector name (%4s)\n",
1708 targ->encl_level, targ->encl_slot, targ->connector_name);
1711 /* XXX first, check the firmware state, to see if it's still
1712 * operational. if not, do a diag reset.
1714 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1715 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1716 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1718 if (targ->tm != NULL) {
1719 /* target already in recovery, just queue up another
1720 * timedout command to be processed later.
1722 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1723 "processing by tm %p\n", cm, targ->tm);
1725 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1727 /* start recovery by aborting the first timedout command */
1728 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1729 "Sending abort to target %u for SMID %d\n", targ->tid,
1730 cm->cm_desc.Default.SMID);
1731 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1733 mprsas_send_abort(sc, targ->tm, cm);
1736 /* XXX queue this target up for recovery once a TM becomes
1737 * available. The firmware only has a limited number of
1738 * HighPriority credits for the high priority requests used
1739 * for task management, and we ran out.
1741 * Isilon: don't worry about this for now, since we have
1742 * more credits than disks in an enclosure, and limit
1743 * ourselves to one TM per target for recovery.
1745 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1746 "timedout cm %p failed to allocate a tm\n", cm);
1750 #if 0 /* XXX swildner: NVMe support */
1752 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1754 * Return 0 - for success,
1755 * 1 - to immediately return back the command with success status to CAM
1756 * negative value - to fallback to firmware path i.e. issue scsi unmap
1757 * to FW without any translation.
1760 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1761 union ccb *ccb, struct mprsas_target *targ)
1763 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1764 struct ccb_scsiio *csio;
1765 struct unmap_parm_list *plist;
1766 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1767 struct nvme_command *c;
1769 uint16_t ndesc, list_len, data_length;
1770 struct mpr_prp_page *prp_page_info;
1771 uint64_t nvme_dsm_ranges_dma_handle;
1774 #if __FreeBSD_version >= 1100103
1775 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1777 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1778 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1779 ccb->csio.cdb_io.cdb_ptr[8]);
1781 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1782 ccb->csio.cdb_io.cdb_bytes[8]);
1786 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1790 plist = kmalloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1792 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1793 "save UNMAP data\n");
1797 /* Copy SCSI unmap data to a local buffer */
1798 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1800 /* return back the unmap command to CAM with success status,
1801 * if number of descripts is zero.
1803 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1805 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1806 "UNMAP cmd is Zero\n");
1811 data_length = ndesc * sizeof(struct nvme_dsm_range);
1812 if (data_length > targ->MDTS) {
1813 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1814 "Device's MDTS: %d\n", data_length, targ->MDTS);
1819 prp_page_info = mpr_alloc_prp_page(sc);
1820 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1821 "UNMAP command.\n", __func__));
1824 * Insert the allocated PRP page into the command's PRP page list. This
1825 * will be freed when the command is freed.
1827 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1829 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1830 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1832 bzero(nvme_dsm_ranges, data_length);
1834 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1835 * for each descriptors contained in SCSI UNMAP data.
1837 for (i = 0; i < ndesc; i++) {
1838 nvme_dsm_ranges[i].length =
1839 htole32(be32toh(plist->desc[i].nlb));
1840 nvme_dsm_ranges[i].starting_lba =
1841 htole64(be64toh(plist->desc[i].slba));
1842 nvme_dsm_ranges[i].attributes = 0;
1845 /* Build MPI2.6's NVMe Encapsulated Request Message */
1846 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1847 bzero(req, sizeof(*req));
1848 req->DevHandle = htole16(targ->handle);
1849 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1850 req->Flags = MPI26_NVME_FLAGS_WRITE;
1851 req->ErrorResponseBaseAddress.High =
1852 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1853 req->ErrorResponseBaseAddress.Low =
1854 htole32(cm->cm_sense_busaddr);
1855 req->ErrorResponseAllocationLength =
1856 htole16(sizeof(struct nvme_completion));
1857 req->EncapsulatedCommandLength =
1858 htole16(sizeof(struct nvme_command));
1859 req->DataLength = htole32(data_length);
1861 /* Build NVMe DSM command */
1862 c = (struct nvme_command *) req->NVMe_Command;
1863 c->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_DATASET_MANAGEMENT);
1864 c->nsid = htole32(csio->ccb_h.target_lun + 1);
1865 c->cdw10 = htole32(ndesc - 1);
1866 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1868 cm->cm_length = data_length;
1871 cm->cm_complete = mprsas_scsiio_complete;
1872 cm->cm_complete_data = ccb;
1874 cm->cm_lun = csio->ccb_h.target_lun;
1877 cm->cm_desc.Default.RequestFlags =
1878 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1880 csio->ccb_h.qos.sim_data = sbinuptime();
1881 #if __FreeBSD_version >= 1000029
1882 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1883 mprsas_scsiio_timeout, cm, 0);
1884 #else //__FreeBSD_version < 1000029
1885 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1886 mprsas_scsiio_timeout, cm);
1887 #endif //__FreeBSD_version >= 1000029
1890 targ->outstanding++;
1891 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1892 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1894 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1895 __func__, cm, ccb, targ->outstanding);
1897 mpr_build_nvme_prp(sc, cm, req,
1898 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1899 mpr_map_command(sc, cm);
1902 kfree(plist, M_MPR);
1908 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1910 MPI2_SCSI_IO_REQUEST *req;
1911 struct ccb_scsiio *csio;
1912 struct mpr_softc *sc;
1913 struct mprsas_target *targ;
1914 struct mprsas_lun *lun;
1915 struct mpr_command *cm;
1916 uint8_t i, lba_byte, *ref_tag_addr;
1917 #if 0 /* XXX swildner: NVMe support */
1918 uint8_t scsi_opcode;
1920 uint16_t eedp_flags;
1921 uint32_t mpi_control;
1922 #if 0 /* XXX swildner: NVMe support */
1928 KKASSERT(lockowned(&sc->mpr_lock));
1931 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1932 ("Target %d out of bounds in XPT_SCSI_IO\n",
1933 csio->ccb_h.target_id));
1934 targ = &sassc->targets[csio->ccb_h.target_id];
1935 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1936 if (targ->handle == 0x0) {
1937 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1938 __func__, csio->ccb_h.target_id);
1939 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1943 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1944 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1945 "supported %u\n", __func__, csio->ccb_h.target_id);
1946 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1951 * Sometimes, it is possible to get a command that is not "In
1952 * Progress" and was actually aborted by the upper layer. Check for
1953 * this here and complete the command without error.
1955 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1956 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1957 "target %u\n", __func__, csio->ccb_h.target_id);
1962 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1963 * that the volume has timed out. We want volumes to be enumerated
1964 * until they are deleted/removed, not just failed.
1966 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1967 if (targ->devinfo == 0)
1968 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1970 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1975 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1976 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1977 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1983 * If target has a reset in progress, freeze the devq and return. The
1984 * devq will be released when the TM reset is finished.
1986 if (targ->flags & MPRSAS_TARGET_INRESET) {
1987 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1988 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1989 __func__, targ->tid);
1990 xpt_freeze_devq(ccb->ccb_h.path, 1);
1995 cm = mpr_alloc_command(sc);
1996 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1998 mpr_free_command(sc, cm);
2000 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2001 xpt_freeze_simq(sassc->sim, 1);
2002 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2004 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2005 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2010 #if 0 /* XXX swildner: NVMe support */
2011 /* For NVME device's issue UNMAP command directly to NVME drives by
2012 * constructing equivalent native NVMe DataSetManagement command.
2014 #if __FreeBSD_version >= 1100103
2015 scsi_opcode = scsiio_cdb_ptr(csio)[0];
2017 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2018 scsi_opcode = csio->cdb_io.cdb_ptr[0];
2020 scsi_opcode = csio->cdb_io.cdb_bytes[0];
2022 if (scsi_opcode == UNMAP &&
2024 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2025 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2026 if (rc == 1) { /* return command to CAM with success status */
2027 mpr_free_command(sc, cm);
2028 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2031 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
2036 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2037 bzero(req, sizeof(*req));
2038 req->DevHandle = htole16(targ->handle);
2039 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2041 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2042 req->SenseBufferLength = MPR_SENSE_LEN;
2044 req->ChainOffset = 0;
2045 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2050 req->DataLength = htole32(csio->dxfer_len);
2051 req->BidirectionalDataLength = 0;
2052 req->IoFlags = htole16(csio->cdb_len);
2055 /* Note: BiDirectional transfers are not supported */
2056 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2058 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2059 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2062 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2063 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2067 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
2071 if (csio->cdb_len == 32)
2072 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2074 * It looks like the hardware doesn't require an explicit tag
2075 * number for each transaction. SAM Task Management not supported
2078 switch (csio->tag_action) {
2079 case MSG_HEAD_OF_Q_TAG:
2080 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2082 case MSG_ORDERED_Q_TAG:
2083 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2086 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2088 case CAM_TAG_ACTION_NONE:
2089 case MSG_SIMPLE_Q_TAG:
2091 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2094 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2095 req->Control = htole32(mpi_control);
2097 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2098 mpr_free_command(sc, cm);
2099 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2104 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2105 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2107 KASSERT(csio->cdb_len <= IOCDBLEN,
2108 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2109 "is not set", csio->cdb_len));
2110 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2112 req->IoFlags = htole16(csio->cdb_len);
2115 * Check if EEDP is supported and enabled. If it is then check if the
2116 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2117 * is formatted for EEDP support. If all of this is true, set CDB up
2118 * for EEDP transfer.
2120 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2121 if (sc->eedp_enabled && eedp_flags) {
2122 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2123 if (lun->lun_id == csio->ccb_h.target_lun) {
2128 if ((lun != NULL) && (lun->eedp_formatted)) {
2129 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2130 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2131 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2132 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2133 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2135 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2137 req->EEDPFlags = htole16(eedp_flags);
2140 * If CDB less than 32, fill in Primary Ref Tag with
2141 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2142 * already there. Also, set protection bit. FreeBSD
2143 * currently does not support CDBs bigger than 16, but
2144 * the code doesn't hurt, and will be here for the
2147 if (csio->cdb_len != 32) {
2148 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2149 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2150 PrimaryReferenceTag;
2151 for (i = 0; i < 4; i++) {
2153 req->CDB.CDB32[lba_byte + i];
2156 req->CDB.EEDP32.PrimaryReferenceTag =
2158 CDB.EEDP32.PrimaryReferenceTag);
2159 req->CDB.EEDP32.PrimaryApplicationTagMask =
2162 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2165 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2166 req->EEDPFlags = htole16(eedp_flags);
2167 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2173 cm->cm_length = csio->dxfer_len;
2174 if (cm->cm_length != 0) {
2176 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2180 cm->cm_sge = &req->SGL;
2181 cm->cm_sglsize = (32 - 24) * 4;
2182 cm->cm_complete = mprsas_scsiio_complete;
2183 cm->cm_complete_data = ccb;
2185 cm->cm_lun = csio->ccb_h.target_lun;
2188 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2189 * and set descriptor type.
2191 if (targ->scsi_req_desc_type ==
2192 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2193 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2194 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2195 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2196 if (!sc->atomic_desc_capable) {
2197 cm->cm_desc.FastPathSCSIIO.DevHandle =
2198 htole16(targ->handle);
2201 cm->cm_desc.SCSIIO.RequestFlags =
2202 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2203 if (!sc->atomic_desc_capable)
2204 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
2207 #if 0 /* XXX swildner sbintime */
2208 csio->ccb_h.qos.sim_data = sbinuptime();
2210 #if __FreeBSD_version >= 1000029
2211 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2212 mprsas_scsiio_timeout, cm, 0);
2213 #else //__FreeBSD_version < 1000029
2214 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2215 mprsas_scsiio_timeout, cm);
2216 #endif //__FreeBSD_version >= 1000029
2219 targ->outstanding++;
2220 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2221 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2223 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2224 __func__, cm, ccb, targ->outstanding);
2226 mpr_map_command(sc, cm);
2231 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2234 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2235 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2239 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2240 MPI2_IOCSTATUS_MASK;
2241 u8 scsi_state = mpi_reply->SCSIState;
2242 u8 scsi_status = mpi_reply->SCSIStatus;
2243 char *desc_ioc_state = NULL;
2244 char *desc_scsi_status = NULL;
2245 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2247 if (log_info == 0x31170000)
2250 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2252 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2255 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2256 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2257 if (targ->encl_level_valid) {
2258 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2259 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2260 targ->connector_name);
2264 * We can add more detail about underflow data here
2267 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2268 "scsi_state %pb%i\n", desc_scsi_status, scsi_status,
2269 "\20" "\1AutosenseValid" "\2AutosenseFailed"
2270 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid",
2273 if (sc->mpr_debug & MPR_XINFO &&
2274 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2275 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2276 scsi_sense_print(csio);
2277 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2280 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2281 response_info = le32toh(mpi_reply->ResponseInfo);
2282 response_bytes = (u8 *)&response_info;
2283 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2285 mpr_describe_table(mpr_scsi_taskmgmt_string,
2286 response_bytes[0]));
2290 #if 0 /* XXX swildner: NVMe support */
2291 /** mprsas_nvme_trans_status_code
2293 * Convert Native NVMe command error status to
2294 * equivalent SCSI error status.
2296 * Returns appropriate scsi_status
2299 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2300 struct mpr_command *cm)
2302 u8 status = MPI2_SCSI_STATUS_GOOD;
2303 int skey, asc, ascq;
2304 union ccb *ccb = cm->cm_complete_data;
2305 int returned_sense_len;
2308 sct = NVME_STATUS_GET_SCT(nvme_status);
2309 sc = NVME_STATUS_GET_SC(nvme_status);
2311 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2312 skey = SSD_KEY_ILLEGAL_REQUEST;
2313 asc = SCSI_ASC_NO_SENSE;
2314 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2317 case NVME_SCT_GENERIC:
2319 case NVME_SC_SUCCESS:
2320 status = MPI2_SCSI_STATUS_GOOD;
2321 skey = SSD_KEY_NO_SENSE;
2322 asc = SCSI_ASC_NO_SENSE;
2323 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2325 case NVME_SC_INVALID_OPCODE:
2326 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2327 skey = SSD_KEY_ILLEGAL_REQUEST;
2328 asc = SCSI_ASC_ILLEGAL_COMMAND;
2329 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2331 case NVME_SC_INVALID_FIELD:
2332 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2333 skey = SSD_KEY_ILLEGAL_REQUEST;
2334 asc = SCSI_ASC_INVALID_CDB;
2335 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2337 case NVME_SC_DATA_TRANSFER_ERROR:
2338 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2339 skey = SSD_KEY_MEDIUM_ERROR;
2340 asc = SCSI_ASC_NO_SENSE;
2341 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2343 case NVME_SC_ABORTED_POWER_LOSS:
2344 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2345 skey = SSD_KEY_ABORTED_COMMAND;
2346 asc = SCSI_ASC_WARNING;
2347 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2349 case NVME_SC_INTERNAL_DEVICE_ERROR:
2350 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2351 skey = SSD_KEY_HARDWARE_ERROR;
2352 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2353 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2355 case NVME_SC_ABORTED_BY_REQUEST:
2356 case NVME_SC_ABORTED_SQ_DELETION:
2357 case NVME_SC_ABORTED_FAILED_FUSED:
2358 case NVME_SC_ABORTED_MISSING_FUSED:
2359 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2360 skey = SSD_KEY_ABORTED_COMMAND;
2361 asc = SCSI_ASC_NO_SENSE;
2362 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2364 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2365 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2366 skey = SSD_KEY_ILLEGAL_REQUEST;
2367 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2368 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2370 case NVME_SC_LBA_OUT_OF_RANGE:
2371 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2372 skey = SSD_KEY_ILLEGAL_REQUEST;
2373 asc = SCSI_ASC_ILLEGAL_BLOCK;
2374 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2376 case NVME_SC_CAPACITY_EXCEEDED:
2377 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2378 skey = SSD_KEY_MEDIUM_ERROR;
2379 asc = SCSI_ASC_NO_SENSE;
2380 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2382 case NVME_SC_NAMESPACE_NOT_READY:
2383 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2384 skey = SSD_KEY_NOT_READY;
2385 asc = SCSI_ASC_LUN_NOT_READY;
2386 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2390 case NVME_SCT_COMMAND_SPECIFIC:
2392 case NVME_SC_INVALID_FORMAT:
2393 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2394 skey = SSD_KEY_ILLEGAL_REQUEST;
2395 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2396 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2398 case NVME_SC_CONFLICTING_ATTRIBUTES:
2399 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2400 skey = SSD_KEY_ILLEGAL_REQUEST;
2401 asc = SCSI_ASC_INVALID_CDB;
2402 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2406 case NVME_SCT_MEDIA_ERROR:
2408 case NVME_SC_WRITE_FAULTS:
2409 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2410 skey = SSD_KEY_MEDIUM_ERROR;
2411 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2412 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2414 case NVME_SC_UNRECOVERED_READ_ERROR:
2415 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2416 skey = SSD_KEY_MEDIUM_ERROR;
2417 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2418 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2420 case NVME_SC_GUARD_CHECK_ERROR:
2421 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2422 skey = SSD_KEY_MEDIUM_ERROR;
2423 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2424 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2426 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2427 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2428 skey = SSD_KEY_MEDIUM_ERROR;
2429 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2430 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2432 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2433 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2434 skey = SSD_KEY_MEDIUM_ERROR;
2435 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2436 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2438 case NVME_SC_COMPARE_FAILURE:
2439 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2440 skey = SSD_KEY_MISCOMPARE;
2441 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2442 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2444 case NVME_SC_ACCESS_DENIED:
2445 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2446 skey = SSD_KEY_ILLEGAL_REQUEST;
2447 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2448 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2454 returned_sense_len = sizeof(struct scsi_sense_data);
2455 if (returned_sense_len < ccb->csio.sense_len)
2456 ccb->csio.sense_resid = ccb->csio.sense_len -
2459 ccb->csio.sense_resid = 0;
2461 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2462 1, skey, asc, ascq, SSD_ELEM_NONE);
2463 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2468 /** mprsas_complete_nvme_unmap
2470 * Complete native NVMe command issued using NVMe Encapsulated
2474 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2476 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2477 struct nvme_completion *nvme_completion = NULL;
2478 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2480 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2481 if (le16toh(mpi_reply->ErrorResponseCount)){
2482 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2483 scsi_status = mprsas_nvme_trans_status_code(
2484 nvme_completion->status, cm);
2491 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2493 MPI2_SCSI_IO_REPLY *rep;
2495 struct ccb_scsiio *csio;
2496 struct mprsas_softc *sassc;
2497 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2498 u8 *TLR_bits, TLR_on, *scsi_cdb;
2501 struct mprsas_target *target;
2502 target_id_t target_id;
2505 mpr_dprint(sc, MPR_TRACE,
2506 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2507 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2508 cm->cm_targ->outstanding);
2510 callout_stop(&cm->cm_callout);
2511 KKASSERT(lockowned(&sc->mpr_lock));
2514 ccb = cm->cm_complete_data;
2516 target_id = csio->ccb_h.target_id;
2517 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2519 * XXX KDM if the chain allocation fails, does it matter if we do
2520 * the sync and unload here? It is simpler to do it in every case,
2521 * assuming it doesn't cause problems.
2523 if (cm->cm_data != NULL) {
2524 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2525 dir = BUS_DMASYNC_POSTREAD;
2526 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2527 dir = BUS_DMASYNC_POSTWRITE;
2528 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2529 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2532 cm->cm_targ->completed++;
2533 cm->cm_targ->outstanding--;
2534 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2535 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2537 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2538 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2539 cm->cm_state = MPR_CM_STATE_BUSY;
2540 if (cm->cm_reply != NULL)
2541 mprsas_log_command(cm, MPR_RECOVERY,
2542 "completed timedout cm %p ccb %p during recovery "
2543 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2544 le16toh(rep->IOCStatus), rep->SCSIStatus,
2545 rep->SCSIState, le32toh(rep->TransferCount));
2547 mprsas_log_command(cm, MPR_RECOVERY,
2548 "completed timedout cm %p ccb %p during recovery\n",
2550 } else if (cm->cm_targ->tm != NULL) {
2551 if (cm->cm_reply != NULL)
2552 mprsas_log_command(cm, MPR_RECOVERY,
2553 "completed cm %p ccb %p during recovery "
2554 "ioc %x scsi %x state %x xfer %u\n",
2555 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2556 rep->SCSIStatus, rep->SCSIState,
2557 le32toh(rep->TransferCount));
2559 mprsas_log_command(cm, MPR_RECOVERY,
2560 "completed cm %p ccb %p during recovery\n",
2562 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2563 mprsas_log_command(cm, MPR_RECOVERY,
2564 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2567 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2569 * We ran into an error after we tried to map the command,
2570 * so we're getting a callback without queueing the command
2571 * to the hardware. So we set the status here, and it will
2572 * be retained below. We'll go through the "fast path",
2573 * because there can be no reply when we haven't actually
2574 * gone out to the hardware.
2576 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2579 * Currently the only error included in the mask is
2580 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2581 * chain frames. We need to freeze the queue until we get
2582 * a command that completed without this error, which will
2583 * hopefully have some chain frames attached that we can
2584 * use. If we wanted to get smarter about it, we would
2585 * only unfreeze the queue in this condition when we're
2586 * sure that we're getting some chain frames back. That's
2587 * probably unnecessary.
2589 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2590 xpt_freeze_simq(sassc->sim, 1);
2591 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2592 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2593 "freezing SIM queue\n");
2598 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2599 * flag, and use it in a few places in the rest of this function for
2600 * convenience. Use the macro if available.
2602 #if __FreeBSD_version >= 1100103
2603 scsi_cdb = scsiio_cdb_ptr(csio);
2605 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2606 scsi_cdb = csio->cdb_io.cdb_ptr;
2608 scsi_cdb = csio->cdb_io.cdb_bytes;
2612 * If this is a Start Stop Unit command and it was issued by the driver
2613 * during shutdown, decrement the refcount to account for all of the
2614 * commands that were sent. All SSU commands should be completed before
2615 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2618 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2619 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2623 /* Take the fast path to completion */
2624 if (cm->cm_reply == NULL) {
2625 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2626 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2627 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2629 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2630 csio->scsi_status = SCSI_STATUS_OK;
2632 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2633 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2634 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2635 mpr_dprint(sc, MPR_XINFO,
2636 "Unfreezing SIM queue\n");
2641 * There are two scenarios where the status won't be
2642 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2643 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2645 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2647 * Freeze the dev queue so that commands are
2648 * executed in the correct order after error
2651 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2652 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2654 mpr_free_command(sc, cm);
2659 #if 0 /* XXX swildner: NVMe support */
2660 target = &sassc->targets[target_id];
2661 if (scsi_cdb[0] == UNMAP &&
2663 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2664 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2665 csio->scsi_status = rep->SCSIStatus;
2669 mprsas_log_command(cm, MPR_XINFO,
2670 "ioc %x scsi %x state %x xfer %u\n",
2671 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2672 le32toh(rep->TransferCount));
2674 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2675 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2676 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2678 case MPI2_IOCSTATUS_SUCCESS:
2679 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2680 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2681 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2682 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2684 /* Completion failed at the transport level. */
2685 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2686 MPI2_SCSI_STATE_TERMINATED)) {
2687 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2691 /* In a modern packetized environment, an autosense failure
2692 * implies that there's not much else that can be done to
2693 * recover the command.
2695 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2696 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2701 * CAM doesn't care about SAS Response Info data, but if this is
2702 * the state check if TLR should be done. If not, clear the
2703 * TLR_bits for the target.
2705 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2706 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2707 == MPR_SCSI_RI_INVALID_FRAME)) {
2708 sc->mapping_table[target_id].TLR_bits =
2709 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2713 * Intentionally override the normal SCSI status reporting
2714 * for these two cases. These are likely to happen in a
2715 * multi-initiator environment, and we want to make sure that
2716 * CAM retries these commands rather than fail them.
2718 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2719 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2720 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2724 /* Handle normal status and sense */
2725 csio->scsi_status = rep->SCSIStatus;
2726 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2727 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2729 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2731 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2732 int sense_len, returned_sense_len;
2734 returned_sense_len = min(le32toh(rep->SenseCount),
2735 sizeof(struct scsi_sense_data));
2736 if (returned_sense_len < csio->sense_len)
2737 csio->sense_resid = csio->sense_len -
2740 csio->sense_resid = 0;
2742 sense_len = min(returned_sense_len,
2743 csio->sense_len - csio->sense_resid);
2744 bzero(&csio->sense_data, sizeof(csio->sense_data));
2745 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2746 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2750 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2751 * and it's page code 0 (Supported Page List), and there is
2752 * inquiry data, and this is for a sequential access device, and
2753 * the device is an SSP target, and TLR is supported by the
2754 * controller, turn the TLR_bits value ON if page 0x90 is
2757 if ((scsi_cdb[0] == INQUIRY) &&
2758 (scsi_cdb[1] & SI_EVPD) &&
2759 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2760 #if 0 /* XXX swildner */
2761 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2763 (csio->data_ptr != NULL) &&
2764 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2765 (sc->control_TLR) &&
2766 (sc->mapping_table[target_id].device_info &
2767 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2768 vpd_list = (struct scsi_vpd_supported_page_list *)
2770 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2771 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2772 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2773 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2774 alloc_len -= csio->resid;
2775 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2776 if (vpd_list->list[i] == 0x90) {
2784 * If this is a SATA direct-access end device, mark it so that
2785 * a SCSI StartStopUnit command will be sent to it when the
2786 * driver is being shutdown.
2788 if ((scsi_cdb[0] == INQUIRY) &&
2789 (csio->data_ptr != NULL) &&
2790 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2791 (sc->mapping_table[target_id].device_info &
2792 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2793 ((sc->mapping_table[target_id].device_info &
2794 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2795 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2796 target = &sassc->targets[target_id];
2797 target->supports_SSU = TRUE;
2798 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2802 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2803 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2805 * If devinfo is 0 this will be a volume. In that case don't
2806 * tell CAM that the volume is not there. We want volumes to
2807 * be enumerated until they are deleted/removed, not just
2810 if (cm->cm_targ->devinfo == 0)
2811 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2813 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2815 case MPI2_IOCSTATUS_INVALID_SGL:
2816 mpr_print_scsiio_cmd(sc, cm);
2817 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2819 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2821 * This is one of the responses that comes back when an I/O
2822 * has been aborted. If it is because of a timeout that we
2823 * initiated, just set the status to CAM_CMD_TIMEOUT.
2824 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2825 * command is the same (it gets retried, subject to the
2826 * retry counter), the only difference is what gets printed
2829 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2830 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2832 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2834 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2835 /* resid is ignored for this condition */
2837 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2839 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2840 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2842 * These can sometimes be transient transport-related
2843 * errors, and sometimes persistent drive-related errors.
2844 * We used to retry these without decrementing the retry
2845 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2846 * we hit a persistent drive problem that returns one of
2847 * these error codes, we would retry indefinitely. So,
2848 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2849 * count and avoid infinite retries. We're taking the
2850 * potential risk of flagging false failures in the event
2851 * of a topology-related error (e.g. a SAS expander problem
2852 * causes a command addressed to a drive to fail), but
2853 * avoiding getting into an infinite retry loop.
2855 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2856 mpr_dprint(sc, MPR_INFO,
2857 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2858 mpr_describe_table(mpr_iocstatus_string,
2859 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2860 target_id, cm->cm_desc.Default.SMID,
2861 le32toh(rep->IOCLogInfo));
2862 mpr_dprint(sc, MPR_XINFO,
2863 "SCSIStatus %x SCSIState %x xfercount %u\n",
2864 rep->SCSIStatus, rep->SCSIState,
2865 le32toh(rep->TransferCount));
2867 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2868 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2869 case MPI2_IOCSTATUS_INVALID_VPID:
2870 case MPI2_IOCSTATUS_INVALID_FIELD:
2871 case MPI2_IOCSTATUS_INVALID_STATE:
2872 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2873 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2874 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2875 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2876 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2878 mprsas_log_command(cm, MPR_XINFO,
2879 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2880 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2881 rep->SCSIStatus, rep->SCSIState,
2882 le32toh(rep->TransferCount));
2883 csio->resid = cm->cm_length;
2885 #if 0 /* XXX swildner: NVMe support */
2886 if (scsi_cdb[0] == UNMAP &&
2888 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2889 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2892 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2897 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2899 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2900 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2901 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2902 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2906 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2907 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2908 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2911 mpr_free_command(sc, cm);
2915 #if __FreeBSD_version >= 900026
2917 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2919 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2920 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2924 ccb = cm->cm_complete_data;
2927 * Currently there should be no way we can hit this case. It only
2928 * happens when we have a failure to allocate chain frames, and SMP
2929 * commands require two S/G elements only. That should be handled
2930 * in the standard request size.
2932 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2933 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2934 "request!\n", __func__, cm->cm_flags);
2935 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2939 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2941 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2942 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2946 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2947 sasaddr = le32toh(req->SASAddress.Low);
2948 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2950 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2951 MPI2_IOCSTATUS_SUCCESS ||
2952 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2953 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2954 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2955 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2959 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2960 "completed successfully\n", __func__, (uintmax_t)sasaddr);
2962 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2963 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2965 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2969 * We sync in both directions because we had DMAs in the S/G list
2970 * in both directions.
2972 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2973 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2974 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2975 mpr_free_command(sc, cm);
2980 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2982 struct mpr_command *cm;
2983 uint8_t *request, *response;
2984 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2985 struct mpr_softc *sc;
2993 #if (__FreeBSD_version >= 1000028) || \
2994 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2995 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2996 case CAM_DATA_PADDR:
2997 case CAM_DATA_SG_PADDR:
2999 * XXX We don't yet support physical addresses here.
3001 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3002 "supported\n", __func__);
3003 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3008 * The chip does not support more than one buffer for the
3009 * request or response.
3011 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3012 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3013 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3014 "response buffer segments not supported for SMP\n",
3016 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3022 * The CAM_SCATTER_VALID flag was originally implemented
3023 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3024 * We have two. So, just take that flag to mean that we
3025 * might have S/G lists, and look at the S/G segment count
3026 * to figure out whether that is the case for each individual
3029 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3030 bus_dma_segment_t *req_sg;
3032 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3033 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3035 request = ccb->smpio.smp_request;
3037 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3038 bus_dma_segment_t *rsp_sg;
3040 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3041 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3043 response = ccb->smpio.smp_response;
3045 case CAM_DATA_VADDR:
3046 request = ccb->smpio.smp_request;
3047 response = ccb->smpio.smp_response;
3050 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3054 #else /* __FreeBSD_version < 1000028 */
3056 * XXX We don't yet support physical addresses here.
3058 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3059 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3060 "supported\n", __func__);
3061 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3067 * If the user wants to send an S/G list, check to make sure they
3068 * have single buffers.
3070 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3072 * The chip does not support more than one buffer for the
3073 * request or response.
3075 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3076 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3077 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3078 "response buffer segments not supported for SMP\n",
3080 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3086 * The CAM_SCATTER_VALID flag was originally implemented
3087 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3088 * We have two. So, just take that flag to mean that we
3089 * might have S/G lists, and look at the S/G segment count
3090 * to figure out whether that is the case for each individual
3093 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3094 bus_dma_segment_t *req_sg;
3096 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3097 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3099 request = ccb->smpio.smp_request;
3101 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3102 bus_dma_segment_t *rsp_sg;
3104 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3105 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3107 response = ccb->smpio.smp_response;
3109 request = ccb->smpio.smp_request;
3110 response = ccb->smpio.smp_response;
3112 #endif /* __FreeBSD_version < 1000028 */
3114 cm = mpr_alloc_command(sc);
3116 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3118 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3123 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3124 bzero(req, sizeof(*req));
3125 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3127 /* Allow the chip to use any route to this SAS address. */
3128 req->PhysicalPort = 0xff;
3130 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3132 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3134 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3135 "%#jx\n", __func__, (uintmax_t)sasaddr);
3137 mpr_init_sge(cm, req, &req->SGL);
3140 * Set up a uio to pass into mpr_map_command(). This allows us to
3141 * do one map command, and one busdma call in there.
3143 cm->cm_uio.uio_iov = cm->cm_iovec;
3144 cm->cm_uio.uio_iovcnt = 2;
3145 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3148 * The read/write flag isn't used by busdma, but set it just in
3149 * case. This isn't exactly accurate, either, since we're going in
3152 cm->cm_uio.uio_rw = UIO_WRITE;
3154 cm->cm_iovec[0].iov_base = request;
3155 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3156 cm->cm_iovec[1].iov_base = response;
3157 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3159 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3160 cm->cm_iovec[1].iov_len;
3163 * Trigger a warning message in mpr_data_cb() for the user if we
3164 * wind up exceeding two S/G segments. The chip expects one
3165 * segment for the request and another for the response.
3167 cm->cm_max_segs = 2;
3169 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3170 cm->cm_complete = mprsas_smpio_complete;
3171 cm->cm_complete_data = ccb;
3174 * Tell the mapping code that we're using a uio, and that this is
3175 * an SMP passthrough request. There is a little special-case
3176 * logic there (in mpr_data_cb()) to handle the bidirectional
3179 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3180 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3182 /* The chip data format is little endian. */
3183 req->SASAddress.High = htole32(sasaddr >> 32);
3184 req->SASAddress.Low = htole32(sasaddr);
3187 * XXX Note that we don't have a timeout/abort mechanism here.
3188 * From the manual, it looks like task management requests only
3189 * work for SCSI IO and SATA passthrough requests. We may need to
3190 * have a mechanism to retry requests in the event of a chip reset
3191 * at least. Hopefully the chip will insure that any errors short
3192 * of that are relayed back to the driver.
3194 error = mpr_map_command(sc, cm);
3195 if ((error != 0) && (error != EINPROGRESS)) {
3196 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3197 "mpr_map_command()\n", __func__, error);
3204 mpr_free_command(sc, cm);
3205 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3211 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3213 struct mpr_softc *sc;
3214 struct mprsas_target *targ;
3215 uint64_t sasaddr = 0;
3220 * Make sure the target exists.
3222 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3223 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3224 targ = &sassc->targets[ccb->ccb_h.target_id];
3225 if (targ->handle == 0x0) {
3226 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3227 __func__, ccb->ccb_h.target_id);
3228 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3234 * If this device has an embedded SMP target, we'll talk to it
3236 * figure out what the expander's address is.
3238 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3239 sasaddr = targ->sasaddr;
3242 * If we don't have a SAS address for the expander yet, try
3243 * grabbing it from the page 0x83 information cached in the
3244 * transport layer for this target. LSI expanders report the
3245 * expander SAS address as the port-associated SAS address in
3246 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3249 * XXX KDM disable this for now, but leave it commented out so that
3250 * it is obvious that this is another possible way to get the SAS
3253 * The parent handle method below is a little more reliable, and
3254 * the other benefit is that it works for devices other than SES
3255 * devices. So you can send a SMP request to a da(4) device and it
3256 * will get routed to the expander that device is attached to.
3257 * (Assuming the da(4) device doesn't contain an SMP target...)
3261 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3265 * If we still don't have a SAS address for the expander, look for
3266 * the parent device of this device, which is probably the expander.
3269 #ifdef OLD_MPR_PROBE
3270 struct mprsas_target *parent_target;
3273 if (targ->parent_handle == 0x0) {
3274 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3275 "a valid parent handle!\n", __func__, targ->handle);
3276 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3279 #ifdef OLD_MPR_PROBE
3280 parent_target = mprsas_find_target_by_handle(sassc, 0,
3281 targ->parent_handle);
3283 if (parent_target == NULL) {
3284 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3285 "a valid parent target!\n", __func__, targ->handle);
3286 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3290 if ((parent_target->devinfo &
3291 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3292 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3293 "does not have an SMP target!\n", __func__,
3294 targ->handle, parent_target->handle);
3295 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3299 sasaddr = parent_target->sasaddr;
3300 #else /* OLD_MPR_PROBE */
3301 if ((targ->parent_devinfo &
3302 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3303 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3304 "does not have an SMP target!\n", __func__,
3305 targ->handle, targ->parent_handle);
3306 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3310 if (targ->parent_sasaddr == 0x0) {
3311 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3312 "%d does not have a valid SAS address!\n", __func__,
3313 targ->handle, targ->parent_handle);
3314 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3318 sasaddr = targ->parent_sasaddr;
3319 #endif /* OLD_MPR_PROBE */
3324 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3325 "handle %d\n", __func__, targ->handle);
3326 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3329 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3337 #endif //__FreeBSD_version >= 900026
3340 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3342 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3343 struct mpr_softc *sc;
3344 struct mpr_command *tm;
3345 struct mprsas_target *targ;
3347 MPR_FUNCTRACE(sassc->sc);
3348 KKASSERT(lockowned(&sassc->sc->mpr_lock));
3350 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3351 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3353 tm = mpr_alloc_command(sc);
3355 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3356 "mprsas_action_resetdev\n");
3357 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3362 targ = &sassc->targets[ccb->ccb_h.target_id];
3363 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3364 req->DevHandle = htole16(targ->handle);
3365 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3366 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3368 /* SAS Hard Link Reset / SATA Link Reset */
3369 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3372 tm->cm_desc.HighPriority.RequestFlags =
3373 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3374 tm->cm_complete = mprsas_resetdev_complete;
3375 tm->cm_complete_data = ccb;
3377 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3378 __func__, targ->tid);
3380 targ->flags |= MPRSAS_TARGET_INRESET;
3382 mpr_map_command(sc, tm);
3386 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3388 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3392 KKASSERT(lockowned(&sc->mpr_lock));
3394 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3395 ccb = tm->cm_complete_data;
3398 * Currently there should be no way we can hit this case. It only
3399 * happens when we have a failure to allocate chain frames, and
3400 * task management commands don't have S/G lists.
3402 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3403 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3405 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3407 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3408 "handle %#04x! This should not happen!\n", __func__,
3409 tm->cm_flags, req->DevHandle);
3410 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3414 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3415 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3417 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3418 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3419 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3423 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3427 mprsas_free_tm(sc, tm);
3432 mprsas_poll(struct cam_sim *sim)
3434 struct mprsas_softc *sassc;
3436 sassc = cam_sim_softc(sim);
3438 if (sassc->sc->mpr_debug & MPR_TRACE) {
3439 /* frequent debug messages during a panic just slow
3440 * everything down too much.
3442 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3444 sassc->sc->mpr_debug &= ~MPR_TRACE;
3447 mpr_intr_locked(sassc->sc);
3451 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3454 struct mpr_softc *sc;
3456 sc = (struct mpr_softc *)callback_arg;
3459 #if (__FreeBSD_version >= 1000006) || \
3460 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3461 case AC_ADVINFO_CHANGED: {
3462 struct mprsas_target *target;
3463 struct mprsas_softc *sassc;
3464 struct scsi_read_capacity_data_long rcap_buf;
3465 struct ccb_dev_advinfo cdai;
3466 struct mprsas_lun *lun;
3471 buftype = (uintptr_t)arg;
3477 * We're only interested in read capacity data changes.
3479 if (buftype != CDAI_TYPE_RCAPLONG)
3483 * See the comment in mpr_attach_sas() for a detailed
3484 * explanation. In these versions of FreeBSD we register
3485 * for all events and filter out the events that don't
3488 #if (__FreeBSD_version < 1000703) || \
3489 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3490 if (xpt_path_path_id(path) != sassc->sim->path_id)
3495 * We should have a handle for this, but check to make sure.
3497 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3498 ("Target %d out of bounds in mprsas_async\n",
3499 xpt_path_target_id(path)));
3500 target = &sassc->targets[xpt_path_target_id(path)];
3501 if (target->handle == 0)
3504 lunid = xpt_path_lun_id(path);
3506 SLIST_FOREACH(lun, &target->luns, lun_link) {
3507 if (lun->lun_id == lunid) {
3513 if (found_lun == 0) {
3514 lun = kmalloc(sizeof(struct mprsas_lun), M_MPR,
3517 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3518 "LUN for EEDP support.\n");
3521 lun->lun_id = lunid;
3522 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3525 bzero(&rcap_buf, sizeof(rcap_buf));
3526 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3527 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3528 cdai.ccb_h.flags = CAM_DIR_IN;
3529 cdai.buftype = CDAI_TYPE_RCAPLONG;
3530 #if (__FreeBSD_version >= 1100061) || \
3531 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3532 cdai.flags = CDAI_FLAG_NONE;
3536 cdai.bufsiz = sizeof(rcap_buf);
3537 cdai.buf = (uint8_t *)&rcap_buf;
3538 xpt_action((union ccb *)&cdai);
3539 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3540 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
3542 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3543 && (rcap_buf.prot & SRC16_PROT_EN)) {
3544 switch (rcap_buf.prot & SRC16_P_TYPE) {
3547 lun->eedp_formatted = TRUE;
3548 lun->eedp_block_size =
3549 scsi_4btoul(rcap_buf.length);
3553 lun->eedp_formatted = FALSE;
3554 lun->eedp_block_size = 0;
3558 lun->eedp_formatted = FALSE;
3559 lun->eedp_block_size = 0;
3564 case AC_FOUND_DEVICE: {
3565 struct ccb_getdev *cgd;
3568 * See the comment in mpr_attach_sas() for a detailed
3569 * explanation. In these versions of FreeBSD we register
3570 * for all events and filter out the events that don't
3573 #if (__FreeBSD_version < 1000703) || \
3574 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3575 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3580 #if (__FreeBSD_version < 901503) || \
3581 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3582 mprsas_check_eedp(sc, path, cgd);
3591 #if (__FreeBSD_version < 901503) || \
3592 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3594 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3595 struct ccb_getdev *cgd)
3597 struct mprsas_softc *sassc = sc->sassc;
3598 struct ccb_scsiio *csio;
3599 struct scsi_read_capacity_16 *scsi_cmd;
3600 struct scsi_read_capacity_eedp *rcap_buf;
3602 target_id_t targetid;
3605 struct cam_path *local_path;
3606 struct mprsas_target *target;
3607 struct mprsas_lun *lun;
3611 pathid = cam_sim_path(sassc->sim);
3612 targetid = xpt_path_target_id(path);
3613 lunid = xpt_path_lun_id(path);
3615 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3616 "mprsas_check_eedp\n", targetid));
3617 target = &sassc->targets[targetid];
3618 if (target->handle == 0x0)
3622 * Determine if the device is EEDP capable.
3624 * If this flag is set in the inquiry data, the device supports
3625 * protection information, and must support the 16 byte read capacity
3626 * command, otherwise continue without sending read cap 16.
3628 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3632 * Issue a READ CAPACITY 16 command. This info is used to determine if
3633 * the LUN is formatted for EEDP support.
3635 ccb = xpt_alloc_ccb();
3637 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3642 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3644 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3651 * If LUN is already in list, don't create a new one.
3654 SLIST_FOREACH(lun, &target->luns, lun_link) {
3655 if (lun->lun_id == lunid) {
3661 lun = kmalloc(sizeof(struct mprsas_lun), M_MPR,
3664 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3666 xpt_free_path(local_path);
3670 lun->lun_id = lunid;
3671 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3674 xpt_path_string(local_path, path_str, sizeof(path_str));
3675 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3676 path_str, target->handle);
3679 * Issue a READ CAPACITY 16 command for the LUN. The
3680 * mprsas_read_cap_done function will load the read cap info into the
3683 rcap_buf = kmalloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3685 if (rcap_buf == NULL) {
3686 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3687 "buffer for EEDP support.\n");
3688 xpt_free_path(ccb->ccb_h.path);
3692 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_NORMAL);
3694 csio->ccb_h.func_code = XPT_SCSI_IO;
3695 csio->ccb_h.flags = CAM_DIR_IN;
3696 csio->ccb_h.retry_count = 4;
3697 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3698 csio->ccb_h.timeout = 60000;
3699 csio->data_ptr = (uint8_t *)rcap_buf;
3700 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3701 csio->sense_len = MPR_SENSE_LEN;
3702 csio->cdb_len = sizeof(*scsi_cmd);
3703 csio->tag_action = MSG_SIMPLE_Q_TAG;
3705 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3706 bzero(scsi_cmd, sizeof(*scsi_cmd));
3707 scsi_cmd->opcode = 0x9E;
3708 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3709 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3711 ccb->ccb_h.ppriv_ptr1 = sassc;
3716 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3718 struct mprsas_softc *sassc;
3719 struct mprsas_target *target;
3720 struct mprsas_lun *lun;
3721 struct scsi_read_capacity_eedp *rcap_buf;
3723 if (done_ccb == NULL)
3726 /* Driver need to release devq, it Scsi command is
3727 * generated by driver internally.
3728 * Currently there is a single place where driver
3729 * calls scsi command internally. In future if driver
3730 * calls more scsi command internally, it needs to release
3731 * devq internally, since those command will not go back to
3734 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3735 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3736 xpt_release_devq(done_ccb->ccb_h.path,
3737 /*count*/ 1, /*run_queue*/TRUE);
3740 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3743 * Get the LUN ID for the path and look it up in the LUN list for the
3746 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3747 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3748 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3749 target = &sassc->targets[done_ccb->ccb_h.target_id];
3750 SLIST_FOREACH(lun, &target->luns, lun_link) {
3751 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3755 * Got the LUN in the target's LUN list. Fill it in with EEDP
3756 * info. If the READ CAP 16 command had some SCSI error (common
3757 * if command is not supported), mark the lun as not supporting
3758 * EEDP and set the block size to 0.
3760 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3761 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3762 lun->eedp_formatted = FALSE;
3763 lun->eedp_block_size = 0;
3767 if (rcap_buf->protect & 0x01) {
3768 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3769 "%d is formatted for EEDP support.\n",
3770 done_ccb->ccb_h.target_lun,
3771 done_ccb->ccb_h.target_id);
3772 lun->eedp_formatted = TRUE;
3773 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3778 // Finished with this CCB and path.
3779 kfree(rcap_buf, M_MPR);
3780 xpt_free_path(done_ccb->ccb_h.path);
3781 xpt_free_ccb(done_ccb);
3783 #endif /* (__FreeBSD_version < 901503) || \
3784 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3787 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3788 struct mprsas_target *target, lun_id_t lun_id)
3794 * Set the INRESET flag for this target so that no I/O will be sent to
3795 * the target until the reset has completed. If an I/O request does
3796 * happen, the devq will be frozen. The CCB holds the path which is
3797 * used to release the devq. The devq is released and the CCB is freed
3798 * when the TM completes.
3800 ccb = xpt_alloc_ccb();
3802 path_id = cam_sim_path(sc->sassc->sim);
3803 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3804 target->tid, lun_id) != CAM_REQ_CMP) {
3808 tm->cm_targ = target;
3809 target->flags |= MPRSAS_TARGET_INRESET;
3815 mprsas_startup(struct mpr_softc *sc)
3818 * Send the port enable message and set the wait_for_port_enable flag.
3819 * This flag helps to keep the simq frozen until all discovery events
3822 sc->wait_for_port_enable = 1;
3823 mprsas_send_portenable(sc);
3828 mprsas_send_portenable(struct mpr_softc *sc)
3830 MPI2_PORT_ENABLE_REQUEST *request;
3831 struct mpr_command *cm;
3835 if ((cm = mpr_alloc_command(sc)) == NULL)
3837 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3838 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3839 request->MsgFlags = 0;
3841 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3842 cm->cm_complete = mprsas_portenable_complete;
3846 mpr_map_command(sc, cm);
3847 mpr_dprint(sc, MPR_XINFO,
3848 "mpr_send_portenable finished cm %p req %p complete %p\n",
3849 cm, cm->cm_req, cm->cm_complete);
3854 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3856 MPI2_PORT_ENABLE_REPLY *reply;
3857 struct mprsas_softc *sassc;
3863 * Currently there should be no way we can hit this case. It only
3864 * happens when we have a failure to allocate chain frames, and
3865 * port enable commands don't have S/G lists.
3867 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3868 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3869 "This should not happen!\n", __func__, cm->cm_flags);
3872 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3874 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3875 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3876 MPI2_IOCSTATUS_SUCCESS)
3877 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3879 mpr_free_command(sc, cm);
3881 * Done waiting for port enable to complete. Decrement the refcount.
3882 * If refcount is 0, discovery is complete and a rescan of the bus can
3885 sc->wait_for_port_enable = 0;
3886 sc->port_enable_complete = 1;
3887 wakeup(&sc->port_enable_complete);
3888 mprsas_startup_decrement(sassc);
3892 mprsas_check_id(struct mprsas_softc *sassc, int id)
3894 struct mpr_softc *sc = sassc->sc;
3898 ids = &sc->exclude_ids[0];
3899 while((name = strsep(&ids, ",")) != NULL) {
3900 if (name[0] == '\0')
3902 if (strtol(name, NULL, 0) == (long)id)
3910 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3912 struct mprsas_softc *sassc;
3913 struct mprsas_lun *lun, *lun_tmp;
3914 struct mprsas_target *targ;
3919 * The number of targets is based on IOC Facts, so free all of
3920 * the allocated LUNs for each target and then the target buffer
3923 for (i=0; i< maxtargets; i++) {
3924 targ = &sassc->targets[i];
3925 SLIST_FOREACH_MUTABLE(lun, &targ->luns, lun_link, lun_tmp) {
3929 kfree(sassc->targets, M_MPR);
3931 sassc->targets = kmalloc(sizeof(struct mprsas_target) * maxtargets,
3932 M_MPR, M_WAITOK|M_ZERO);
3933 if (!sassc->targets) {
3934 panic("%s failed to alloc targets with error %d\n",