2 * Copyright (c) 2009 Yahoo! Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2011 LSI Corp.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
39 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * LSI MPT-Fusion Host Adapter FreeBSD
53 * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
56 /* Communications core for LSI MPT2 */
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
69 #include <sys/malloc.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
80 #include <machine/stdarg.h>
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
108 #define MPSSAS_DISCOVERY_TIMEOUT 20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
112 * static array to check SCSI OpCode for EEDP protection bits
114 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
115 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
116 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Per-opcode EEDP (End-to-End Data Protection) flag table, indexed by the
 * SCSI CDB opcode byte.  Non-zero entries mark READ (PRO_R), WRITE (PRO_W)
 * and VERIFY (PRO_V) opcodes that need protection-info handling in the I/O
 * path.  NOTE(review): this extract is missing interior lines (embedded
 * line numbers jump), including the closing "};" of the initializer.
 */
117 static uint8_t op_code_prot[256] = {
118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE+VERIFY(10), 0x2f VERIFY(10) */
120 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
122 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
123 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
124 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
125 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE+VERIFY(16), 0x8f VERIFY(16) */
126 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
127 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE+VERIFY(12), 0xaf VERIFY(12) */
128 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
136 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
138 static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
139 #if 0 /* XXX unused */
140 static void mpssas_discovery_timeout(void *data);
142 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
143 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
144 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
145 static void mpssas_poll(struct cam_sim *sim);
146 static void mpssas_scsiio_timeout(void *data);
147 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
148 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
149 struct mps_command *cm, union ccb *ccb);
150 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
151 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
152 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
153 #if __FreeBSD_version >= 900026
154 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
155 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
157 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
158 #endif //FreeBSD_version >= 900026
159 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
160 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
161 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
162 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
163 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
164 static void mpssas_scanner_thread(void *arg);
165 #if __FreeBSD_version >= 1000006
166 static void mpssas_async(void *callback_arg, uint32_t code,
167 struct cam_path *path, void *arg);
169 static void mpssas_check_eedp(struct mpssas_softc *sassc);
170 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
172 static int mpssas_send_portenable(struct mps_softc *sc);
173 static void mpssas_portenable_complete(struct mps_softc *sc,
174 struct mps_command *cm);
/*
 * Linear scan of the target array for the entry whose firmware device
 * handle matches 'handle', beginning at index 'start'.  Returns the match
 * (return statement lost in this truncated extract; presumably returns
 * NULL when no target matches — TODO confirm against full source).
 */
176 static struct mpssas_target *
177 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
179 struct mpssas_target *target;
182 for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
183 target = &sassc->targets[i];
184 if (target->handle == handle)
191 /* we need to freeze the simq during attach and diag reset, to avoid failing
192 * commands before device handles have been found by discovery. Since
193 * discovery involves reading config pages and possibly sending commands,
194 * discovery actions may continue even after we receive the end of discovery
195 * event, so refcount discovery actions instead of assuming we can unfreeze
196 * the simq when we get the event.
/*
 * Bump the discovery-action refcount while in startup; the first increment
 * freezes the CAM simq so no commands are issued before device handles are
 * known (see the rationale comment above this function).
 */
199 mpssas_startup_increment(struct mpssas_softc *sassc)
201 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
202 if (sassc->startup_refcount++ == 0) {
203 /* just starting, freeze the simq */
204 mps_dprint(sassc->sc, MPS_INFO,
205 "%s freezing simq\n", __func__);
206 xpt_freeze_simq(sassc->sim, 1);
208 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
209 sassc->startup_refcount);
/*
 * Drop the discovery-action refcount; when it reaches zero, startup is
 * complete: clear MPSSAS_IN_STARTUP, release the simq frozen by
 * mpssas_startup_increment(), and rescan for the current topology.
 */
214 mpssas_startup_decrement(struct mpssas_softc *sassc)
216 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
217 if (--sassc->startup_refcount == 0) {
218 /* finished all discovery-related actions, release
219 * the simq and rescan for the latest topology.
221 mps_dprint(sassc->sc, MPS_INFO,
222 "%s releasing simq\n", __func__);
223 sassc->flags &= ~MPSSAS_IN_STARTUP;
224 xpt_release_simq(sassc->sim, 1);
225 mpssas_rescan_target(sassc->sc, NULL);
227 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
228 sassc->startup_refcount);
232 /* LSI's firmware requires us to stop sending commands when we're doing task
233 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management.  The first
 * outstanding TM freezes the simq (firmware requires normal I/O to stop
 * during task management — see comment above).  Return of 'tm' is not
 * visible in this truncated extract.
 */
237 mpssas_alloc_tm(struct mps_softc *sc)
239 struct mps_command *tm;
241 tm = mps_alloc_high_priority_command(sc);
243 if (sc->sassc->tm_count++ == 0) {
244 mps_printf(sc, "%s freezing simq\n", __func__);
245 xpt_freeze_simq(sc->sassc->sim, 1);
247 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
248 sc->sassc->tm_count);
/*
 * Release a task-management command obtained from mpssas_alloc_tm().
 * When the last outstanding TM goes away, unfreeze the simq that
 * mpssas_alloc_tm() froze.
 */
254 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
259 /* if there are no TMs in use, we can release the simq. We use our
260 * own refcount so that it's easier for a diag reset to cleanup and
263 if (--sc->sassc->tm_count == 0) {
264 mps_printf(sc, "%s releasing simq\n", __func__);
265 xpt_release_simq(sc->sassc->sim, 1);
267 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
268 sc->sassc->tm_count);
270 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan for one target (or the whole bus when targ is
 * NULL, via CAM_TARGET_WILDCARD).  Allocates a CCB, builds a path on this
 * SIM, and hands it to mpssas_rescan().  Note target id is derived by
 * pointer arithmetic against the base of the targets array.
 */
275 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
277 struct mpssas_softc *sassc = sc->sassc;
279 target_id_t targetid;
282 pathid = cam_sim_path(sassc->sim);
284 targetid = CAM_TARGET_WILDCARD;
286 targetid = targ - sassc->targets;
289 * Allocate a CCB and schedule a rescan.
291 ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
293 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
294 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
295 mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
300 /* XXX Hardwired to scan the bus for now */
301 ccb->ccb_h.func_code = XPT_SCAN_BUS;
302 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
303 mpssas_rescan(sassc, ccb);
/*
 * printf-style logger for a command: prefixes the message with either the
 * CCB's CAM path (plus the SCSI CDB and transfer length for XPT_SCSI_IO)
 * or a synthesized "(noperiph:...)" tuple when no CCB is attached, then
 * the SMID, then the caller's formatted text.  Output goes to the console
 * via kprintf().
 */
307 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
317 sbuf_new(&sb, str, sizeof(str), 0);
321 if (cm->cm_ccb != NULL) {
322 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
324 sbuf_cat(&sb, path_str);
325 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
326 scsi_command_string(&cm->cm_ccb->csio, &sb);
327 sbuf_printf(&sb, "length %d ",
328 cm->cm_ccb->csio.dxfer_len);
332 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
333 cam_sim_name(cm->cm_sc->sassc->sim),
334 cam_sim_unit(cm->cm_sc->sassc->sim),
335 cam_sim_bus(cm->cm_sc->sassc->sim),
336 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
340 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
341 sbuf_vprintf(&sb, fmt, ap);
343 kprintf("%s", sbuf_data(&sb));
/*
 * Notify CAM that a target has disappeared: build a path to LUN 0 of the
 * target and post an AC_LOST_DEVICE async event so peripherals detach.
 */
349 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
351 struct mpssas_softc *sassc = sc->sassc;
352 path_id_t pathid = cam_sim_path(sassc->sim);
353 struct cam_path *path;
355 mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
356 if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
357 mps_printf(sc, "unable to create path for lost target %d\n",
362 xpt_async(AC_LOST_DEVICE, path, NULL);
367 * The MPT2 firmware performs debounce on the link to avoid transient link
368 * errors and false removals. When it does decide that link has been lost
369 * and a device need to go away, it expects that the host will perform a
370 * target reset and then an op remove. The reset has the side-effect of
371 * aborting any outstanding requests for the device, which is required for
372 * the op-remove to succeed. It's not clear if the host should check for
373 * the device coming back alive after the reset.
/*
 * First stage of device removal (see firmware-debounce comment above):
 * locate the target by firmware handle, mark it INREMOVAL, tell CAM it is
 * lost, and issue a target-reset TM (with link reset) whose completion
 * (mpssas_remove_device) performs the actual OP_REMOVE.  WD ("WarpDrive")
 * controllers configured to always expose the disk bail out early.
 */
376 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
378 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
379 struct mps_softc *sc;
380 struct mps_command *cm;
381 struct mpssas_target *targ = NULL;
383 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
386 * If this is a WD controller, determine if the disk should be exposed
387 * to the OS or not. If disk should be exposed, return from this
388 * function without doing anything.
391 if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
392 MPS_WD_EXPOSE_ALWAYS)) {
396 targ = mpssas_find_target_by_handle(sassc, 0, handle);
398 /* FIXME: what is the action? */
399 /* We don't know about this device? */
400 kprintf("%s: invalid handle 0x%x \n", __func__, handle);
404 targ->flags |= MPSSAS_TARGET_INREMOVAL;
406 cm = mpssas_alloc_tm(sc);
408 mps_printf(sc, "%s: command alloc failure\n", __func__);
412 mpssas_lost_target(sc, targ);
414 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
415 memset(req, 0, sizeof(*req));
416 req->DevHandle = targ->handle;
417 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
418 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
420 /* SAS Hard Link Reset / SATA Link Reset */
421 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* completion data carries the device handle, recovered in the callback */
425 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
426 cm->cm_complete = mpssas_remove_device;
427 cm->cm_complete_data = (void *)(uintptr_t)handle;
428 mps_map_command(sc, cm);
/*
 * Completion of the target-reset TM issued by mpssas_prepare_remove().
 * On success, reuses the same command to send an SAS IO-unit-control
 * OP_REMOVE_DEVICE for the handle, with mpssas_remove_complete() as the
 * next-stage callback; then completes any commands still queued on the
 * target with CAM_DEV_NOT_THERE.  A NULL reply (diag reset) or non-zero
 * IOCStatus aborts the sequence and frees the TM.
 */
432 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
434 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
435 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
436 struct mpssas_target *targ;
437 struct mps_command *next_cm;
440 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
442 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
443 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
447 * Currently there should be no way we can hit this case. It only
448 * happens when we have a failure to allocate chain frames, and
449 * task management commands don't have S/G lists.
451 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
452 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
453 "This should not happen!\n", __func__, tm->cm_flags,
455 mpssas_free_tm(sc, tm);
460 /* XXX retry the remove after the diag reset completes? */
461 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
463 mpssas_free_tm(sc, tm);
467 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
468 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
469 reply->IOCStatus, handle);
470 mpssas_free_tm(sc, tm);
474 mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
475 (u_int)reply->TerminationCount);
/* release the reply frame now; the command frame is reused below */
476 mps_free_reply(sc, tm->cm_reply_data);
477 tm->cm_reply = NULL; /* Ensures the the reply won't get re-freed */
479 /* Reuse the existing command */
480 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
481 memset(req, 0, sizeof(*req));
482 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
483 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
484 req->DevHandle = handle;
486 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
487 tm->cm_complete = mpssas_remove_complete;
488 tm->cm_complete_data = (void *)(uintptr_t)handle;
490 mps_map_command(sc, tm);
492 mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
/* flush commands the firmware's reset orphaned on this target */
494 TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
497 mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
498 ccb = tm->cm_complete_data;
499 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
500 mpssas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the OP_REMOVE_DEVICE
 * request.  On IOC success, clears the volatile fields of the target
 * (handle, enclosure info, link rate) but deliberately keeps devname and
 * SAS address so the target id can be reserved/reused (see comment
 * below).  Always frees the TM at the end.
 */
505 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
507 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
509 struct mpssas_target *targ;
511 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
513 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
514 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
517 * Currently there should be no way we can hit this case. It only
518 * happens when we have a failure to allocate chain frames, and
519 * task management commands don't have S/G lists.
521 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
522 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
523 "This should not happen!\n", __func__, tm->cm_flags,
525 mpssas_free_tm(sc, tm);
530 /* most likely a chip reset */
531 mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
533 mpssas_free_tm(sc, tm);
537 mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
538 handle, reply->IOCStatus);
541 * Don't clear target if remove fails because things will get confusing.
542 * Leave the devname and sasaddr intact so that we know to avoid reusing
543 * this target id if possible, and so we can assign the same target id
544 * to this device if it comes back in the future.
546 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
549 targ->encl_handle = 0x0;
550 targ->encl_slot = 0x0;
551 targ->exp_dev_handle = 0x0;
553 targ->linkrate = 0x0;
557 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask (SAS topology/discovery/device-status
 * plus IR volume/disk events and log entries) and register
 * mpssas_evt_handler for it, saving the handle in sc->sassc->mpssas_eh.
 */
561 mpssas_register_events(struct mps_softc *sc)
566 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
567 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
568 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
569 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
570 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
571 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
572 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
573 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
574 setbit(events, MPI2_EVENT_IR_VOLUME);
575 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
576 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
577 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
579 mps_register_events(sc, events, mpssas_evt_handler, NULL,
580 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the softc and target array, create
 * the SIMQ/SIM, the event taskqueue, and the rescan kthread; register a
 * single faked bus with CAM; enter startup mode with the simq frozen
 * until discovery completes; optionally hook AC_ADVINFO_CHANGED (newer
 * CAM) and finally register for firmware events.  Error-unwind paths are
 * not visible in this truncated extract.
 */
586 mps_attach_sas(struct mps_softc *sc)
588 struct mpssas_softc *sassc;
589 #if __FreeBSD_version >= 1000006
594 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
596 sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
597 sassc->targets = kmalloc(sizeof(struct mpssas_target) *
598 sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
602 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
603 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
608 unit = device_get_unit(sc->mps_dev);
609 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
610 unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
611 if (sassc->sim == NULL) {
612 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
617 TAILQ_INIT(&sassc->ev_queue);
619 /* Initialize taskqueue for Event Handling */
620 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
621 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
622 taskqueue_thread_enqueue, &sassc->ev_tq);
624 /* Run the task queue with lowest priority */
625 taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
626 device_get_nameunit(sc->mps_dev));
628 TAILQ_INIT(&sassc->ccb_scanq);
629 error = mps_kproc_create(mpssas_scanner_thread, sassc,
630 &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
632 mps_printf(sc, "Error %d starting rescan thread\n", error);
637 sassc->flags |= MPSSAS_SCANTHREAD;
640 * XXX There should be a bus for every port on the adapter, but since
641 * we're just going to fake the topology for now, we'll pretend that
642 * everything is just a target on a single bus.
644 if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
645 mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
652 * Assume that discovery events will start right away. Freezing
653 * the simq will prevent the CAM boottime scanner from running
654 * before discovery is complete.
656 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
657 xpt_freeze_simq(sassc->sim, 1);
658 sc->sassc->startup_refcount = 0;
660 callout_init_mp(&sassc->discovery_callout);
661 sassc->discovery_timeouts = 0;
665 #if __FreeBSD_version >= 1000006
666 status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
667 if (status != CAM_REQ_CMP) {
668 mps_printf(sc, "Error %#x registering async handler for "
669 "AC_ADVINFO_CHANGED events\n", status);
675 mpssas_register_events(sc);
/*
 * Tear down the SAS/CAM layer in reverse of mps_attach_sas(): deregister
 * firmware events, drain/free the event taskqueue (with the lock dropped
 * to avoid deadlock), unwind a pending startup simq freeze, deregister
 * and free the SIM/bus, signal the scanner thread to exit and wait up to
 * 30s for it, then free the SIMQ, target array and softc.
 */
683 mps_detach_sas(struct mps_softc *sc)
685 struct mpssas_softc *sassc;
687 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
689 if (sc->sassc == NULL)
693 mps_deregister_events(sc, sassc->mpssas_eh);
696 * Drain and free the event handling taskqueue with the lock
697 * unheld so that any parallel processing tasks drain properly
698 * without deadlocking.
700 if (sassc->ev_tq != NULL)
701 taskqueue_free(sassc->ev_tq);
703 /* Make sure CAM doesn't wedge if we had to bail out early. */
706 /* Deregister our async handler */
707 #if __FreeBSD_version >= 1000006
708 xpt_register_async(0, mpssas_async, sc, NULL);
711 if (sassc->flags & MPSSAS_IN_STARTUP)
712 xpt_release_simq(sassc->sim, 1);
714 if (sassc->sim != NULL) {
715 xpt_bus_deregister(cam_sim_path(sassc->sim));
716 cam_sim_free(sassc->sim);
719 if (sassc->flags & MPSSAS_SCANTHREAD) {
720 sassc->flags |= MPSSAS_SHUTDOWN;
721 wakeup(&sassc->ccb_scanq);
723 if (sassc->flags & MPSSAS_SCANTHREAD) {
724 lksleep(&sassc->flags, &sc->mps_lock, 0,
725 "mps_shutdown", 30 * hz);
730 if (sassc->devq != NULL)
731 cam_simq_release(sassc->devq);
733 kfree(sassc->targets, M_MPT2);
734 kfree(sassc, M_MPT2);
/*
 * Called when discovery finishes: cancel the pending discovery-timeout
 * callout, if one was armed.
 */
741 mpssas_discovery_end(struct mpssas_softc *sassc)
743 struct mps_softc *sc = sassc->sc;
745 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
747 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
748 callout_stop(&sassc->discovery_callout);
752 #if 0 /* XXX unused */
/*
 * (Compiled out via the surrounding "#if 0 / XXX unused".)  Callout fired
 * when discovery takes too long — likely broken interrupts.  Polls the
 * hardware for events, then either ends discovery, re-arms itself (up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts), or gives up and forces
 * discovery to end.
 */
754 mpssas_discovery_timeout(void *data)
756 struct mpssas_softc *sassc = data;
757 struct mps_softc *sc;
760 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
764 "Timeout waiting for discovery, interrupts may not be working!\n");
765 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
767 /* Poll the hardware for events in case interrupts aren't working */
770 mps_printf(sassc->sc,
771 "Finished polling after discovery timeout at %d\n", ticks);
773 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
774 mpssas_discovery_end(sassc);
776 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
777 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
778 callout_reset(&sassc->discovery_callout,
779 MPSSAS_DISCOVERY_TIMEOUT * hz,
780 mpssas_discovery_timeout, sassc);
781 sassc->discovery_timeouts++;
783 mps_dprint(sassc->sc, MPS_FAULT,
784 "Discovery timed out, continuing.\n");
785 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
786 mpssas_discovery_end(sassc);
/*
 * CAM action entry point for the SIM.  Dispatches on ccb_h.func_code:
 * XPT_PATH_INQ (fill in HBA capabilities), XPT_GET_TRAN_SETTINGS (report
 * per-target SAS link rate and tagged queuing), XPT_CALC_GEOMETRY,
 * XPT_RESET_DEV, a faked-success case, XPT_SCSI_IO, optional XPT_SMP_IO,
 * and a default of CAM_FUNC_NOTAVAIL.  Must be called with mps_lock held
 * (asserted below).
 */
795 mpssas_action(struct cam_sim *sim, union ccb *ccb)
797 struct mpssas_softc *sassc;
799 sassc = cam_sim_softc(sim);
801 mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
802 ccb->ccb_h.func_code);
803 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
805 switch (ccb->ccb_h.func_code) {
808 struct ccb_pathinq *cpi = &ccb->cpi;
810 cpi->version_num = 1;
811 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
812 cpi->target_sprt = 0;
813 cpi->hba_misc = PIM_NOBUSRESET;
814 cpi->hba_eng_cnt = 0;
815 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
817 cpi->initiator_id = 255;
818 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
819 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
820 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
821 cpi->unit_number = cam_sim_unit(sim);
822 cpi->bus_id = cam_sim_bus(sim);
823 cpi->base_transfer_speed = 150000;
824 cpi->transport = XPORT_SAS;
825 cpi->transport_version = 0;
826 cpi->protocol = PROTO_SCSI;
827 cpi->protocol_version = SCSI_REV_SPC;
828 #if __FreeBSD_version >= 800001
830 * XXX KDM where does this number come from?
832 cpi->maxio = 256 * 1024;
834 cpi->ccb_h.status = CAM_REQ_CMP;
837 case XPT_GET_TRAN_SETTINGS:
839 struct ccb_trans_settings *cts;
840 struct ccb_trans_settings_sas *sas;
841 struct ccb_trans_settings_scsi *scsi;
842 struct mpssas_target *targ;
845 sas = &cts->xport_specific.sas;
846 scsi = &cts->proto_specific.scsi;
/* handle 0 means no device at this target id */
848 targ = &sassc->targets[cts->ccb_h.target_id];
849 if (targ->handle == 0x0) {
850 cts->ccb_h.status = CAM_TID_INVALID;
854 cts->protocol_version = SCSI_REV_SPC2;
855 cts->transport = XPORT_SAS;
856 cts->transport_version = 0;
858 sas->valid = CTS_SAS_VALID_SPEED;
/* map firmware link-rate code to bitrate in kb/s (1.5/3/6 Gb/s) */
859 switch (targ->linkrate) {
861 sas->bitrate = 150000;
864 sas->bitrate = 300000;
867 sas->bitrate = 600000;
873 cts->protocol = PROTO_SCSI;
874 scsi->valid = CTS_SCSI_VALID_TQ;
875 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
877 cts->ccb_h.status = CAM_REQ_CMP;
880 case XPT_CALC_GEOMETRY:
881 cam_calc_geometry(&ccb->ccg, /*extended*/1);
882 ccb->ccb_h.status = CAM_REQ_CMP;
885 mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
886 mpssas_action_resetdev(sassc, ccb);
891 mps_printf(sassc->sc, "mpssas_action faking success for "
893 ccb->ccb_h.status = CAM_REQ_CMP;
896 mpssas_action_scsiio(sassc, ccb);
898 #if __FreeBSD_version >= 900026
900 mpssas_action_smpio(sassc, ccb);
904 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Broadcast a reset notification (e.g. AC_BUS_RESET or AC_SENT_BDR) to
 * CAM for the given target/LUN by creating a path and posting the async
 * code on it.
 */
912 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
913 target_id_t target_id, lun_id_t lun_id)
915 path_id_t path_id = cam_sim_path(sc->sassc->sim);
916 struct cam_path *path;
918 mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
919 ac_code, target_id, lun_id);
921 if (xpt_create_path(&path, NULL,
922 path_id, target_id, lun_id) != CAM_REQ_CMP) {
923 mps_printf(sc, "unable to create path for reset "
928 xpt_async(ac_code, path, NULL);
/*
 * Diag-reset cleanup: walk every command slot (index 0 is skipped —
 * presumably reserved; TODO confirm) and force each outstanding command
 * to completion with a NULL reply, either by invoking its completion
 * callback, waking a sleeping waiter, or marking polled commands
 * complete.  Logs any command that was neither completed nor free.
 * Requires mps_lock (asserted).
 */
933 mpssas_complete_all_commands(struct mps_softc *sc)
935 struct mps_command *cm;
939 mps_printf(sc, "%s\n", __func__);
940 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
942 /* complete all commands with a NULL reply */
943 for (i = 1; i < sc->num_reqs; i++) {
944 cm = &sc->commands[i];
948 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
949 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
951 if (cm->cm_complete != NULL) {
952 mpssas_log_command(cm,
953 "completing cm %p state %x ccb %p for diag reset\n",
954 cm, cm->cm_state, cm->cm_ccb);
956 cm->cm_complete(sc, cm);
960 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
961 mpssas_log_command(cm,
962 "waking up cm %p state %x ccb %p for diag reset\n",
963 cm, cm->cm_state, cm->cm_ccb);
968 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
969 /* this should never happen, but if it does, log */
970 mpssas_log_command(cm,
971 "cm %p state %x flags 0x%x ccb %p during diag "
972 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: re-enter startup/discovery mode with
 * the simq frozen, announce a bus reset to CAM, flush all outstanding
 * commands via mpssas_complete_all_commands(), reset the startup
 * refcount, and invalidate every target's firmware handle so discovery
 * re-populates them (flagging each target INDIAGRESET).
 */
979 mpssas_handle_reinit(struct mps_softc *sc)
983 /* Go back into startup mode and freeze the simq, so that CAM
984 * doesn't send any commands until after we've rediscovered all
985 * targets and found the proper device handles for them.
987 * After the reset, portenable will trigger discovery, and after all
988 * discovery-related activities have finished, the simq will be
991 mps_printf(sc, "%s startup\n", __func__);
992 sc->sassc->flags |= MPSSAS_IN_STARTUP;
993 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
994 xpt_freeze_simq(sc->sassc->sim, 1);
996 /* notify CAM of a bus reset */
997 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1000 /* complete and cleanup after all outstanding commands */
1001 mpssas_complete_all_commands(sc);
1003 mps_printf(sc, "%s startup %u tm %u after command completion\n",
1004 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1007 * The simq was explicitly frozen above, so set the refcount to 0.
1008 * The simq will be explicitly released after port enable completes.
1010 sc->sassc->startup_refcount = 0;
1012 /* zero all the target handles, since they may change after the
1013 * reset, and we have to rediscover all the targets and use the new
1016 for (i = 0; i < sc->facts->MaxTargets; i++) {
1017 if (sc->sassc->targets[i].outstanding != 0)
1018 mps_printf(sc, "target %u outstanding %u\n",
1019 i, sc->sassc->targets[i].outstanding);
1020 sc->sassc->targets[i].handle = 0x0;
1021 sc->sassc->targets[i].exp_dev_handle = 0x0;
1022 sc->sassc->targets[i].outstanding = 0;
1023 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout handler for a task-management command that never completed;
 * logs the timeout.  Recovery action (if any) is lost in this truncated
 * extract — TODO confirm against full source.
 */
1027 mpssas_tm_timeout(void *data)
1029 struct mps_command *tm = data;
1030 struct mps_softc *sc = tm->cm_sc;
1033 mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a logical-unit-reset TM.  Stops the TM timeout
 * callout, validates the reply, then counts commands still outstanding on
 * the same LUN: zero means recovery succeeded (announce AC_SENT_BDR, move
 * on to the next timed-out command or free the TM); non-zero means the
 * LUN reset effectively failed, so escalate to a target reset via
 * mpssas_send_reset().
 */
1039 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1041 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1042 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1043 unsigned int cm_count = 0;
1044 struct mps_command *cm;
1045 struct mpssas_target *targ;
1047 callout_stop(&tm->cm_callout);
1049 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1050 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1054 * Currently there should be no way we can hit this case. It only
1055 * happens when we have a failure to allocate chain frames, and
1056 * task management commands don't have S/G lists.
1058 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1059 mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1060 "This should not happen!\n", __func__, tm->cm_flags);
1061 mpssas_free_tm(sc, tm);
1065 if (reply == NULL) {
1066 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1067 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1068 /* this completion was due to a reset, just cleanup */
1069 targ->flags &= ~MPSSAS_TARGET_INRESET;
1071 mpssas_free_tm(sc, tm);
1074 /* we should have gotten a reply. */
1080 mpssas_log_command(tm,
1081 "logical unit reset status 0x%x code 0x%x count %u\n",
1082 reply->IOCStatus, reply->ResponseCode,
1083 reply->TerminationCount);
1085 /* See if there are any outstanding commands for this LUN.
1086 * This could be made more efficient by using a per-LU data
1087 * structure of some sort.
1089 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1090 if (cm->cm_lun == tm->cm_lun)
1094 if (cm_count == 0) {
1095 mpssas_log_command(tm,
1096 "logical unit %u finished recovery after reset\n",
1099 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1102 /* we've finished recovery for this logical unit. check and
1103 * see if some other logical unit has a timedout command
1104 * that needs to be processed.
1106 cm = TAILQ_FIRST(&targ->timedout_commands);
1108 mpssas_send_abort(sc, tm, cm);
1112 mpssas_free_tm(sc, tm);
1116 /* if we still have commands for this LUN, the reset
1117 * effectively failed, regardless of the status reported.
1118 * Escalate to a target reset.
1120 mpssas_log_command(tm,
1121 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1123 mpssas_send_reset(sc, tm,
1124 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a target-reset TM (the escalation step after a
 * failed LUN reset).  Stops the TM timeout, validates the reply, clears
 * the target's INRESET flag, then: if the target has no outstanding
 * commands, recovery is done (announce AC_SENT_BDR and free the TM);
 * otherwise the target reset failed too and a further escalation path
 * (truncated in this extract) is taken.
 */
1129 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1131 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1132 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1133 struct mpssas_target *targ;
1135 callout_stop(&tm->cm_callout);
1137 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1138 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1142 * Currently there should be no way we can hit this case. It only
1143 * happens when we have a failure to allocate chain frames, and
1144 * task management commands don't have S/G lists.
1146 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1147 mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1148 "This should not happen!\n", __func__, tm->cm_flags);
1149 mpssas_free_tm(sc, tm);
1153 if (reply == NULL) {
1154 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1155 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1156 /* this completion was due to a reset, just cleanup */
1157 targ->flags &= ~MPSSAS_TARGET_INRESET;
1159 mpssas_free_tm(sc, tm);
1162 /* we should have gotten a reply. */
1168 mpssas_log_command(tm,
1169 "target reset status 0x%x code 0x%x count %u\n",
1170 reply->IOCStatus, reply->ResponseCode,
1171 reply->TerminationCount);
1173 targ->flags &= ~MPSSAS_TARGET_INRESET;
1175 if (targ->outstanding == 0) {
1176 /* we've finished recovery for this target and all
1177 * of its logical units.
1179 mpssas_log_command(tm,
1180 "recovery finished after target reset\n");
1182 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1186 mpssas_free_tm(sc, tm);
1189 /* after a target reset, if this target still has
1190 * outstanding commands, the reset effectively failed,
1191 * regardless of the status reported. escalate.
1193 mpssas_log_command(tm,
1194 "target reset complete for tm %p, but still have %u command(s)\n",
1195 tm, targ->outstanding);
1200 #define MPS_RESET_TIMEOUT 30
/*
 * Build and dispatch a SCSI task-management reset (logical-unit or target
 * reset) on the given TM command.  Fills the MPI2 TASK_MGMT request, picks
 * the matching completion handler, arms an MPS_RESET_TIMEOUT watchdog, and
 * maps the command to the hardware.  NOTE(review): elided listing — early
 * returns, closing braces, and the declaration of `err` are not visible.
 */
1203 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1205 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1206 struct mpssas_target *target;
1209 target = tm->cm_targ;
/* A zero DevHandle means the target is gone; nothing to reset. */
1210 if (target->handle == 0) {
1211 mps_printf(sc, "%s null devhandle for target_id %d\n",
1212 __func__, target->tid);
1216 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1217 req->DevHandle = target->handle;
1218 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1219 req->TaskType = type;
1221 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1222 /* XXX Need to handle invalid LUNs */
1223 MPS_SET_LUN(req->LUN, tm->cm_lun);
1224 tm->cm_targ->logical_unit_resets++;
1225 mpssas_log_command(tm, "sending logical unit reset\n");
1226 tm->cm_complete = mpssas_logical_unit_reset_complete;
1228 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1229 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1230 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1231 tm->cm_targ->target_resets++;
1232 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1233 mpssas_log_command(tm, "sending target reset\n");
1234 tm->cm_complete = mpssas_target_reset_complete;
/* Any other task type is a caller bug; log it (return elided). */
1237 mps_printf(sc, "unexpected reset type 0x%x\n", type);
/* TM requests ride the high-priority request queue. */
1242 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1243 tm->cm_complete_data = (void *)tm;
1245 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1246 mpssas_tm_timeout, tm);
1248 err = mps_map_command(sc, tm);
1250 mpssas_log_command(tm,
1251 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT_TASK task-management request.
 * Decides the next recovery step by inspecting the target's timedout-command
 * list: done (list empty), continue aborting (head is a different SMID), or
 * escalate to a logical-unit reset (the aborted command never completed).
 * NOTE(review): elided listing — returns and closing braces not visible.
 */
1259 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1261 struct mps_command *cm;
1262 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1263 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1264 struct mpssas_target *targ;
/* Cancel the MPS_ABORT_TIMEOUT watchdog armed by mpssas_send_abort(). */
1266 callout_stop(&tm->cm_callout);
1268 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1269 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1273 * Currently there should be no way we can hit this case. It only
1274 * happens when we have a failure to allocate chain frames, and
1275 * task management commands don't have S/G lists.
1277 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1278 mpssas_log_command(tm,
1279 "cm_flags = %#x for abort %p TaskMID %u!\n",
1280 tm->cm_flags, tm, req->TaskMID);
1281 mpssas_free_tm(sc, tm);
/* No reply frame: expected during a diag reset; just clean up. */
1285 if (reply == NULL) {
1286 mpssas_log_command(tm,
1287 "NULL abort reply for tm %p TaskMID %u\n",
1289 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1290 /* this completion was due to a reset, just cleanup */
1292 mpssas_free_tm(sc, tm);
1295 /* we should have gotten a reply. */
1301 mpssas_log_command(tm,
1302 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1304 reply->IOCStatus, reply->ResponseCode,
1305 reply->TerminationCount);
/* Peek at the next timed-out command to pick the recovery path. */
1307 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1309 /* if there are no more timedout commands, we're done with
1310 * error recovery for this target.
1312 mpssas_log_command(tm,
1313 "finished recovery after aborting TaskMID %u\n",
1317 mpssas_free_tm(sc, tm);
1319 else if (req->TaskMID != cm->cm_desc.Default.SMID) {
1320 /* abort success, but we have more timedout commands to abort */
1321 mpssas_log_command(tm,
1322 "continuing recovery after aborting TaskMID %u\n",
1325 mpssas_send_abort(sc, tm, cm);
1328 /* we didn't get a command completion, so the abort
1329 * failed as far as we're concerned. escalate.
1331 mpssas_log_command(tm,
1332 "abort failed for TaskMID %u tm %p\n",
1335 mpssas_send_reset(sc, tm,
1336 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1340 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and dispatch an ABORT_TASK task-management request for the
 * timed-out command `cm`, using TM command `tm` as the carrier.  Targets
 * the same DevHandle/LUN as the victim and identifies it by SMID (TaskMID).
 * NOTE(review): elided listing — the assignment of `targ` (presumably from
 * cm->cm_targ or cm->cm_ccb) and the declaration of `err` are not visible.
 */
1343 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1345 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1346 struct mpssas_target *targ;
/* A zero DevHandle means the target is gone; abort is pointless. */
1350 if (targ->handle == 0) {
1351 mps_printf(sc, "%s null devhandle for target_id %d\n",
1352 __func__, cm->cm_ccb->ccb_h.target_id);
1356 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1357 req->DevHandle = targ->handle;
1358 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1359 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1361 /* XXX Need to handle invalid LUNs */
1362 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID = SMID of the command being aborted. */
1364 req->TaskMID = cm->cm_desc.Default.SMID;
1367 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1368 tm->cm_complete = mpssas_abort_complete;
1369 tm->cm_complete_data = (void *)tm;
1370 tm->cm_targ = cm->cm_targ;
1371 tm->cm_lun = cm->cm_lun;
1373 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1374 mpssas_tm_timeout, tm);
1378 err = mps_map_command(sc, tm);
1380 mpssas_log_command(tm,
1381 "error %d sending abort for cm %p SMID %u\n",
1382 err, cm, req->TaskMID);
/*
 * Callout handler fired when a SCSI I/O command exceeds its CAM timeout.
 * Runs the interrupt path once to catch a just-completed command, then
 * marks the command TIMEDOUT, queues it on the target's recovery list,
 * and starts (or joins) per-target TM-based recovery.
 * NOTE(review): elided listing — the assignments of `sc` and `targ`, and
 * several returns/braces, are not visible between the numbered lines.
 */
1388 mpssas_scsiio_timeout(void *data)
1390 struct mps_softc *sc;
1391 struct mps_command *cm;
1392 struct mpssas_target *targ;
1394 cm = (struct mps_command *)data;
/* Caller must hold the softc lock. */
1397 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1399 mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1402 * Run the interrupt handler to make sure it's not pending. This
1403 * isn't perfect because the command could have already completed
1404 * and been re-used, though this is unlikely.
1406 mps_intr_locked(sc);
1407 if (cm->cm_state == MPS_CM_STATE_FREE) {
1408 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1412 if (cm->cm_ccb == NULL) {
1413 mps_printf(sc, "command timeout with NULL ccb\n");
1417 mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1423 /* XXX first, check the firmware state, to see if it's still
1424 * operational. if not, do a diag reset.
/* Mark the CCB timed out and move the command onto the recovery queue. */
1427 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1428 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1429 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1431 if (targ->tm != NULL) {
1432 /* target already in recovery, just queue up another
1433 * timedout command to be processed later.
1435 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1438 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1439 mps_printf(sc, "timedout cm %p allocated tm %p\n",
1442 /* start recovery by aborting the first timedout command */
1443 mpssas_send_abort(sc, targ->tm, cm);
1446 /* XXX queue this target up for recovery once a TM becomes
1447 * available. The firmware only has a limited number of
1448 * HighPriority credits for the high priority requests used
1449 * for task management, and we ran out.
1451 * Isilon: don't worry about this for now, since we have
1452 * more credits than disks in an enclosure, and limit
1453 * ourselves to one TM per target for recovery.
1455 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
/*
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI CCB into an MPI2
 * SCSI_IO request and hand it to the controller.  Handles target
 * validation, queue freezing when commands are exhausted, direction and
 * tag-action flags, LUN/CDB setup, optional EEDP (protection information)
 * CDB fixup, WD (WarpDrive) direct-drive I/O routing, and the per-command
 * timeout callout.  NOTE(review): elided listing — `csio`/`sc` assignments,
 * returns, break statements, and closing braces are not visible.
 */
1462 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1464 MPI2_SCSI_IO_REQUEST *req;
1465 struct ccb_scsiio *csio;
1466 struct mps_softc *sc;
1467 struct mpssas_target *targ;
1468 struct mpssas_lun *lun;
1469 struct mps_command *cm;
1470 uint8_t i, lba_byte, *ref_tag_addr;
1471 uint16_t eedp_flags;
1474 mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1475 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
/* Reject I/O to targets with no DevHandle (not present). */
1478 targ = &sassc->targets[csio->ccb_h.target_id];
1479 if (targ->handle == 0x0) {
1480 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1481 __func__, csio->ccb_h.target_id);
1482 csio->ccb_h.status = CAM_TID_INVALID;
1487 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1488 * that the volume has timed out. We want volumes to be enumerated
1489 * until they are deleted/removed, not just failed.
1491 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1492 if (targ->devinfo == 0)
1493 csio->ccb_h.status = CAM_REQ_CMP;
1495 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1500 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1501 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1502 csio->ccb_h.status = CAM_TID_INVALID;
/* Out of driver commands: freeze the SIM queue and ask CAM to requeue. */
1507 cm = mps_alloc_command(sc);
1509 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1510 xpt_freeze_simq(sassc->sim, 1);
1511 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1513 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1514 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI_IO request frame. */
1519 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1520 bzero(req, sizeof(*req));
1521 req->DevHandle = targ->handle;
1522 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1524 req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1525 req->SenseBufferLength = MPS_SENSE_LEN;
1527 req->ChainOffset = 0;
1528 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1533 req->DataLength = csio->dxfer_len;
1534 req->BidirectionalDataLength = 0;
1535 req->IoFlags = csio->cdb_len;
1538 /* Note: BiDirectional transfers are not supported */
1539 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1541 req->Control = MPI2_SCSIIO_CONTROL_READ;
1542 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1545 req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1546 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1550 req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1555 * It looks like the hardware doesn't require an explicit tag
1556 * number for each transaction. SAM Task Management not supported
/* Map CAM tag actions onto MPI2 queueing control bits. */
1559 switch (csio->tag_action) {
1560 case MSG_HEAD_OF_Q_TAG:
1561 req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1563 case MSG_ORDERED_Q_TAG:
1564 req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1567 req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1569 case CAM_TAG_ACTION_NONE:
1570 case MSG_SIMPLE_Q_TAG:
1572 req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Apply per-target TLR (Transport Layer Retries) control bits. */
1575 req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1577 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1578 mps_free_command(sc, cm);
1579 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Copy the CDB from wherever CAM put it (pointer vs inline bytes). */
1584 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1585 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1587 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1588 req->IoFlags = csio->cdb_len;
1591 * Check if EEDP is supported and enabled. If it is then check if the
1592 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1593 * is formatted for EEDP support. If all of this is true, set CDB up
1594 * for EEDP transfer.
1596 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1597 if (sc->eedp_enabled && eedp_flags) {
1598 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1599 if (lun->lun_id == csio->ccb_h.target_lun) {
1604 if ((lun != NULL) && (lun->eedp_formatted)) {
1605 req->EEDPBlockSize = lun->eedp_block_size;
1606 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1607 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1608 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1609 req->EEDPFlags = eedp_flags;
1612 * If CDB less than 32, fill in Primary Ref Tag with
1613 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1614 * already there. Also, set protection bit. FreeBSD
1615 * currently does not support CDBs bigger than 16, but
1616 * the code doesn't hurt, and will be here for the
1619 if (csio->cdb_len != 32) {
1620 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1621 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1622 PrimaryReferenceTag;
1623 for (i = 0; i < 4; i++) {
1625 req->CDB.CDB32[lba_byte + i];
1628 req->CDB.EEDP32.PrimaryApplicationTagMask =
1630 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1634 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1635 req->EEDPFlags = eedp_flags;
1636 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Wire up the command's data, SGL, descriptor, and completion hook. */
1642 cm->cm_data = csio->data_ptr;
1643 cm->cm_length = csio->dxfer_len;
1644 cm->cm_sge = &req->SGL;
1645 cm->cm_sglsize = (32 - 24) * 4;
1646 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1647 cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1648 cm->cm_complete = mpssas_scsiio_complete;
1649 cm->cm_complete_data = ccb;
1651 cm->cm_lun = csio->ccb_h.target_lun;
1655 * If HBA is a WD and the command is not for a retry, try to build a
1656 * direct I/O message. If failed, or the command is for a retry, send
1657 * the I/O to the IR volume itself.
1659 if (sc->WD_valid_config) {
1660 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1661 mpssas_direct_drive_io(sassc, cm, ccb);
1663 ccb->ccb_h.status = CAM_REQ_INPROG;
/* CAM timeout is in milliseconds; convert to ticks for the callout. */
1667 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1668 mpssas_scsiio_timeout, cm);
1671 targ->outstanding++;
1672 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1674 if ((sc->mps_debug & MPS_TRACE) != 0)
1675 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1676 __func__, cm, ccb, targ->outstanding);
1678 mps_map_command(sc, cm);
/*
 * Completion handler for SCSI I/O commands: syncs/unloads DMA, removes the
 * command from the target's queues, translates the MPI2 reply (IOCStatus,
 * SCSIStatus, SCSIState, sense data) into CAM ccb_h.status, handles the
 * chain-frame-exhaustion requeue path, WarpDrive direct-I/O retry, TLR
 * renegotiation on INQUIRY VPD page-0 responses, and SIM/devq freeze
 * bookkeeping.  NOTE(review): elided listing — declarations of `ccb`,
 * `dir`, `sassc`, `csio`, `alloc_len`, `i`, plus gotos/returns/braces,
 * are not visible between the numbered lines.
 */
1683 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1685 MPI2_SCSI_IO_REPLY *rep;
1687 struct ccb_scsiio *csio;
1688 struct mpssas_softc *sassc;
1689 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1690 u8 *TLR_bits, TLR_on;
1694 mps_dprint(sc, MPS_TRACE,
1695 "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1696 __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1697 cm->cm_targ->outstanding);
1699 callout_stop(&cm->cm_callout);
1700 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1703 ccb = cm->cm_complete_data;
1705 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1707 * XXX KDM if the chain allocation fails, does it matter if we do
1708 * the sync and unload here? It is simpler to do it in every case,
1709 * assuming it doesn't cause problems.
1711 if (cm->cm_data != NULL) {
1712 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1713 dir = BUS_DMASYNC_POSTREAD;
/* NOTE(review): stray double semicolon below; also `dir` appears to be
 * used uninitialized by bus_dmamap_sync() if neither DATAIN nor DATAOUT
 * is set — confirm against the unelided source. */
1714 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1715 dir = BUS_DMASYNC_POSTWRITE;;
1716 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1717 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Command is no longer outstanding on its target. */
1720 cm->cm_targ->completed++;
1721 cm->cm_targ->outstanding--;
1722 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1724 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1725 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1726 if (cm->cm_reply != NULL)
1727 mpssas_log_command(cm,
1728 "completed timedout cm %p ccb %p during recovery "
1729 "ioc %x scsi %x state %x xfer %u\n",
1731 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1732 rep->TransferCount);
1734 mpssas_log_command(cm,
1735 "completed timedout cm %p ccb %p during recovery\n",
1737 } else if (cm->cm_targ->tm != NULL) {
1738 if (cm->cm_reply != NULL)
1739 mpssas_log_command(cm,
1740 "completed cm %p ccb %p during recovery "
1741 "ioc %x scsi %x state %x xfer %u\n",
1743 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1744 rep->TransferCount);
1746 mpssas_log_command(cm,
1747 "completed cm %p ccb %p during recovery\n",
1749 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1750 mpssas_log_command(cm,
1751 "reset completed cm %p ccb %p\n",
1755 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1757 * We ran into an error after we tried to map the command,
1758 * so we're getting a callback without queueing the command
1759 * to the hardware. So we set the status here, and it will
1760 * be retained below. We'll go through the "fast path",
1761 * because there can be no reply when we haven't actually
1762 * gone out to the hardware.
1764 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1767 * Currently the only error included in the mask is
1768 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1769 * chain frames. We need to freeze the queue until we get
1770 * a command that completed without this error, which will
1771 * hopefully have some chain frames attached that we can
1772 * use. If we wanted to get smarter about it, we would
1773 * only unfreeze the queue in this condition when we're
1774 * sure that we're getting some chain frames back. That's
1775 * probably unnecessary.
1777 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1778 xpt_freeze_simq(sassc->sim, 1);
1779 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1780 mps_dprint(sc, MPS_INFO, "Error sending command, "
1781 "freezing SIM queue\n");
1785 /* Take the fast path to completion */
1786 if (cm->cm_reply == NULL) {
1787 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1788 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1789 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1791 ccb->ccb_h.status = CAM_REQ_CMP;
1792 ccb->csio.scsi_status = SCSI_STATUS_OK;
1794 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1795 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1796 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1797 mps_dprint(sc, MPS_INFO,
1798 "Unfreezing SIM queue\n");
1803 * There are two scenarios where the status won't be
1804 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
1805 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1807 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1809 * Freeze the dev queue so that commands are
1810 * executed in the correct order with after error
1813 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1814 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1816 mps_free_command(sc, cm);
1821 if (sc->mps_debug & MPS_TRACE)
1822 mpssas_log_command(cm,
1823 "ioc %x scsi %x state %x xfer %u\n",
1824 rep->IOCStatus, rep->SCSIStatus,
1825 rep->SCSIState, rep->TransferCount);
1828 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1829 * Volume if an error occurred (normal I/O retry). Use the original
1830 * CCB, but set a flag that this will be a retry so that it's sent to
1831 * the original volume. Free the command but reuse the CCB.
1833 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1834 mps_free_command(sc, cm);
1835 ccb->ccb_h.status = MPS_WD_RETRY;
1836 mpssas_action_scsiio(sassc, ccb);
/* Translate the MPI2 IOCStatus into a CAM status. */
1840 switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1841 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1842 csio->resid = cm->cm_length - rep->TransferCount;
1844 case MPI2_IOCSTATUS_SUCCESS:
1845 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1847 if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1848 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1849 mpssas_log_command(cm, "recovered error\n");
1851 /* Completion failed at the transport level. */
1852 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1853 MPI2_SCSI_STATE_TERMINATED)) {
1854 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1858 /* In a modern packetized environment, an autosense failure
1859 * implies that there's not much else that can be done to
1860 * recover the command.
1862 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1863 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1868 * CAM doesn't care about SAS Response Info data, but if this is
1869 * the state check if TLR should be done. If not, clear the
1870 * TLR_bits for the target.
1872 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1873 ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1874 MPS_SCSI_RI_INVALID_FRAME)) {
1875 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1876 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1880 * Intentionally override the normal SCSI status reporting
1881 * for these two cases. These are likely to happen in a
1882 * multi-initiator environment, and we want to make sure that
1883 * CAM retries these commands rather than fail them.
1885 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1886 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1887 ccb->ccb_h.status = CAM_REQ_ABORTED;
1891 /* Handle normal status and sense */
1892 csio->scsi_status = rep->SCSIStatus;
1893 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1894 ccb->ccb_h.status = CAM_REQ_CMP;
1896 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
/* Copy autosense data into the CCB, clamped to both buffers' sizes. */
1898 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1899 int sense_len, returned_sense_len;
1901 returned_sense_len = min(rep->SenseCount,
1902 sizeof(struct scsi_sense_data));
1903 if (returned_sense_len < ccb->csio.sense_len)
1904 ccb->csio.sense_resid = ccb->csio.sense_len -
1907 ccb->csio.sense_resid = 0;
1909 sense_len = min(returned_sense_len,
1910 ccb->csio.sense_len - ccb->csio.sense_resid);
/* NOTE(review): sizeof(&ccb->csio.sense_data) below takes the size of a
 * pointer, not the buffer — the bzero clears only pointer-size bytes;
 * confirm against the unelided source. */
1911 bzero(&ccb->csio.sense_data,
1912 sizeof(&ccb->csio.sense_data));
1913 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1914 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1918 * Check if this is an INQUIRY command. If it's a VPD inquiry,
1919 * and it's page code 0 (Supported Page List), and there is
1920 * inquiry data, and this is for a sequential access device, and
1921 * the device is an SSP target, and TLR is supported by the
1922 * controller, turn the TLR_bits value ON if page 0x90 is
1925 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1926 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1927 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1928 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1929 T_SEQUENTIAL) && (sc->control_TLR) &&
1930 (sc->mapping_table[csio->ccb_h.target_id].device_info &
1931 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1932 vpd_list = (struct scsi_vpd_supported_page_list *)
1934 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1936 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1937 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1938 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1939 csio->cdb_io.cdb_bytes[4];
1940 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1941 if (vpd_list->list[i] == 0x90) {
1948 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1949 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1951 * If devinfo is 0 this will be a volume. In that case don't
1952 * tell CAM that the volume is not there. We want volumes to
1953 * be enumerated until they are deleted/removed, not just
1956 if (cm->cm_targ->devinfo == 0)
1957 ccb->ccb_h.status = CAM_REQ_CMP;
1959 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1961 case MPI2_IOCSTATUS_INVALID_SGL:
1962 mps_print_scsiio_cmd(sc, cm);
1963 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1965 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1967 * This is one of the responses that comes back when an I/O
1968 * has been aborted. If it is because of a timeout that we
1969 * initiated, just set the status to CAM_CMD_TIMEOUT.
1970 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
1971 * command is the same (it gets retried, subject to the
1972 * retry counter), the only difference is what gets printed
1975 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1976 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1978 ccb->ccb_h.status = CAM_REQ_ABORTED;
1980 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1981 /* resid is ignored for this condition */
1983 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1985 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1986 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1988 * Since these are generally external (i.e. hopefully
1989 * transient transport-related) errors, retry these without
1990 * decrementing the retry count.
1992 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1993 mpssas_log_command(cm,
1994 "terminated ioc %x scsi %x state %x xfer %u\n",
1995 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1996 rep->TransferCount);
1998 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1999 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2000 case MPI2_IOCSTATUS_INVALID_VPID:
2001 case MPI2_IOCSTATUS_INVALID_FIELD:
2002 case MPI2_IOCSTATUS_INVALID_STATE:
2003 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2004 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2005 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2006 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2007 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2009 mpssas_log_command(cm,
2010 "completed ioc %x scsi %x state %x xfer %u\n",
2011 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2012 rep->TransferCount);
2013 csio->resid = cm->cm_length;
2014 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* A successful completion means chain frames are available again. */
2018 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2019 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2020 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2021 mps_dprint(sc, MPS_INFO, "Command completed, "
2022 "unfreezing SIM queue\n");
2025 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2026 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2027 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2030 mps_free_command(sc, cm);
/*
 * WarpDrive direct-drive I/O translation: for READ/WRITE 6/10/16 commands
 * aimed at the IR volume, if the transfer stays within one stripe, rewrite
 * the CDB's LBA to the physical-disk LBA and retarget the DevHandle to the
 * member PhysDisk, bypassing the IR firmware path.  Boundary-crossing or
 * non-qualifying I/O is left to go to the volume itself.
 * NOTE(review): elided listing — declarations of `virtLBA`, some closing
 * braces, and several statements are not visible between numbered lines.
 */
2035 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2037 pMpi2SCSIIORequest_t pIO_req;
2038 struct mps_softc *sc = sassc->sc;
2040 uint32_t physLBA, stripe_offset, stripe_unit;
2041 uint32_t io_size, column;
2042 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2045 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2046 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2047 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2048 * bit different than the 10/16 CDBs, handle them separately.
2050 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2051 CDB = pIO_req->CDB.CDB32;
2054 * Handle 6 byte CDBs.
2056 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2057 (CDB[0] == WRITE_6))) {
2059 * Get the transfer size in blocks.
2061 io_size = (cm->cm_length >> sc->DD_block_exponent);
2064 * Get virtual LBA given in the CDB.
2066 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2067 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2070 * Check that LBA range for I/O does not exceed volume's
2073 if ((virtLBA + (uint64_t)io_size - 1) <=
2076 * Check if the I/O crosses a stripe boundary. If not,
2077 * translate the virtual LBA to a physical LBA and set
2078 * the DevHandle for the PhysDisk to be used. If it
2079 * does cross a boundry, do normal I/O. To get the
2080 * right DevHandle to use, get the map number for the
2081 * column, then use that map number to look up the
2082 * DevHandle of the PhysDisk.
2084 stripe_offset = (uint32_t)virtLBA &
2085 (sc->DD_stripe_size - 1);
2086 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2087 physLBA = (uint32_t)virtLBA >>
2088 sc->DD_stripe_exponent;
2089 stripe_unit = physLBA / sc->DD_num_phys_disks;
2090 column = physLBA % sc->DD_num_phys_disks;
2091 pIO_req->DevHandle =
2092 sc->DD_column_map[column].dev_handle;
2093 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 21-bit LBA of the 6-byte CDB in place, MSB first. */
2096 physLBA = (stripe_unit <<
2097 sc->DD_stripe_exponent) + stripe_offset;
2098 ptrLBA = &pIO_req->CDB.CDB32[1];
2099 physLBA_byte = (uint8_t)(physLBA >> 16);
2100 *ptrLBA = physLBA_byte;
2101 ptrLBA = &pIO_req->CDB.CDB32[2];
2102 physLBA_byte = (uint8_t)(physLBA >> 8);
2103 *ptrLBA = physLBA_byte;
2104 ptrLBA = &pIO_req->CDB.CDB32[3];
2105 physLBA_byte = (uint8_t)physLBA;
2106 *ptrLBA = physLBA_byte;
2109 * Set flag that Direct Drive I/O is
2112 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2119 * Handle 10 or 16 byte CDBs.
2121 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2122 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2123 (CDB[0] == WRITE_16))) {
2125 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2126 * are 0. If not, this is accessing beyond 2TB so handle it in
2127 * the else section. 10-byte CDB's are OK.
2129 if ((CDB[0] < READ_16) ||
2130 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2132 * Get the transfer size in blocks.
2134 io_size = (cm->cm_length >> sc->DD_block_exponent);
2137 * Get virtual LBA. Point to correct lower 4 bytes of
2138 * LBA in the CDB depending on command.
2140 lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2141 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2142 ((uint64_t)CDB[lba_idx + 1] << 16) |
2143 ((uint64_t)CDB[lba_idx + 2] << 8) |
2144 (uint64_t)CDB[lba_idx + 3];
2147 * Check that LBA range for I/O does not exceed volume's
2150 if ((virtLBA + (uint64_t)io_size - 1) <=
2153 * Check if the I/O crosses a stripe boundary.
2154 * If not, translate the virtual LBA to a
2155 * physical LBA and set the DevHandle for the
2156 * PhysDisk to be used. If it does cross a
2157 * boundry, do normal I/O. To get the right
2158 * DevHandle to use, get the map number for the
2159 * column, then use that map number to look up
2160 * the DevHandle of the PhysDisk.
2162 stripe_offset = (uint32_t)virtLBA &
2163 (sc->DD_stripe_size - 1);
2164 if ((stripe_offset + io_size) <=
2165 sc->DD_stripe_size) {
2166 physLBA = (uint32_t)virtLBA >>
2167 sc->DD_stripe_exponent;
2168 stripe_unit = physLBA /
2169 sc->DD_num_phys_disks;
2171 sc->DD_num_phys_disks;
2172 pIO_req->DevHandle =
2173 sc->DD_column_map[column].
2175 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 32-bit LBA field of the 10/16-byte CDB, MSB first. */
2178 physLBA = (stripe_unit <<
2179 sc->DD_stripe_exponent) +
2182 &pIO_req->CDB.CDB32[lba_idx];
2183 physLBA_byte = (uint8_t)(physLBA >> 24);
2184 *ptrLBA = physLBA_byte;
2186 &pIO_req->CDB.CDB32[lba_idx + 1];
2187 physLBA_byte = (uint8_t)(physLBA >> 16);
2188 *ptrLBA = physLBA_byte;
2190 &pIO_req->CDB.CDB32[lba_idx + 2];
2191 physLBA_byte = (uint8_t)(physLBA >> 8);
2192 *ptrLBA = physLBA_byte;
2194 &pIO_req->CDB.CDB32[lba_idx + 3];
2195 physLBA_byte = (uint8_t)physLBA;
2196 *ptrLBA = physLBA_byte;
2199 * Set flag that Direct Drive I/O is
2202 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2207 * 16-byte CDB and the upper 4 bytes of the CDB are not
2208 * 0. Get the transfer size in blocks.
2210 io_size = (cm->cm_length >> sc->DD_block_exponent);
/* NOTE(review): the `<< 54` below looks wrong for byte-wise big-endian
 * assembly of a 64-bit LBA — the top byte shift should presumably be
 * `<< 56` (and CDB[3] then `<< 48` is consistent only with that); this
 * matches a known bug in historical versions of this driver — confirm. */
2215 virtLBA = ((uint64_t)CDB[2] << 54) |
2216 ((uint64_t)CDB[3] << 48) |
2217 ((uint64_t)CDB[4] << 40) |
2218 ((uint64_t)CDB[5] << 32) |
2219 ((uint64_t)CDB[6] << 24) |
2220 ((uint64_t)CDB[7] << 16) |
2221 ((uint64_t)CDB[8] << 8) |
2225 * Check that LBA range for I/O does not exceed volume's
2228 if ((virtLBA + (uint64_t)io_size - 1) <=
2231 * Check if the I/O crosses a stripe boundary.
2232 * If not, translate the virtual LBA to a
2233 * physical LBA and set the DevHandle for the
2234 * PhysDisk to be used. If it does cross a
2235 * boundry, do normal I/O. To get the right
2236 * DevHandle to use, get the map number for the
2237 * column, then use that map number to look up
2238 * the DevHandle of the PhysDisk.
2240 stripe_offset = (uint32_t)virtLBA &
2241 (sc->DD_stripe_size - 1);
2242 if ((stripe_offset + io_size) <=
2243 sc->DD_stripe_size) {
2244 physLBA = (uint32_t)(virtLBA >>
2245 sc->DD_stripe_exponent);
2246 stripe_unit = physLBA /
2247 sc->DD_num_phys_disks;
2249 sc->DD_num_phys_disks;
2250 pIO_req->DevHandle =
2251 sc->DD_column_map[column].
2253 cm->cm_desc.SCSIIO.DevHandle =
2256 physLBA = (stripe_unit <<
2257 sc->DD_stripe_exponent) +
2261 * Set upper 4 bytes of LBA to 0. We
2262 * assume that the phys disks are less
2263 * than 2 TB's in size. Then, set the
2266 pIO_req->CDB.CDB32[2] = 0;
2267 pIO_req->CDB.CDB32[3] = 0;
2268 pIO_req->CDB.CDB32[4] = 0;
2269 pIO_req->CDB.CDB32[5] = 0;
2270 ptrLBA = &pIO_req->CDB.CDB32[6];
2271 physLBA_byte = (uint8_t)(physLBA >> 24);
2272 *ptrLBA = physLBA_byte;
2273 ptrLBA = &pIO_req->CDB.CDB32[7];
2274 physLBA_byte = (uint8_t)(physLBA >> 16);
2275 *ptrLBA = physLBA_byte;
2276 ptrLBA = &pIO_req->CDB.CDB32[8];
2277 physLBA_byte = (uint8_t)(physLBA >> 8);
2278 *ptrLBA = physLBA_byte;
2279 ptrLBA = &pIO_req->CDB.CDB32[9];
2280 physLBA_byte = (uint8_t)physLBA;
2281 *ptrLBA = physLBA_byte;
2284 * Set flag that Direct Drive I/O is
2287 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2294 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request (FreeBSD >= 900026
 * CAM SMP support).  Validates the command and reply, maps the firmware's
 * IOCStatus/SASStatus and the SMP frame result onto CAM status, then syncs
 * and unloads the DMA map and frees the command.  NOTE(review): elided
 * listing — the declarations of `ccb` and `sasaddr`, gotos to the cleanup
 * path, and closing braces are not visible between the numbered lines.
 */
2296 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2298 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2299 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2303 ccb = cm->cm_complete_data;
2306 * Currently there should be no way we can hit this case. It only
2307 * happens when we have a failure to allocate chain frames, and SMP
2308 * commands require two S/G elements only. That should be handled
2309 * in the standard request size.
2311 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2312 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2313 __func__, cm->cm_flags);
2314 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2318 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2320 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2321 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Reconstruct the 64-bit SAS address from the request's two LE halves. */
2325 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2326 sasaddr = le32toh(req->SASAddress.Low);
2327 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2329 if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2330 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2331 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2332 __func__, rpl->IOCStatus, rpl->SASStatus);
2333 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2337 mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2338 "%#jx completed successfully\n", __func__,
2339 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2341 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2342 ccb->ccb_h.status = CAM_REQ_CMP;
2344 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2348 * We sync in both directions because we had DMAs in the S/G list
2349 * in both directions.
2351 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2352 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2353 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2354 mps_free_command(sc, cm);
/*
 * Build and submit an MPI2 SMP passthrough request for the given CCB,
 * directed at the expander/device at 'sasaddr'.
 *
 * Resolves the request/response buffers (rejecting physical addresses
 * and multi-segment S/G lists, which the chip cannot handle), fills in
 * an MPI2_SMP_PASSTHROUGH_REQUEST, describes both buffers with a
 * two-entry uio so a single mps_map_command() maps the bidirectional
 * transfer, and queues the command with mpssas_smpio_complete() as the
 * completion handler.
 *
 * NOTE(review): lines are elided in this chunk (declarations of 'sc'
 * and 'error', several else branches, xpt_done()/return paths); do not
 * treat the visible control flow as complete.
 */
2359 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2361 struct mps_command *cm;
2362 uint8_t *request, *response;
2363 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2364 struct mps_softc *sc;
2373 * XXX We don't yet support physical addresses here.
2375 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2376 mps_printf(sc, "%s: physical addresses not supported\n",
2378 ccb->ccb_h.status = CAM_REQ_INVALID;
2384 * If the user wants to send an S/G list, check to make sure they
2385 * have single buffers.
2387 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2389 * The chip does not support more than one buffer for the
2390 * request or response.
2392 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2393 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2394 mps_printf(sc, "%s: multiple request or response "
2395 "buffer segments not supported for SMP\n",
2397 ccb->ccb_h.status = CAM_REQ_INVALID;
2403 * The CAM_SCATTER_VALID flag was originally implemented
2404 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2405 * We have two. So, just take that flag to mean that we
2406 * might have S/G lists, and look at the S/G segment count
2407 * to figure out whether that is the case for each individual
2410 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2411 bus_dma_segment_t *req_sg;
2413 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2414 request = (uint8_t *)req_sg[0].ds_addr;
2416 request = ccb->smpio.smp_request;
2418 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2419 bus_dma_segment_t *rsp_sg;
2421 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2422 response = (uint8_t *)rsp_sg[0].ds_addr;
2424 response = ccb->smpio.smp_response;
/* No S/G lists: the CCB carries the flat buffers directly. */
2426 request = ccb->smpio.smp_request;
2427 response = ccb->smpio.smp_response;
2430 cm = mps_alloc_command(sc);
2432 mps_printf(sc, "%s: cannot allocate command\n", __func__);
2433 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Build the MPI2 SMP passthrough request frame. */
2438 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2439 bzero(req, sizeof(*req));
2440 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2442 /* Allow the chip to use any route to this SAS address. */
2443 req->PhysicalPort = 0xff;
2445 req->RequestDataLength = ccb->smpio.smp_request_len;
2447 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2449 mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
2450 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2452 mpi_init_sge(cm, req, &req->SGL);
2455 * Set up a uio to pass into mps_map_command(). This allows us to
2456 * do one map command, and one busdma call in there.
2458 cm->cm_uio.uio_iov = cm->cm_iovec;
2459 cm->cm_uio.uio_iovcnt = 2;
2460 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2463 * The read/write flag isn't used by busdma, but set it just in
2464 * case. This isn't exactly accurate, either, since we're going in
2467 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 = outgoing SMP request, iovec 1 = incoming SMP response. */
2469 cm->cm_iovec[0].iov_base = request;
2470 cm->cm_iovec[0].iov_len = req->RequestDataLength;
2471 cm->cm_iovec[1].iov_base = response;
2472 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2474 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2475 cm->cm_iovec[1].iov_len;
2478 * Trigger a warning message in mps_data_cb() for the user if we
2479 * wind up exceeding two S/G segments. The chip expects one
2480 * segment for the request and another for the response.
2482 cm->cm_max_segs = 2;
2484 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2485 cm->cm_complete = mpssas_smpio_complete;
2486 cm->cm_complete_data = ccb;
2489 * Tell the mapping code that we're using a uio, and that this is
2490 * an SMP passthrough request. There is a little special-case
2491 * logic there (in mps_data_cb()) to handle the bidirectional
2494 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2495 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2497 /* The chip data format is little endian. */
2498 req->SASAddress.High = htole32(sasaddr >> 32);
2499 req->SASAddress.Low = htole32(sasaddr);
2502 * XXX Note that we don't have a timeout/abort mechanism here.
2503 * From the manual, it looks like task management requests only
2504 * work for SCSI IO and SATA passthrough requests. We may need to
2505 * have a mechanism to retry requests in the event of a chip reset
2506 * at least. Hopefully the chip will insure that any errors short
2507 * of that are relayed back to the driver.
2509 error = mps_map_command(sc, cm);
/* EINPROGRESS just means busdma will finish mapping asynchronously. */
2510 if ((error != 0) && (error != EINPROGRESS)) {
2511 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
2519 mps_free_command(sc, cm);
2520 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP
 * target to talk to, then hand off to mpssas_send_smpcmd().
 *
 * Address resolution order: (1) the target itself if it advertises an
 * embedded SMP target, (2) [disabled] the port SAS address cached from
 * Inquiry VPD 0x83, (3) the target's parent device (normally the
 * expander it hangs off), via either the old probe code's target
 * lookup or the cached parent_* fields.
 *
 * NOTE(review): lines are elided here (returns/xpt_done() calls,
 * parts of the commented-out VPD-0x83 path); the visible flow is
 * incomplete.
 */
2527 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2529 struct mps_softc *sc;
2530 struct mpssas_target *targ;
2531 uint64_t sasaddr = 0;
2536 * Make sure the target exists.
2538 targ = &sassc->targets[ccb->ccb_h.target_id];
2539 if (targ->handle == 0x0) {
2540 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
2541 ccb->ccb_h.target_id);
2542 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2548 * If this device has an embedded SMP target, we'll talk to it
2550 * figure out what the expander's address is.
2552 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2553 sasaddr = targ->sasaddr;
2556 * If we don't have a SAS address for the expander yet, try
2557 * grabbing it from the page 0x83 information cached in the
2558 * transport layer for this target. LSI expanders report the
2559 * expander SAS address as the port-associated SAS address in
2560 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2563 * XXX KDM disable this for now, but leave it commented out so that
2564 * it is obvious that this is another possible way to get the SAS
2567 * The parent handle method below is a little more reliable, and
2568 * the other benefit is that it works for devices other than SES
2569 * devices. So you can send a SMP request to a da(4) device and it
2570 * will get routed to the expander that device is attached to.
2571 * (Assuming the da(4) device doesn't contain an SMP target...)
2575 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2579 * If we still don't have a SAS address for the expander, look for
2580 * the parent device of this device, which is probably the expander.
2583 #ifdef OLD_MPS_PROBE
2584 struct mpssas_target *parent_target;
2587 if (targ->parent_handle == 0x0) {
2588 mps_printf(sc, "%s: handle %d does not have a valid "
2589 "parent handle!\n", __func__, targ->handle);
2590 ccb->ccb_h.status = CAM_REQ_INVALID;
2593 #ifdef OLD_MPS_PROBE
/* Old probe path: resolve the parent by handle lookup. */
2594 parent_target = mpssas_find_target_by_handle(sassc, 0,
2595 targ->parent_handle);
2597 if (parent_target == NULL) {
2598 mps_printf(sc, "%s: handle %d does not have a valid "
2599 "parent target!\n", __func__, targ->handle);
2600 ccb->ccb_h.status = CAM_REQ_INVALID;
2604 if ((parent_target->devinfo &
2605 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2606 mps_printf(sc, "%s: handle %d parent %d does not "
2607 "have an SMP target!\n", __func__,
2608 targ->handle, parent_target->handle);
2609 ccb->ccb_h.status = CAM_REQ_INVALID;
2614 sasaddr = parent_target->sasaddr;
2615 #else /* OLD_MPS_PROBE */
/* New probe path: use the parent info cached on the target itself. */
2616 if ((targ->parent_devinfo &
2617 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2618 mps_printf(sc, "%s: handle %d parent %d does not "
2619 "have an SMP target!\n", __func__,
2620 targ->handle, targ->parent_handle);
2621 ccb->ccb_h.status = CAM_REQ_INVALID;
2625 if (targ->parent_sasaddr == 0x0) {
2626 mps_printf(sc, "%s: handle %d parent handle %d does "
2627 "not have a valid SAS address!\n",
2628 __func__, targ->handle, targ->parent_handle);
2629 ccb->ccb_h.status = CAM_REQ_INVALID;
2633 sasaddr = targ->parent_sasaddr;
2634 #endif /* OLD_MPS_PROBE */
/* Still no address: nothing to send the SMP request to. */
2639 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
2640 __func__, targ->handle);
2641 ccb->ccb_h.status = CAM_REQ_INVALID;
2644 mpssas_send_smpcmd(sassc, ccb, sasaddr);
2652 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: issue an MPI2 SCSI task management
 * Target Reset (with a hard link reset) to the addressed target.
 * Completion is handled by mpssas_resetdev_complete().
 *
 * NOTE(review): elided lines include the NULL-check on the allocated
 * command and the sc assignment; also "comand" in the failure message
 * is a typo in a runtime string (left untouched here).
 */
2655 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2657 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2658 struct mps_softc *sc;
2659 struct mps_command *tm;
2660 struct mpssas_target *targ;
2662 mps_dprint(sassc->sc, MPS_TRACE, __func__);
/* Caller must hold the softc lock. */
2663 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2666 tm = mps_alloc_command(sc);
2668 mps_printf(sc, "comand alloc failure in mpssas_action_resetdev\n");
2669 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Build the task management request frame. */
2674 targ = &sassc->targets[ccb->ccb_h.target_id];
2675 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2676 req->DevHandle = targ->handle;
2677 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2678 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2680 /* SAS Hard Link Reset / SATA Link Reset */
2681 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go on the high-priority queue. */
2684 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2685 tm->cm_complete = mpssas_resetdev_complete;
2686 tm->cm_complete_data = ccb;
2687 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task management command
 * issued by mpssas_action_resetdev().  Translates the TM reply into a
 * CAM status on the originating CCB, announces the reset to CAM on
 * success, and frees the TM command.
 *
 * NOTE(review): elided lines include the NULL-reply handling between
 * the reply assignment and the response-code check, the xpt_done()
 * call, and the tid argument continuation of mpssas_announce_reset().
 */
2691 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2693 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2696 mps_dprint(sc, MPS_TRACE, __func__);
/* Completion runs with the softc lock held. */
2697 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2699 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2700 ccb = tm->cm_complete_data;
2703 * Currently there should be no way we can hit this case. It only
2704 * happens when we have a failure to allocate chain frames, and
2705 * task management commands don't have S/G lists.
2707 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2708 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2710 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2712 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2713 "This should not happen!\n", __func__, tm->cm_flags,
2715 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2719 kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2720 resp->IOCStatus, resp->ResponseCode);
/* TM_COMPLETE means the firmware carried out the reset. */
2722 if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2723 ccb->ccb_h.status = CAM_REQ_CMP;
/* Notify CAM that a BDR (bus device reset) was delivered. */
2724 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2728 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2732 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll routine, called when interrupts are unavailable
 * (e.g. during a kernel dump/panic).  Disables MPS_TRACE debugging
 * first so chatty messages don't swamp the polled path, then runs
 * the interrupt handler directly.
 */
2737 mpssas_poll(struct cam_sim *sim)
2739 struct mpssas_softc *sassc;
2741 sassc = cam_sim_softc(sim);
2743 if (sassc->sc->mps_debug & MPS_TRACE) {
2744 /* frequent debug messages during a panic just slow
2745 * everything down too much.
2747 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2748 sassc->sc->mps_debug &= ~MPS_TRACE;
2751 mps_intr_locked(sassc->sc);
/*
 * Completion callback for a queued bus rescan CCB (set up in
 * mpssas_rescan()).  Logs the completion, releases the CCB's path and
 * the CCB itself, and on pre-1000006 FreeBSD also refreshes the EEDP
 * info for existing LUNs before the scan is considered done.
 *
 * NOTE(review): the declaration of 'path_str' and some early-return
 * lines are elided in this chunk.
 */
2755 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2757 struct mpssas_softc *sassc;
2760 if (done_ccb == NULL)
/* sassc was stashed in ppriv_ptr1 when the rescan was queued. */
2763 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2765 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2767 xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2768 mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
/* The path and CCB were allocated for this rescan; free both. */
2770 xpt_free_path(done_ccb->ccb_h.path);
2771 xpt_free_ccb(done_ccb);
2773 #if __FreeBSD_version < 1000006
2775 * Before completing scan, get EEDP stuff for all of the existing
2778 mpssas_check_eedp(sassc);
/*
 * Kernel thread that services the ccb_scanq rescan queue: sleeps on
 * the queue, drains queued rescan CCBs, and exits when the sas softc
 * is flagged MPSSAS_SHUTDOWN (clearing MPSSAS_SCANTHREAD and waking
 * any waiter on sassc->flags on the way out).
 *
 * NOTE(review): the loop header, the per-CCB xpt_action dispatch, and
 * the lock acquire/release lines are elided from this chunk.
 */
2783 /* thread to handle bus rescans */
2785 mpssas_scanner_thread(void *arg)
2787 struct mpssas_softc *sassc;
2788 struct mps_softc *sc;
2791 sassc = (struct mpssas_softc *)arg;
2794 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
/* Sleep until mpssas_rescan() queues work or shutdown is requested. */
2798 lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 0);
2799 if (sassc->flags & MPSSAS_SHUTDOWN) {
2800 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2803 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
2806 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
/* Tell anyone waiting in shutdown that the thread is gone. */
2810 sassc->flags &= ~MPSSAS_SCANTHREAD;
2811 wakeup(&sassc->flags);
2813 mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
/*
 * Queue a bus rescan CCB for the scanner thread: record the softc in
 * ppriv_ptr1, point the completion at mpssas_rescan_done(), re-setup
 * the CCB header at rescan priority, append it to ccb_scanq, and wake
 * the scanner thread.  Must be called with the softc lock held.
 *
 * NOTE(review): the 'path_str' declaration and any early-exit checks
 * are elided from this chunk.
 */
2818 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2822 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2824 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2829 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2830 mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2832 /* Prepare request */
2833 ccb->ccb_h.ppriv_ptr1 = sassc;
2834 ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2835 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2836 TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
/* Kick the scanner thread sleeping in mpssas_scanner_thread(). */
2837 wakeup(&sassc->ccb_scanq);
2840 #if __FreeBSD_version >= 1000006
/*
 * CAM async-event callback (FreeBSD >= 1000006 path).  Only
 * AC_ADVINFO_CHANGED with buftype CDAI_TYPE_RCAPLONG is acted on:
 * for a device on our SIM, look up (or create) the LUN entry on the
 * target, fetch the long read-capacity data via an XPT_DEV_ADVINFO
 * CCB, and record whether the LUN is EEDP (protection) formatted and
 * its block size.
 *
 * NOTE(review): elided lines include the declarations of 'lunid' /
 * 'found_lun', the sassc assignment, the SLIST match break, and the
 * end of the switch — the visible flow is incomplete.
 */
2842 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2845 struct mps_softc *sc;
2847 sc = (struct mps_softc *)callback_arg;
2850 case AC_ADVINFO_CHANGED: {
2851 struct mpssas_target *target;
2852 struct mpssas_softc *sassc;
2853 struct scsi_read_capacity_data_long rcap_buf;
2854 struct ccb_dev_advinfo cdai;
2855 struct mpssas_lun *lun;
2860 buftype = (uintptr_t)arg;
2866 * We're only interested in read capacity data changes.
2868 if (buftype != CDAI_TYPE_RCAPLONG)
2872 * We're only interested in devices that are attached to
2875 if (xpt_path_path_id(path) != sassc->sim->path_id)
2879 * We should have a handle for this, but check to make sure.
2881 target = &sassc->targets[xpt_path_target_id(path)];
2882 if (target->handle == 0)
2885 lunid = xpt_path_lun_id(path);
/* Reuse an existing LUN entry if we already track this LUN. */
2887 SLIST_FOREACH(lun, &target->luns, lun_link) {
2888 if (lun->lun_id == lunid) {
2894 if (found_lun == 0) {
2895 lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
2898 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
2899 "LUN for EEDP support.\n");
2902 lun->lun_id = lunid;
2903 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Ask CAM for the cached long read-capacity data. */
2906 bzero(&rcap_buf, sizeof(rcap_buf));
2907 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
2908 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
2909 cdai.ccb_h.flags = CAM_DIR_IN;
2910 cdai.buftype = CDAI_TYPE_RCAPLONG;
2912 cdai.bufsiz = sizeof(rcap_buf);
2913 cdai.buf = (uint8_t *)&rcap_buf;
2914 xpt_action((union ccb *)&cdai);
2915 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
2916 cam_release_devq(cdai.ccb_h.path,
/* PROT_EN set in READ CAPACITY (16) data => EEDP formatted. */
2919 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
2920 && (rcap_buf.prot & SRC16_PROT_EN)) {
2921 lun->eedp_formatted = TRUE;
2922 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
2924 lun->eedp_formatted = FALSE;
2925 lun->eedp_block_size = 0;
2933 #else /* __FreeBSD_version >= 1000006 */
/*
 * EEDP discovery for the pre-1000006 FreeBSD path: walk every target
 * on the SIM and, for each LUN that CAM has a periph for, create a
 * LUN entry (if missing) and issue a READ CAPACITY (16) whose
 * completion (mpssas_read_cap_done) records the EEDP formatting state
 * and block size.
 *
 * NOTE(review): elided lines include the 'lunid' initialization, the
 * do/while loop header iterating LUN ids, the 'csio' assignment from
 * the allocated CCB, the xpt_action() submission, and several else
 * branches.  The visible loop structure is incomplete.
 */
2936 mpssas_check_eedp(struct mpssas_softc *sassc)
2938 struct mps_softc *sc = sassc->sc;
2939 struct ccb_scsiio *csio;
2940 struct scsi_read_capacity_16 *scsi_cmd;
2941 struct scsi_read_capacity_eedp *rcap_buf;
2943 path_id_t pathid = cam_sim_path(sassc->sim);
2944 target_id_t targetid;
2946 struct cam_periph *found_periph;
2947 struct mpssas_target *target;
2948 struct mpssas_lun *lun;
2952 * Issue a READ CAPACITY 16 command to each LUN of each target. This
2953 * info is used to determine if the LUN is formatted for EEDP support.
2955 for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
2956 target = &sassc->targets[targetid];
/* Skip slots with no attached device. */
2957 if (target->handle == 0x0) {
2964 kmalloc(sizeof(struct scsi_read_capacity_eedp),
2965 M_MPT2, M_NOWAIT | M_ZERO);
2966 if (rcap_buf == NULL) {
2967 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
2968 "capacity buffer for EEDP support.\n");
2972 ccb = kmalloc(sizeof(union ccb), M_TEMP,
2975 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2976 pathid, targetid, lunid) != CAM_REQ_CMP) {
2977 mps_dprint(sc, MPS_FAULT, "Unable to create "
2978 "path for EEDP support\n");
2979 kfree(rcap_buf, M_MPT2);
2985 * If a periph is returned, the LUN exists. Create an
2986 * entry in the target's LUN list.
2988 if ((found_periph = cam_periph_find(ccb->ccb_h.path,
2991 * If LUN is already in list, don't create a new
2995 SLIST_FOREACH(lun, &target->luns, lun_link) {
2996 if (lun->lun_id == lunid) {
3002 lun = kmalloc(sizeof(struct mpssas_lun),
3003 M_MPT2, M_WAITOK | M_ZERO);
3005 mps_dprint(sc, MPS_FAULT,
3006 "Unable to alloc LUN for "
/* Allocation failed: release the resources taken so far. */
3008 kfree(rcap_buf, M_MPT2);
3009 xpt_free_path(ccb->ccb_h.path);
3013 lun->lun_id = lunid;
3014 SLIST_INSERT_HEAD(&target->luns, lun,
3020 * Issue a READ CAPACITY 16 command for the LUN.
3021 * The mpssas_read_cap_done function will load
3022 * the read cap info into the LUN struct.
3025 csio->ccb_h.func_code = XPT_SCSI_IO;
3026 csio->ccb_h.flags = CAM_DIR_IN;
3027 csio->ccb_h.retry_count = 4;
3028 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3029 csio->ccb_h.timeout = 60000;
3030 csio->data_ptr = (uint8_t *)rcap_buf;
3031 csio->dxfer_len = sizeof(struct
3032 scsi_read_capacity_eedp);
3033 csio->sense_len = MPS_SENSE_LEN;
3034 csio->cdb_len = sizeof(*scsi_cmd);
3035 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the READ CAPACITY (16) CDB (opcode 0x9E / SAI 0x10). */
3037 scsi_cmd = (struct scsi_read_capacity_16 *)
3038 &csio->cdb_io.cdb_bytes;
3039 bzero(scsi_cmd, sizeof(*scsi_cmd));
3040 scsi_cmd->opcode = 0x9E;
3041 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* Byte 13 of the CDB is the allocation length (single byte here). */
3042 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
3043 scsi_read_capacity_eedp);
3046 * Set the path, target and lun IDs for the READ
3049 ccb->ccb_h.path_id =
3050 xpt_path_path_id(ccb->ccb_h.path);
3051 ccb->ccb_h.target_id =
3052 xpt_path_target_id(ccb->ccb_h.path);
3053 ccb->ccb_h.target_lun =
3054 xpt_path_lun_id(ccb->ccb_h.path);
3056 ccb->ccb_h.ppriv_ptr1 = sassc;
/* No periph: LUN does not exist; free the buffer and path. */
3059 kfree(rcap_buf, M_MPT2);
3060 xpt_free_path(ccb->ccb_h.path);
3063 } while (found_periph);
/*
 * Completion callback for the READ CAPACITY (16) issued by
 * mpssas_check_eedp().  Finds the matching LUN entry on the target
 * and records whether the LUN is EEDP formatted (protect bit 0) and
 * its block size; on any SCSI/CAM error the LUN is marked not EEDP
 * formatted.  Frees the capacity buffer, path, and CCB when done.
 *
 * NOTE(review): the loop-exit/break lines inside the SLIST_FOREACH
 * are elided in this chunk.
 */
3069 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3071 struct mpssas_softc *sassc;
3072 struct mpssas_target *target;
3073 struct mpssas_lun *lun;
3074 struct scsi_read_capacity_eedp *rcap_buf;
3076 if (done_ccb == NULL)
3079 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3082 * Get the LUN ID for the path and look it up in the LUN list for the
3085 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3086 target = &sassc->targets[done_ccb->ccb_h.target_id];
3087 SLIST_FOREACH(lun, &target->luns, lun_link) {
3088 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3092 * Got the LUN in the target's LUN list. Fill it in
3093 * with EEDP info. If the READ CAP 16 command had some
3094 * SCSI error (common if command is not supported), mark
3095 * the lun as not supporting EEDP and set the block size
3098 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3099 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3100 lun->eedp_formatted = FALSE;
3101 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte is PROT_EN. */
3105 if (rcap_buf->protect & 0x01) {
3106 lun->eedp_formatted = TRUE;
3107 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3112 // Finished with this CCB and path.
3113 kfree(rcap_buf, M_MPT2);
3114 xpt_free_path(done_ccb->ccb_h.path);
3115 xpt_free_ccb(done_ccb);
3117 #endif /* __FreeBSD_version >= 1000006 */
/*
 * Begin SAS discovery: bump the startup refcount (keeps the simq
 * frozen until discovery finishes), set wait_for_port_enable, and
 * send the MPI2 PORT_ENABLE request to the firmware.
 */
3120 mpssas_startup(struct mps_softc *sc)
3122 struct mpssas_softc *sassc;
3125 * Send the port enable message and set the wait_for_port_enable flag.
3126 * This flag helps to keep the simq frozen until all discovery events
3130 mpssas_startup_increment(sassc);
3131 sc->wait_for_port_enable = 1;
3132 mpssas_send_portenable(sc);
/*
 * Allocate a command and submit the MPI2 PORT_ENABLE request that
 * tells the firmware to begin device discovery.  Completion is
 * handled by mpssas_portenable_complete().
 *
 * NOTE(review): the error return for a failed allocation and the
 * function's final return are elided in this chunk.
 */
3137 mpssas_send_portenable(struct mps_softc *sc)
3139 MPI2_PORT_ENABLE_REQUEST *request;
3140 struct mps_command *cm;
3142 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3144 if ((cm = mps_alloc_command(sc)) == NULL)
3146 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3147 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3148 request->MsgFlags = 0;
3150 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3151 cm->cm_complete = mpssas_portenable_complete;
/* Port enable carries no data; map just queues the request. */
3155 mps_map_command(sc, cm);
3156 mps_dprint(sc, MPS_TRACE,
3157 "mps_send_portenable finished cm %p req %p complete %p\n",
3158 cm, cm->cm_req, cm->cm_complete);
3163 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3165 MPI2_PORT_ENABLE_REPLY *reply;
3166 struct mpssas_softc *sassc;
3167 struct mpssas_target *target;
3170 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3174 * Currently there should be no way we can hit this case. It only
3175 * happens when we have a failure to allocate chain frames, and
3176 * port enable commands don't have S/G lists.
3178 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3179 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3180 "This should not happen!\n", __func__, cm->cm_flags);
3183 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3185 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3186 else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3187 MPI2_IOCSTATUS_SUCCESS)
3188 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3190 mps_free_command(sc, cm);
3191 if (sc->mps_ich.ich_arg != NULL) {
3192 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3193 config_intrhook_disestablish(&sc->mps_ich);
3194 sc->mps_ich.ich_arg = NULL;
3198 * Get WarpDrive info after discovery is complete but before the scan
3199 * starts. At this point, all devices are ready to be exposed to the
3200 * OS. If devices should be hidden instead, take them out of the
3201 * 'targets' array before the scan. The devinfo for a disk will have
3202 * some info and a volume's will be 0. Use that to remove disks.
3204 mps_wd_config_pages(sc);
3205 if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
3206 && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
3207 || (sc->WD_valid_config && (sc->WD_hide_expose ==
3208 MPS_WD_HIDE_IF_VOLUME))) {
3209 for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
3210 target = &sassc->targets[i];
3211 if (target->devinfo) {
3212 target->devinfo = 0x0;
3213 target->encl_handle = 0x0;
3214 target->encl_slot = 0x0;
3215 target->handle = 0x0;
3217 target->linkrate = 0x0;
3218 target->flags = 0x0;
3224 * Done waiting for port enable to complete. Decrement the refcount.
3225 * If refcount is 0, discovery is complete and a rescan of the bus can
3226 * take place. Since the simq was explicitly frozen before port
3227 * enable, it must be explicitly released here to keep the
3228 * freeze/release count in sync.
3230 sc->wait_for_port_enable = 0;
3231 sc->port_enable_complete = 1;
3232 mpssas_startup_decrement(sassc);
3233 xpt_release_simq(sassc->sim, 1);