2 * Copyright (c) 2009 Yahoo! Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2011 LSI Corp.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
39 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * LSI MPT-Fusion Host Adapter FreeBSD
53 * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
56 /* Communications core for LSI MPT2 */
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
69 #include <sys/malloc.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
80 #include <machine/stdarg.h>
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
/*
 * NOTE(review): this chunk is a garbled extraction -- original file line
 * numbers are fused onto each line and many lines (braces, blank lines,
 * some statements) are missing.  Code below is left byte-identical.
 */
/* Discovery grace period: 20s per timeout, at most 10 timeouts (200s total). */
108 #define MPSSAS_DISCOVERY_TIMEOUT 20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
112 * static array to check SCSI OpCode for EEDP protection bits
/*
 * Per-opcode EEDP flag lookup, indexed by the SCSI CDB opcode byte.
 * PRO_R = check/remove protection info on reads, PRO_W = insert on writes,
 * PRO_V = insert (verify-class opcodes).  Non-zero entries cover the
 * READ/WRITE/VERIFY (6/10/12/16) families; all other opcodes get 0.
 */
114 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
115 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
116 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
117 static uint8_t op_code_prot[256] = {
118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
120 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
122 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
123 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
124 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
125 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
126 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
127 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
128 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Kernel malloc type used for all MPS SAS-layer allocations. */
136 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
/* Forward declarations for the file-local (static) SAS layer functions. */
138 static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
139 static void mpssas_log_command(struct mps_command *, const char *, ...)
141 #if 0 /* XXX unused */
142 static void mpssas_discovery_timeout(void *data);
144 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
145 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
146 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
147 static void mpssas_poll(struct cam_sim *sim);
148 static void mpssas_scsiio_timeout(void *data);
149 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
150 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
151 struct mps_command *cm, union ccb *ccb);
152 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
153 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
154 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
/* SMP passthrough support only exists on newer CAM (FreeBSD >= 900026). */
155 #if __FreeBSD_version >= 900026
156 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
157 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
159 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
160 #endif //FreeBSD_version >= 900026
161 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
162 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
163 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
164 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
165 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
166 static void mpssas_scanner_thread(void *arg);
167 #if __FreeBSD_version >= 1000006
168 static void mpssas_async(void *callback_arg, uint32_t code,
169 struct cam_path *path, void *arg);
171 static void mpssas_check_eedp(struct mpssas_softc *sassc);
172 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
174 static int mpssas_send_portenable(struct mps_softc *sc);
175 static void mpssas_portenable_complete(struct mps_softc *sc,
176 struct mps_command *cm);
/*
 * Linear scan of the target array for the entry whose firmware device
 * handle matches 'handle', starting at index 'start'.  Returns the match
 * (NOTE(review): the return/closing lines are missing from this extraction;
 * presumably returns NULL when no target matches -- confirm in full source).
 */
178 static struct mpssas_target *
179 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
181 struct mpssas_target *target;
184 for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
185 target = &sassc->targets[i];
186 if (target->handle == handle)
193 /* we need to freeze the simq during attach and diag reset, to avoid failing
194 * commands before device handles have been found by discovery. Since
195 * discovery involves reading config pages and possibly sending commands,
196 * discovery actions may continue even after we receive the end of discovery
197 * event, so refcount discovery actions instead of assuming we can unfreeze
198 * the simq when we get the event.
/*
 * Bump the startup refcount; the first increment while in startup
 * freezes the SIM queue so CAM holds off commands during discovery.
 */
201 mpssas_startup_increment(struct mpssas_softc *sassc)
203 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
204 if (sassc->startup_refcount++ == 0) {
205 /* just starting, freeze the simq */
206 mps_dprint(sassc->sc, MPS_INFO,
207 "%s freezing simq\n", __func__);
208 xpt_freeze_simq(sassc->sim, 1);
210 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
211 sassc->startup_refcount);
/*
 * Drop the startup refcount; when the last discovery action finishes,
 * leave startup mode, release the simq and rescan the whole topology.
 */
216 mpssas_startup_decrement(struct mpssas_softc *sassc)
218 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
219 if (--sassc->startup_refcount == 0) {
220 /* finished all discovery-related actions, release
221 * the simq and rescan for the latest topology.
223 mps_dprint(sassc->sc, MPS_INFO,
224 "%s releasing simq\n", __func__);
225 sassc->flags &= ~MPSSAS_IN_STARTUP;
226 xpt_release_simq(sassc->sim, 1);
/* NULL target => wildcard rescan of the entire bus. */
227 mpssas_rescan_target(sassc->sc, NULL);
229 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
230 sassc->startup_refcount);
234 /* LSI's firmware requires us to stop sending commands when we're doing task
235 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management.  The first
 * outstanding TM freezes the simq (paired with mpssas_free_tm below).
 */
239 mpssas_alloc_tm(struct mps_softc *sc)
241 struct mps_command *tm;
243 tm = mps_alloc_high_priority_command(sc);
245 if (sc->sassc->tm_count++ == 0) {
246 mps_printf(sc, "%s freezing simq\n", __func__);
247 xpt_freeze_simq(sc->sassc->sim, 1);
249 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
250 sc->sassc->tm_count);
/*
 * Release a task-management command; when the TM refcount drops to zero
 * the simq is released so normal I/O can resume.
 */
256 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
261 /* if there are no TMs in use, we can release the simq. We use our
262 * own refcount so that it's easier for a diag reset to cleanup and
265 if (--sc->sassc->tm_count == 0) {
266 mps_printf(sc, "%s releasing simq\n", __func__);
267 xpt_release_simq(sc->sassc->sim, 1);
269 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
270 sc->sassc->tm_count);
272 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan for a single target, or for the whole bus when
 * targ is NULL (wildcard target id).  Allocates a CCB, builds a path and
 * hands it to the scanner thread via mpssas_rescan().
 */
277 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
279 struct mpssas_softc *sassc = sc->sassc;
281 target_id_t targetid;
284 pathid = cam_sim_path(sassc->sim);
286 targetid = CAM_TARGET_WILDCARD;
/* Target id is the index of targ within the targets array. */
288 targetid = targ - sassc->targets;
291 * Allocate a CCB and schedule a rescan.
293 ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
295 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
296 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
302 /* XXX Hardwired to scan the bus for now */
303 ccb->ccb_h.func_code = XPT_SCAN_BUS;
304 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
305 mpssas_rescan(sassc, ccb);
/*
 * printf-style logging helper for a command: prefixes the message with
 * the CAM path (and CDB for SCSI I/O) when a CCB is attached, otherwise
 * with a sim/bus/target/lun tuple, plus the request SMID.
 */
309 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
319 sbuf_new(&sb, str, sizeof(str), 0);
323 if (cm->cm_ccb != NULL) {
324 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
326 sbuf_cat(&sb, path_str);
327 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
328 scsi_command_string(&cm->cm_ccb->csio, &sb);
329 sbuf_printf(&sb, "length %d ",
330 cm->cm_ccb->csio.dxfer_len);
/* No CCB: synthesize a "(noperiph:...)" style prefix by hand. */
334 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
335 cam_sim_name(cm->cm_sc->sassc->sim),
336 cam_sim_unit(cm->cm_sc->sassc->sim),
337 cam_sim_bus(cm->cm_sc->sassc->sim),
338 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
342 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
343 sbuf_vprintf(&sb, fmt, ap);
345 kprintf("%s", sbuf_data(&sb));
/*
 * Notify CAM that a target has disappeared: build a path to the target
 * and post an AC_LOST_DEVICE async event so peripherals detach.
 */
351 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
353 struct mpssas_softc *sassc = sc->sassc;
354 path_id_t pathid = cam_sim_path(sassc->sim);
355 struct cam_path *path;
357 mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
358 if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
359 mps_printf(sc, "unable to create path for lost target %d\n",
364 xpt_async(AC_LOST_DEVICE, path, NULL);
369 * The MPT2 firmware performs debounce on the link to avoid transient link
370 * errors and false removals. When it does decide that link has been lost
371 * and a device need to go away, it expects that the host will perform a
372 * target reset and then an op remove. The reset has the side-effect of
373 * aborting any outstanding requests for the device, which is required for
374 * the op-remove to succeed. It's not clear if the host should check for
375 * the device coming back alive after the reset.
/*
 * Start removal of the device with the given firmware handle: tell CAM
 * the target is gone, then issue a target-reset TM whose completion
 * (mpssas_remove_device) performs the SAS IO-unit "op remove".
 */
378 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
380 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
381 struct mps_softc *sc;
382 struct mps_command *cm;
383 struct mpssas_target *targ = NULL;
385 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
388 * If this is a WD controller, determine if the disk should be exposed
389 * to the OS or not. If disk should be exposed, return from this
390 * function without doing anything.
393 if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
394 MPS_WD_EXPOSE_ALWAYS)) {
398 targ = mpssas_find_target_by_handle(sassc, 0, handle);
400 /* FIXME: what is the action? */
401 /* We don't know about this device? */
402 kprintf("%s: invalid handle 0x%x \n", __func__, handle);
406 targ->flags |= MPSSAS_TARGET_INREMOVAL;
408 cm = mpssas_alloc_tm(sc);
410 mps_printf(sc, "%s: command alloc failure\n", __func__);
/* Tell CAM about the loss before the firmware-side teardown. */
414 mpssas_lost_target(sc, targ);
416 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
417 memset(req, 0, sizeof(*req));
418 req->DevHandle = targ->handle;
419 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
420 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
422 /* SAS Hard Link Reset / SATA Link Reset */
423 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
427 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
/* Completion continues the removal; the handle rides in cm_complete_data. */
428 cm->cm_complete = mpssas_remove_device;
429 cm->cm_complete_data = (void *)(uintptr_t)handle;
430 mps_map_command(sc, cm);
/*
 * Completion of the target-reset TM issued by mpssas_prepare_remove().
 * On success, reuses the same command to send the SAS IO-unit control
 * OP_REMOVE_DEVICE request, then fails any commands still queued on the
 * target with CAM_DEV_NOT_THERE.
 */
434 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
436 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
437 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
438 struct mpssas_target *targ;
439 struct mps_command *next_cm;
442 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
444 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
445 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
449 * Currently there should be no way we can hit this case. It only
450 * happens when we have a failure to allocate chain frames, and
451 * task management commands don't have S/G lists.
453 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
454 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
455 "This should not happen!\n", __func__, tm->cm_flags,
457 mpssas_free_tm(sc, tm);
462 /* XXX retry the remove after the diag reset completes? */
463 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
465 mpssas_free_tm(sc, tm);
469 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
470 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
471 reply->IOCStatus, handle);
472 mpssas_free_tm(sc, tm);
476 mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
477 reply->TerminationCount);
478 mps_free_reply(sc, tm->cm_reply_data);
479 tm->cm_reply = NULL; /* Ensures the the reply won't get re-freed */
481 /* Reuse the existing command */
482 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
483 memset(req, 0, sizeof(*req));
484 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
485 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
486 req->DevHandle = handle;
488 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
489 tm->cm_complete = mpssas_remove_complete;
490 tm->cm_complete_data = (void *)(uintptr_t)handle;
492 mps_map_command(sc, tm);
494 mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
/* Fail any stragglers the reset did not abort ('tm' is reused as iterator). */
496 TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
499 mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
500 ccb = tm->cm_complete_data;
501 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
502 mpssas_scsiio_complete(sc, tm);
/*
 * Completion of the OP_REMOVE_DEVICE IO-unit control request.  On
 * success, clears the per-target state so the slot can be reused --
 * but devname/sasaddr are deliberately kept (see comment below).
 */
507 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
509 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
511 struct mpssas_target *targ;
513 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
515 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
516 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
519 * Currently there should be no way we can hit this case. It only
520 * happens when we have a failure to allocate chain frames, and
521 * task management commands don't have S/G lists.
523 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
524 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
525 "This should not happen!\n", __func__, tm->cm_flags,
527 mpssas_free_tm(sc, tm);
532 /* most likely a chip reset */
533 mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
535 mpssas_free_tm(sc, tm);
539 mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
540 handle, reply->IOCStatus);
543 * Don't clear target if remove fails because things will get confusing.
544 * Leave the devname and sasaddr intact so that we know to avoid reusing
545 * this target id if possible, and so we can assign the same target id
546 * to this device if it comes back in the future.
548 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
551 targ->encl_handle = 0x0;
552 targ->encl_slot = 0x0;
553 targ->exp_dev_handle = 0x0;
555 targ->linkrate = 0x0;
559 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask the SAS layer cares about (device
 * status, discovery, topology, IR/RAID, log entries) and register
 * mpssas_evt_handler for those events.
 */
563 mpssas_register_events(struct mps_softc *sc)
568 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
569 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
570 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
571 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
572 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
573 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
574 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
575 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
576 setbit(events, MPI2_EVENT_IR_VOLUME);
577 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
578 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
579 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
581 mps_register_events(sc, events, mpssas_evt_handler, NULL,
582 &sc->sassc->mpssas_eh);
/*
 * Attach the CAM/SAS layer: allocate the softc and target array, create
 * the SIM and its queue, start the event taskqueue and rescan thread,
 * register the bus with CAM (frozen until discovery finishes), and hook
 * up async and firmware event handlers.
 */
588 mps_attach_sas(struct mps_softc *sc)
590 struct mpssas_softc *sassc;
591 #if __FreeBSD_version >= 1000006
596 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
598 sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
599 sassc->targets = kmalloc(sizeof(struct mpssas_target) *
600 sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
604 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
605 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
610 unit = device_get_unit(sc->mps_dev);
611 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
612 unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
613 if (sassc->sim == NULL) {
614 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
619 TAILQ_INIT(&sassc->ev_queue);
621 /* Initialize taskqueue for Event Handling */
622 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
623 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
624 taskqueue_thread_enqueue, &sassc->ev_tq);
626 /* Run the task queue with lowest priority */
627 taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
628 device_get_nameunit(sc->mps_dev));
630 TAILQ_INIT(&sassc->ccb_scanq);
631 error = mps_kproc_create(mpssas_scanner_thread, sassc,
632 &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
634 mps_printf(sc, "Error %d starting rescan thread\n", error);
639 sassc->flags |= MPSSAS_SCANTHREAD;
642 * XXX There should be a bus for every port on the adapter, but since
643 * we're just going to fake the topology for now, we'll pretend that
644 * everything is just a target on a single bus.
646 if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
647 mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
654 * Assume that discovery events will start right away. Freezing
655 * the simq will prevent the CAM boottime scanner from running
656 * before discovery is complete.
658 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
659 xpt_freeze_simq(sassc->sim, 1);
660 sc->sassc->startup_refcount = 0;
662 callout_init_mp(&sassc->discovery_callout);
663 sassc->discovery_timeouts = 0;
667 #if __FreeBSD_version >= 1000006
668 status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
669 if (status != CAM_REQ_CMP) {
670 mps_printf(sc, "Error %#x registering async handler for "
671 "AC_ADVINFO_CHANGED events\n", status);
677 mpssas_register_events(sc);
/*
 * Tear down the CAM/SAS layer in reverse of attach: deregister events,
 * drain/free the taskqueue, unwind any startup simq freeze, deregister
 * and free the SIM, stop the scanner thread (waits up to 30s), release
 * the simq resources, and free the softc.
 */
685 mps_detach_sas(struct mps_softc *sc)
687 struct mpssas_softc *sassc;
689 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
691 if (sc->sassc == NULL)
695 mps_deregister_events(sc, sassc->mpssas_eh);
698 * Drain and free the event handling taskqueue with the lock
699 * unheld so that any parallel processing tasks drain properly
700 * without deadlocking.
702 if (sassc->ev_tq != NULL)
703 taskqueue_free(sassc->ev_tq);
705 /* Make sure CAM doesn't wedge if we had to bail out early. */
708 /* Deregister our async handler */
709 #if __FreeBSD_version >= 1000006
/* Passing code 0 removes the previously registered callback. */
710 xpt_register_async(0, mpssas_async, sc, NULL);
713 if (sassc->flags & MPSSAS_IN_STARTUP)
714 xpt_release_simq(sassc->sim, 1);
716 if (sassc->sim != NULL) {
717 xpt_bus_deregister(cam_sim_path(sassc->sim));
718 cam_sim_free(sassc->sim);
721 if (sassc->flags & MPSSAS_SCANTHREAD) {
722 sassc->flags |= MPSSAS_SHUTDOWN;
723 wakeup(&sassc->ccb_scanq);
725 if (sassc->flags & MPSSAS_SCANTHREAD) {
726 lksleep(&sassc->flags, &sc->mps_lock, 0,
727 "mps_shutdown", 30 * hz);
732 if (sassc->devq != NULL)
733 cam_simq_release(sassc->devq);
735 kfree(sassc->targets, M_MPT2);
736 kfree(sassc, M_MPT2);
/* Discovery finished: cancel a pending discovery-timeout callout, if any. */
743 mpssas_discovery_end(struct mpssas_softc *sassc)
745 struct mps_softc *sc = sassc->sc;
747 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
749 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
750 callout_stop(&sassc->discovery_callout);
754 #if 0 /* XXX unused */
/*
 * Callout fired when discovery takes too long -- likely broken
 * interrupts.  Polls the hardware, then either ends discovery, re-arms
 * the timeout (up to MPSSAS_MAX_DISCOVERY_TIMEOUTS), or gives up.
 * Compiled out (#if 0) in this port.
 */
756 mpssas_discovery_timeout(void *data)
758 struct mpssas_softc *sassc = data;
759 struct mps_softc *sc;
762 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
766 "Timeout waiting for discovery, interrupts may not be working!\n");
767 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
769 /* Poll the hardware for events in case interrupts aren't working */
772 mps_printf(sassc->sc,
773 "Finished polling after discovery timeout at %d\n", ticks);
775 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
776 mpssas_discovery_end(sassc);
778 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
779 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
780 callout_reset(&sassc->discovery_callout,
781 MPSSAS_DISCOVERY_TIMEOUT * hz,
782 mpssas_discovery_timeout, sassc);
783 sassc->discovery_timeouts++;
785 mps_dprint(sassc->sc, MPS_FAULT,
786 "Discovery timed out, continuing.\n");
787 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
788 mpssas_discovery_end(sassc);
/*
 * Main CAM action entry point for the SIM: dispatches on the CCB
 * function code -- path inquiry, transport settings, geometry, device
 * reset, SCSI I/O, SMP I/O -- and fills in or forwards the CCB.
 * Caller must hold the mps lock (asserted below).
 */
797 mpssas_action(struct cam_sim *sim, union ccb *ccb)
799 struct mpssas_softc *sassc;
801 sassc = cam_sim_softc(sim);
803 mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
804 ccb->ccb_h.func_code);
805 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
807 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: report HBA capabilities and limits to CAM. */
810 struct ccb_pathinq *cpi = &ccb->cpi;
812 cpi->version_num = 1;
813 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
814 cpi->target_sprt = 0;
815 cpi->hba_misc = PIM_NOBUSRESET;
816 cpi->hba_eng_cnt = 0;
817 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
819 cpi->initiator_id = 255;
820 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
821 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
822 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
823 cpi->unit_number = cam_sim_unit(sim);
824 cpi->bus_id = cam_sim_bus(sim);
825 cpi->base_transfer_speed = 150000;
826 cpi->transport = XPORT_SAS;
827 cpi->transport_version = 0;
828 cpi->protocol = PROTO_SCSI;
829 cpi->protocol_version = SCSI_REV_SPC;
830 #if __FreeBSD_version >= 800001
832 * XXX KDM where does this number come from?
834 cpi->maxio = 256 * 1024;
836 cpi->ccb_h.status = CAM_REQ_CMP;
839 case XPT_GET_TRAN_SETTINGS:
841 struct ccb_trans_settings *cts;
842 struct ccb_trans_settings_sas *sas;
843 struct ccb_trans_settings_scsi *scsi;
844 struct mpssas_target *targ;
847 sas = &cts->xport_specific.sas;
848 scsi = &cts->proto_specific.scsi;
/* handle == 0 means the target slot is unused. */
850 targ = &sassc->targets[cts->ccb_h.target_id];
851 if (targ->handle == 0x0) {
852 cts->ccb_h.status = CAM_TID_INVALID;
856 cts->protocol_version = SCSI_REV_SPC2;
857 cts->transport = XPORT_SAS;
858 cts->transport_version = 0;
860 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the firmware link rate to a CAM bitrate in kbit/s. */
861 switch (targ->linkrate) {
863 sas->bitrate = 150000;
866 sas->bitrate = 300000;
869 sas->bitrate = 600000;
875 cts->protocol = PROTO_SCSI;
876 scsi->valid = CTS_SCSI_VALID_TQ;
877 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
879 cts->ccb_h.status = CAM_REQ_CMP;
882 case XPT_CALC_GEOMETRY:
883 cam_calc_geometry(&ccb->ccg, /*extended*/1);
884 ccb->ccb_h.status = CAM_REQ_CMP;
887 mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
888 mpssas_action_resetdev(sassc, ccb);
893 mps_printf(sassc->sc, "mpssas_action faking success for "
895 ccb->ccb_h.status = CAM_REQ_CMP;
898 mpssas_action_scsiio(sassc, ccb);
900 #if __FreeBSD_version >= 900026
902 mpssas_action_smpio(sassc, ccb);
906 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Post a CAM async event (e.g. AC_BUS_RESET, AC_SENT_BDR) for the given
 * target/lun so upper layers learn about the reset.
 */
914 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
915 target_id_t target_id, lun_id_t lun_id)
917 path_id_t path_id = cam_sim_path(sc->sassc->sim);
918 struct cam_path *path;
920 mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
921 ac_code, target_id, lun_id);
923 if (xpt_create_path(&path, NULL,
924 path_id, target_id, lun_id) != CAM_REQ_CMP) {
925 mps_printf(sc, "unable to create path for reset "
930 xpt_async(ac_code, path, NULL);
/*
 * After a diag reset, walk every command slot and force completion with
 * a NULL reply: run the completion callback, wake sleepers on polled
 * commands, and log any command that is neither completable nor free.
 * Caller must hold the mps lock (asserted below).
 */
935 mpssas_complete_all_commands(struct mps_softc *sc)
937 struct mps_command *cm;
941 mps_printf(sc, "%s\n", __func__);
942 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
944 /* complete all commands with a NULL reply */
945 for (i = 1; i < sc->num_reqs; i++) {
946 cm = &sc->commands[i];
950 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
951 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
953 if (cm->cm_complete != NULL) {
954 mpssas_log_command(cm,
955 "completing cm %p state %x ccb %p for diag reset\n",
956 cm, cm->cm_state, cm->cm_ccb);
958 cm->cm_complete(sc, cm);
962 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
963 mpssas_log_command(cm,
964 "waking up cm %p state %x ccb %p for diag reset\n",
965 cm, cm->cm_state, cm->cm_ccb);
970 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
971 /* this should never happen, but if it does, log */
972 mpssas_log_command(cm,
973 "cm %p state %x flags 0x%x ccb %p during diag "
974 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Reinitialize SAS-layer state after a controller diag reset: re-enter
 * startup mode, freeze the simq, announce a bus reset, force-complete
 * all outstanding commands, and invalidate every cached target handle
 * (handles change across resets and must be rediscovered).
 */
981 mpssas_handle_reinit(struct mps_softc *sc)
985 /* Go back into startup mode and freeze the simq, so that CAM
986 * doesn't send any commands until after we've rediscovered all
987 * targets and found the proper device handles for them.
989 * After the reset, portenable will trigger discovery, and after all
990 * discovery-related activities have finished, the simq will be
993 mps_printf(sc, "%s startup\n", __func__);
994 sc->sassc->flags |= MPSSAS_IN_STARTUP;
995 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
996 xpt_freeze_simq(sc->sassc->sim, 1);
998 /* notify CAM of a bus reset */
999 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1002 /* complete and cleanup after all outstanding commands */
1003 mpssas_complete_all_commands(sc);
1005 mps_printf(sc, "%s startup %u tm %u after command completion\n",
1006 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1009 * The simq was explicitly frozen above, so set the refcount to 0.
1010 * The simq will be explicitly released after port enable completes.
1012 sc->sassc->startup_refcount = 0;
1014 /* zero all the target handles, since they may change after the
1015 * reset, and we have to rediscover all the targets and use the new
1018 for (i = 0; i < sc->facts->MaxTargets; i++) {
1019 if (sc->sassc->targets[i].outstanding != 0)
1020 mps_printf(sc, "target %u outstanding %u\n",
1021 i, sc->sassc->targets[i].outstanding);
1022 sc->sassc->targets[i].handle = 0x0;
1023 sc->sassc->targets[i].exp_dev_handle = 0x0;
1024 sc->sassc->targets[i].outstanding = 0;
1025 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/* Callout handler: a task-management command itself timed out -- log it. */
1029 mpssas_tm_timeout(void *data)
1031 struct mps_command *tm = data;
1032 struct mps_softc *sc = tm->cm_sc;
1035 mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
/*
 * Completion of a logical-unit-reset TM.  If no commands remain
 * outstanding on the LUN, recovery succeeded: announce AC_SENT_BDR and
 * continue with any other LUN's timed-out command (abort) or free the
 * TM.  If commands remain, the reset effectively failed and recovery
 * escalates to a full target reset.
 */
1041 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1043 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1044 unsigned int cm_count = 0;
1045 struct mps_command *cm;
1046 struct mpssas_target *targ;
1048 callout_stop(&tm->cm_callout);
1050 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1054 * Currently there should be no way we can hit this case. It only
1055 * happens when we have a failure to allocate chain frames, and
1056 * task management commands don't have S/G lists.
1058 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1059 mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1060 "This should not happen!\n", __func__, tm->cm_flags);
1061 mpssas_free_tm(sc, tm);
1065 if (reply == NULL) {
1066 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1067 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1068 /* this completion was due to a reset, just cleanup */
1069 targ->flags &= ~MPSSAS_TARGET_INRESET;
1071 mpssas_free_tm(sc, tm);
1074 /* we should have gotten a reply. */
1080 mpssas_log_command(tm,
1081 "logical unit reset status 0x%x code 0x%x count %u\n",
1082 reply->IOCStatus, reply->ResponseCode,
1083 reply->TerminationCount);
1085 /* See if there are any outstanding commands for this LUN.
1086 * This could be made more efficient by using a per-LU data
1087 * structure of some sort.
1089 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1090 if (cm->cm_lun == tm->cm_lun)
1094 if (cm_count == 0) {
1095 mpssas_log_command(tm,
1096 "logical unit %u finished recovery after reset\n",
1099 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1102 /* we've finished recovery for this logical unit. check and
1103 * see if some other logical unit has a timedout command
1104 * that needs to be processed.
1106 cm = TAILQ_FIRST(&targ->timedout_commands);
1108 mpssas_send_abort(sc, tm, cm);
1112 mpssas_free_tm(sc, tm);
1116 /* if we still have commands for this LUN, the reset
1117 * effectively failed, regardless of the status reported.
1118 * Escalate to a target reset.
1120 mpssas_log_command(tm,
1121 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1123 mpssas_send_reset(sc, tm,
1124 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion of a target-reset TM.  If the target has no outstanding
 * commands, recovery for the whole target succeeded: announce
 * AC_SENT_BDR and free the TM.  If commands remain, the reset failed
 * and recovery escalates further (continuation lines are past this
 * view).
 */
1129 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1131 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1132 struct mpssas_target *targ;
1134 callout_stop(&tm->cm_callout);
1136 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1140 * Currently there should be no way we can hit this case. It only
1141 * happens when we have a failure to allocate chain frames, and
1142 * task management commands don't have S/G lists.
1144 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1145 mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1146 "This should not happen!\n", __func__, tm->cm_flags);
1147 mpssas_free_tm(sc, tm);
1151 if (reply == NULL) {
1152 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1153 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1154 /* this completion was due to a reset, just cleanup */
1155 targ->flags &= ~MPSSAS_TARGET_INRESET;
1157 mpssas_free_tm(sc, tm);
1160 /* we should have gotten a reply. */
1166 mpssas_log_command(tm,
1167 "target reset status 0x%x code 0x%x count %u\n",
1168 reply->IOCStatus, reply->ResponseCode,
1169 reply->TerminationCount);
1171 targ->flags &= ~MPSSAS_TARGET_INRESET;
1173 if (targ->outstanding == 0) {
1174 /* we've finished recovery for this target and all
1175 * of its logical units.
1177 mpssas_log_command(tm,
1178 "recovery finished after target reset\n");
1180 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1184 mpssas_free_tm(sc, tm);
1187 /* after a target reset, if this target still has
1188 * outstanding commands, the reset effectively failed,
1189 * regardless of the status reported. escalate.
1191 mpssas_log_command(tm,
1192 "target reset complete for tm %p, but still have %u command(s)\n",
1193 tm, targ->outstanding);
1198 #define MPS_RESET_TIMEOUT 30
/*
 * Build and submit a SCSI task-management reset request (logical-unit
 * reset or target reset, selected by 'type') on the high-priority queue.
 * Arms a MPS_RESET_TIMEOUT-second callout and wires the matching
 * completion handler before mapping the command to the hardware.
 *
 * NOTE(review): interior lines (error returns, closing braces) are
 * elided in this view.
 */
1201 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1203 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1204 struct mpssas_target *target;
1207 target = tm->cm_targ;
/* A zero devhandle means the target is gone/invalid; nothing to reset. */
1208 if (target->handle == 0) {
1209 mps_printf(sc, "%s null devhandle for target_id %d\n",
1210 __func__, target->tid);
1214 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1215 req->DevHandle = target->handle;
1216 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1217 req->TaskType = type;
1219 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1220 /* XXX Need to handle invalid LUNs */
1221 MPS_SET_LUN(req->LUN, tm->cm_lun);
1222 tm->cm_targ->logical_unit_resets++;
1223 mpssas_log_command(tm, "sending logical unit reset\n");
1224 tm->cm_complete = mpssas_logical_unit_reset_complete;
1226 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1227 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1228 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1229 tm->cm_targ->target_resets++;
/* Mark the target so concurrent completions know a reset is in flight. */
1230 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1231 mpssas_log_command(tm, "sending target reset\n");
1232 tm->cm_complete = mpssas_target_reset_complete;
1235 mps_printf(sc, "unexpected reset type 0x%x\n", type);
/* TM requests must go out on the high-priority request queue. */
1240 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1241 tm->cm_complete_data = (void *)tm;
/* If the firmware never answers, mpssas_tm_timeout() escalates. */
1243 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1244 mpssas_tm_timeout, tm);
1246 err = mps_map_command(sc, tm);
1248 mpssas_log_command(tm,
1249 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT_TASK task-management command.
 * Depending on the state of the target's timed-out command queue it
 * either finishes recovery, aborts the next timed-out command, or
 * escalates to a logical-unit reset.
 *
 * NOTE(review): interior lines are elided in this view; the visible
 * control flow is annotated as-is.
 */
1257 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1259 struct mps_command *cm;
1260 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1261 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1262 struct mpssas_target *targ;
/* Cancel the abort timeout armed by mpssas_send_abort(). */
1264 callout_stop(&tm->cm_callout);
1266 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1267 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1271 * Currently there should be no way we can hit this case. It only
1272 * happens when we have a failure to allocate chain frames, and
1273 * task management commands don't have S/G lists.
1275 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1276 mpssas_log_command(tm,
1277 "cm_flags = %#x for abort %p TaskMID %u!\n",
1278 tm->cm_flags, tm, req->TaskMID);
1279 mpssas_free_tm(sc, tm);
/* A NULL reply is tolerable only when it was caused by a diag reset. */
1283 if (reply == NULL) {
1284 mpssas_log_command(tm,
1285 "NULL abort reply for tm %p TaskMID %u\n",
1287 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1288 /* this completion was due to a reset, just cleanup */
1290 mpssas_free_tm(sc, tm);
1293 /* we should have gotten a reply. */
1299 mpssas_log_command(tm,
1300 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1302 reply->IOCStatus, reply->ResponseCode,
1303 reply->TerminationCount);
/* Peek at the next timed-out command to decide what happens next. */
1305 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1307 /* if there are no more timedout commands, we're done with
1308 * error recovery for this target.
1310 mpssas_log_command(tm,
1311 "finished recovery after aborting TaskMID %u\n",
1315 mpssas_free_tm(sc, tm);
/* Different SMID at the head => the aborted command completed; keep
 * working through the remaining timed-out commands with the same TM. */
1317 else if (req->TaskMID != cm->cm_desc.Default.SMID) {
1318 /* abort success, but we have more timedout commands to abort */
1319 mpssas_log_command(tm,
1320 "continuing recovery after aborting TaskMID %u\n",
1323 mpssas_send_abort(sc, tm, cm);
1326 /* we didn't get a command completion, so the abort
1327 * failed as far as we're concerned. escalate.
1329 mpssas_log_command(tm,
1330 "abort failed for TaskMID %u tm %p\n",
1333 mpssas_send_reset(sc, tm,
1334 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1338 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and submit an ABORT_TASK TM for the timed-out command 'cm',
 * using TM slot 'tm'. The target/LUN/SMID are copied from 'cm', the
 * request goes on the high-priority queue, and a MPS_ABORT_TIMEOUT
 * callout guards against the firmware never responding.
 *
 * NOTE(review): the assignment initializing 'targ' (presumably
 * targ = cm->cm_targ) and the error-return lines are elided here.
 */
1341 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1343 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1344 struct mpssas_target *targ;
/* Can't abort against a device that no longer has a handle. */
1348 if (targ->handle == 0) {
1349 mps_printf(sc, "%s null devhandle for target_id %d\n",
1350 __func__, cm->cm_ccb->ccb_h.target_id);
1354 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1355 req->DevHandle = targ->handle;
1356 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1357 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1359 /* XXX Need to handle invalid LUNs */
1360 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies exactly which outstanding request to abort. */
1362 req->TaskMID = cm->cm_desc.Default.SMID;
1365 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1366 tm->cm_complete = mpssas_abort_complete;
1367 tm->cm_complete_data = (void *)tm;
/* Remember which target/LUN this TM is recovering, for the completion. */
1368 tm->cm_targ = cm->cm_targ;
1369 tm->cm_lun = cm->cm_lun;
1371 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1372 mpssas_tm_timeout, tm);
1376 err = mps_map_command(sc, tm);
1378 mpssas_log_command(tm,
1379 "error %d sending abort for cm %p SMID %u\n",
1380 err, cm, req->TaskMID);
/*
 * Callout handler fired when a SCSI I/O command exceeds its CAM timeout.
 * Marks the command TIMEDOUT, queues it on the target's recovery list,
 * and kicks off error recovery by sending an abort — unless recovery is
 * already in progress for the target, in which case the command just
 * waits its turn.
 *
 * NOTE(review): several interior lines (returns, targ assignment,
 * closing braces) are elided in this view.
 */
1386 mpssas_scsiio_timeout(void *data)
1388 struct mps_softc *sc;
1389 struct mps_command *cm;
1390 struct mpssas_target *targ;
1392 cm = (struct mps_command *)data;
/* Must already hold the softc lock — callout was armed LOCKED. */
1395 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1397 mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1400 * Run the interrupt handler to make sure it's not pending. This
1401 * isn't perfect because the command could have already completed
1402 * and been re-used, though this is unlikely.
1404 mps_intr_locked(sc);
/* The command completed while the callout was firing; no recovery. */
1405 if (cm->cm_state == MPS_CM_STATE_FREE) {
1406 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1410 if (cm->cm_ccb == NULL) {
1411 mps_printf(sc, "command timeout with NULL ccb\n");
1415 mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1421 /* XXX first, check the firmware state, to see if it's still
1422 * operational. if not, do a diag reset.
/* Record the timeout in the CCB and park the command on the target's
 * recovery queue until an abort/reset resolves it. */
1425 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1426 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1427 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1429 if (targ->tm != NULL) {
1430 /* target already in recovery, just queue up another
1431 * timedout command to be processed later.
1433 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1436 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1437 mps_printf(sc, "timedout cm %p allocated tm %p\n",
1440 /* start recovery by aborting the first timedout command */
1441 mpssas_send_abort(sc, targ->tm, cm);
1444 /* XXX queue this target up for recovery once a TM becomes
1445 * available. The firmware only has a limited number of
1446 * HighPriority credits for the high priority requests used
1447 * for task management, and we ran out.
1449 * Isilon: don't worry about this for now, since we have
1450 * more credits than disks in an enclosure, and limit
1451 * ourselves to one TM per target for recovery.
1453 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
/*
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI CCB into an
 * MPI2 SCSI IO request, set up EEDP (protection information) if the
 * LUN is formatted for it, optionally attempt WD direct-drive I/O,
 * arm the per-command timeout, and hand the command to the hardware.
 *
 * NOTE(review): interior lines (returns, xpt_done calls, braces) are
 * elided in this view.
 */
1460 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1462 MPI2_SCSI_IO_REQUEST *req;
1463 struct ccb_scsiio *csio;
1464 struct mps_softc *sc;
1465 struct mpssas_target *targ;
1466 struct mpssas_lun *lun;
1467 struct mps_command *cm;
1468 uint8_t i, lba_byte, *ref_tag_addr;
1469 uint16_t eedp_flags;
1472 mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1473 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
/* Resolve the target; handle 0 means nothing mapped at this target id. */
1476 targ = &sassc->targets[csio->ccb_h.target_id];
1477 if (targ->handle == 0x0) {
1478 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1479 __func__, csio->ccb_h.target_id);
1480 csio->ccb_h.status = CAM_TID_INVALID;
1485 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1486 * that the volume has timed out. We want volumes to be enumerated
1487 * until they are deleted/removed, not just failed.
1489 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1490 if (targ->devinfo == 0)
1491 csio->ccb_h.status = CAM_REQ_CMP;
1493 csio->ccb_h.status = CAM_SEL_TIMEOUT;
/* Driver is shutting down; reject new I/O. */
1498 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1499 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1500 csio->ccb_h.status = CAM_TID_INVALID;
/* Out of command slots: freeze the SIM queue and ask CAM to requeue. */
1505 cm = mps_alloc_command(sc);
1507 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1508 xpt_freeze_simq(sassc->sim, 1);
1509 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1511 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1512 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI IO request in the command's request frame. */
1517 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1518 bzero(req, sizeof(*req));
1519 req->DevHandle = targ->handle;
1520 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1522 req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1523 req->SenseBufferLength = MPS_SENSE_LEN;
1525 req->ChainOffset = 0;
1526 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1531 req->DataLength = csio->dxfer_len;
1532 req->BidirectionalDataLength = 0;
1533 req->IoFlags = csio->cdb_len;
1536 /* Note: BiDirectional transfers are not supported */
/* Map CAM direction flags to MPI2 Control direction bits. */
1537 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1539 req->Control = MPI2_SCSIIO_CONTROL_READ;
1540 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1543 req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1544 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1548 req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1553 * It looks like the hardware doesn't require an explicit tag
1554 * number for each transaction. SAM Task Management not supported
/* Translate CAM tag actions to MPI2 queueing attributes. */
1557 switch (csio->tag_action) {
1558 case MSG_HEAD_OF_Q_TAG:
1559 req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1561 case MSG_ORDERED_Q_TAG:
1562 req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1565 req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1567 case CAM_TAG_ACTION_NONE:
1568 case MSG_SIMPLE_Q_TAG:
1570 req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Apply the per-target Transport Layer Retries setting. */
1573 req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1575 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1576 mps_free_command(sc, cm);
1577 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Copy the CDB from wherever CAM put it (pointer vs. inline bytes). */
1582 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1583 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1585 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1586 req->IoFlags = csio->cdb_len;
1589 * Check if EEDP is supported and enabled. If it is then check if the
1590 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1591 * is formatted for EEDP support. If all of this is true, set CDB up
1592 * for EEDP transfer.
1594 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1595 if (sc->eedp_enabled && eedp_flags) {
1596 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1597 if (lun->lun_id == csio->ccb_h.target_lun) {
1602 if ((lun != NULL) && (lun->eedp_formatted)) {
1603 req->EEDPBlockSize = lun->eedp_block_size;
1604 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1605 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1606 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1607 req->EEDPFlags = eedp_flags;
1610 * If CDB less than 32, fill in Primary Ref Tag with
1611 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1612 * already there. Also, set protection bit. FreeBSD
1613 * currently does not support CDBs bigger than 16, but
1614 * the code doesn't hurt, and will be here for the
1617 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 in 16-byte CDBs, byte 2 in 10-byte CDBs. */
1618 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1619 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1620 PrimaryReferenceTag;
1621 for (i = 0; i < 4; i++) {
1623 req->CDB.CDB32[lba_byte + i];
1626 req->CDB.EEDP32.PrimaryApplicationTagMask =
1628 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1632 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1633 req->EEDPFlags = eedp_flags;
1634 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Fill in the DMA/SGL bookkeeping and the completion hook. */
1640 cm->cm_data = csio->data_ptr;
1641 cm->cm_length = csio->dxfer_len;
1642 cm->cm_sge = &req->SGL;
1643 cm->cm_sglsize = (32 - 24) * 4;
1644 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1645 cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1646 cm->cm_complete = mpssas_scsiio_complete;
1647 cm->cm_complete_data = ccb;
1649 cm->cm_lun = csio->ccb_h.target_lun;
1653 * If HBA is a WD and the command is not for a retry, try to build a
1654 * direct I/O message. If failed, or the command is for a retry, send
1655 * the I/O to the IR volume itself.
1657 if (sc->WD_valid_config) {
1658 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1659 mpssas_direct_drive_io(sassc, cm, ccb);
1661 ccb->ccb_h.status = CAM_REQ_INPROG;
/* ccb_h.timeout is in milliseconds; convert to ticks. */
1665 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1666 mpssas_scsiio_timeout, cm);
1669 targ->outstanding++;
1670 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1672 if ((sc->mps_debug & MPS_TRACE) != 0)
1673 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1674 __func__, cm, ccb, targ->outstanding);
1676 mps_map_command(sc, cm);
/*
 * Completion handler for a SCSI I/O command: unmap DMA, update target
 * accounting, translate the MPI2 IOCStatus/SCSIStatus into a CAM status,
 * copy autosense data, handle WD direct-drive retries, and manage SIM /
 * device queue freezing for error ordering.
 *
 * NOTE(review): interior lines (returns, braces, some assignments) are
 * elided in this view.
 */
1681 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1683 MPI2_SCSI_IO_REPLY *rep;
1685 struct ccb_scsiio *csio;
1686 struct mpssas_softc *sassc;
1687 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1688 u8 *TLR_bits, TLR_on;
1692 mps_dprint(sc, MPS_TRACE,
1693 "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1694 __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1695 cm->cm_targ->outstanding);
1697 callout_stop(&cm->cm_callout);
1698 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1701 ccb = cm->cm_complete_data;
1703 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1705 * XXX KDM if the chain allocation fails, does it matter if we do
1706 * the sync and unload here? It is simpler to do it in every case,
1707 * assuming it doesn't cause problems.
1709 if (cm->cm_data != NULL) {
1710 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1711 dir = BUS_DMASYNC_POSTREAD;
/* NOTE(review): stray double semicolon below; also, if neither DATAIN
 * nor DATAOUT is set, 'dir' is passed to bus_dmamap_sync() without a
 * visible initialization — confirm 'dir' has a safe default upstream. */
1712 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1713 dir = BUS_DMASYNC_POSTWRITE;;
1714 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1715 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Per-target accounting: this command is no longer outstanding. */
1718 cm->cm_targ->completed++;
1719 cm->cm_targ->outstanding--;
1720 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
/* If this command had timed out, pull it off the recovery queue too. */
1722 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1723 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1724 if (cm->cm_reply != NULL)
1725 mpssas_log_command(cm,
1726 "completed timedout cm %p ccb %p during recovery "
1727 "ioc %x scsi %x state %x xfer %u\n",
1729 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1730 rep->TransferCount);
1732 mpssas_log_command(cm,
1733 "completed timedout cm %p ccb %p during recovery\n",
1735 } else if (cm->cm_targ->tm != NULL) {
1736 if (cm->cm_reply != NULL)
1737 mpssas_log_command(cm,
1738 "completed cm %p ccb %p during recovery "
1739 "ioc %x scsi %x state %x xfer %u\n",
1741 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1742 rep->TransferCount);
1744 mpssas_log_command(cm,
1745 "completed cm %p ccb %p during recovery\n",
1747 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1748 mpssas_log_command(cm,
1749 "reset completed cm %p ccb %p\n",
1753 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1755 * We ran into an error after we tried to map the command,
1756 * so we're getting a callback without queueing the command
1757 * to the hardware. So we set the status here, and it will
1758 * be retained below. We'll go through the "fast path",
1759 * because there can be no reply when we haven't actually
1760 * gone out to the hardware.
1762 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1765 * Currently the only error included in the mask is
1766 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1767 * chain frames. We need to freeze the queue until we get
1768 * a command that completed without this error, which will
1769 * hopefully have some chain frames attached that we can
1770 * use. If we wanted to get smarter about it, we would
1771 * only unfreeze the queue in this condition when we're
1772 * sure that we're getting some chain frames back. That's
1773 * probably unnecessary.
1775 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1776 xpt_freeze_simq(sassc->sim, 1);
1777 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1778 mps_dprint(sc, MPS_INFO, "Error sending command, "
1779 "freezing SIM queue\n");
1783 /* Take the fast path to completion */
1784 if (cm->cm_reply == NULL) {
/* No reply frame: success unless a diag reset intervened. */
1785 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1786 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1787 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1789 ccb->ccb_h.status = CAM_REQ_CMP;
1790 ccb->csio.scsi_status = SCSI_STATUS_OK;
1792 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1793 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1794 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1795 mps_dprint(sc, MPS_INFO,
1796 "Unfreezing SIM queue\n");
1801 * There are two scenarios where the status won't be
1802 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
1803 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1805 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1807 * Freeze the dev queue so that commands are
1808 * executed in the correct order with after error
1811 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1812 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1814 mps_free_command(sc, cm);
1819 if (sc->mps_debug & MPS_TRACE)
1820 mpssas_log_command(cm,
1821 "ioc %x scsi %x state %x xfer %u\n",
1822 rep->IOCStatus, rep->SCSIStatus,
1823 rep->SCSIState, rep->TransferCount);
1826 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1827 * Volume if an error occurred (normal I/O retry). Use the original
1828 * CCB, but set a flag that this will be a retry so that it's sent to
1829 * the original volume. Free the command but reuse the CCB.
1831 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1832 mps_free_command(sc, cm);
1833 ccb->ccb_h.status = MPS_WD_RETRY;
1834 mpssas_action_scsiio(sassc, ccb);
/* Main IOCStatus -> CAM status translation. */
1838 switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1839 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
/* Underrun records the residual, then falls through to the
 * SUCCESS/RECOVERED handling below. */
1840 csio->resid = cm->cm_length - rep->TransferCount;
1842 case MPI2_IOCSTATUS_SUCCESS:
1843 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1845 if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1846 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1847 mpssas_log_command(cm, "recovered error\n");
1849 /* Completion failed at the transport level. */
1850 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1851 MPI2_SCSI_STATE_TERMINATED)) {
1852 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1856 /* In a modern packetized environment, an autosense failure
1857 * implies that there's not much else that can be done to
1858 * recover the command.
1860 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1861 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1866 * CAM doesn't care about SAS Response Info data, but if this is
1867 * the state check if TLR should be done. If not, clear the
1868 * TLR_bits for the target.
1870 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1871 ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1872 MPS_SCSI_RI_INVALID_FRAME)) {
1873 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1874 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1878 * Intentionally override the normal SCSI status reporting
1879 * for these two cases. These are likely to happen in a
1880 * multi-initiator environment, and we want to make sure that
1881 * CAM retries these commands rather than fail them.
1883 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1884 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1885 ccb->ccb_h.status = CAM_REQ_ABORTED;
1889 /* Handle normal status and sense */
1890 csio->scsi_status = rep->SCSIStatus;
1891 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1892 ccb->ccb_h.status = CAM_REQ_CMP;
1894 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
/* Copy autosense, clamped to both the reply's count and the CCB's
 * sense buffer size; compute the sense residual for CAM. */
1896 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1897 int sense_len, returned_sense_len;
1899 returned_sense_len = min(rep->SenseCount,
1900 sizeof(struct scsi_sense_data));
1901 if (returned_sense_len < ccb->csio.sense_len)
1902 ccb->csio.sense_resid = ccb->csio.sense_len -
1905 ccb->csio.sense_resid = 0;
1907 sense_len = min(returned_sense_len,
1908 ccb->csio.sense_len - ccb->csio.sense_resid);
/* NOTE(review): sizeof(&ccb->csio.sense_data) is the size of a
 * POINTER (4/8 bytes), not the sense buffer — this bzero almost
 * certainly should be sizeof(ccb->csio.sense_data). Confirm against
 * upstream FreeBSD, which fixed exactly this. */
1909 bzero(&ccb->csio.sense_data,
1910 sizeof(&ccb->csio.sense_data));
1911 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1912 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1916 * Check if this is an INQUIRY command. If it's a VPD inquiry,
1917 * and it's page code 0 (Supported Page List), and there is
1918 * inquiry data, and this is for a sequential access device, and
1919 * the device is an SSP target, and TLR is supported by the
1920 * controller, turn the TLR_bits value ON if page 0x90 is
1923 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1924 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1925 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1926 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1927 T_SEQUENTIAL) && (sc->control_TLR) &&
1928 (sc->mapping_table[csio->ccb_h.target_id].device_info &
1929 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1930 vpd_list = (struct scsi_vpd_supported_page_list *)
1932 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1934 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1935 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1936 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1937 csio->cdb_io.cdb_bytes[4];
/* Scan the supported-pages list for the TLR control page (0x90). */
1938 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1939 if (vpd_list->list[i] == 0x90) {
1946 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1947 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1949 * If devinfo is 0 this will be a volume. In that case don't
1950 * tell CAM that the volume is not there. We want volumes to
1951 * be enumerated until they are deleted/removed, not just
1954 if (cm->cm_targ->devinfo == 0)
1955 ccb->ccb_h.status = CAM_REQ_CMP;
1957 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1959 case MPI2_IOCSTATUS_INVALID_SGL:
1960 mps_print_scsiio_cmd(sc, cm);
1961 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1963 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1965 * This is one of the responses that comes back when an I/O
1966 * has been aborted. If it is because of a timeout that we
1967 * initiated, just set the status to CAM_CMD_TIMEOUT.
1968 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
1969 * command is the same (it gets retried, subject to the
1970 * retry counter), the only difference is what gets printed
1973 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1974 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1976 ccb->ccb_h.status = CAM_REQ_ABORTED;
1978 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1979 /* resid is ignored for this condition */
1981 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1983 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1984 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1986 * Since these are generally external (i.e. hopefully
1987 * transient transport-related) errors, retry these without
1988 * decrementing the retry count.
1990 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1991 mpssas_log_command(cm,
1992 "terminated ioc %x scsi %x state %x xfer %u\n",
1993 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1994 rep->TransferCount);
1996 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1997 case MPI2_IOCSTATUS_INTERNAL_ERROR:
1998 case MPI2_IOCSTATUS_INVALID_VPID:
1999 case MPI2_IOCSTATUS_INVALID_FIELD:
2000 case MPI2_IOCSTATUS_INVALID_STATE:
2001 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2002 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2003 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2004 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2005 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
/* Catch-all hard failure: report full residual and a generic error. */
2007 mpssas_log_command(cm,
2008 "completed ioc %x scsi %x state %x xfer %u\n",
2009 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2010 rep->TransferCount);
2011 csio->resid = cm->cm_length;
2012 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* A successful completion means chain frames are available again:
 * release the SIM queue if it was frozen. */
2016 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2017 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2018 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2019 mps_dprint(sc, MPS_INFO, "Command completed, "
2020 "unfreezing SIM queue\n");
/* On any error, freeze the device queue to preserve ordering. */
2023 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2024 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2025 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2028 mps_free_command(sc, cm);
/*
 * WarpDrive direct-drive I/O: for READ/WRITE 6/10/16 CDBs addressed to
 * the IR volume, translate the virtual LBA to a physical-disk LBA and
 * retarget the request at the member PhysDisk — but only when the I/O
 * stays within one stripe. Anything else (boundary crossings, other
 * opcodes, out-of-range LBAs) is left to go to the IR volume itself.
 *
 * NOTE(review): interior lines (braces, stripe_unit computation in some
 * branches, range-limit operands) are elided in this view.
 */
2033 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2035 pMpi2SCSIIORequest_t pIO_req;
2036 struct mps_softc *sc = sassc->sc;
2038 uint32_t physLBA, stripe_offset, stripe_unit;
2039 uint32_t io_size, column;
2040 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2043 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2044 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2045 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2046 * bit different than the 10/16 CDBs, handle them separately.
2048 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2049 CDB = pIO_req->CDB.CDB32;
2052 * Handle 6 byte CDBs.
2054 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2055 (CDB[0] == WRITE_6))) {
2057 * Get the transfer size in blocks.
2059 io_size = (cm->cm_length >> sc->DD_block_exponent);
2062 * Get virtual LBA given in the CDB.
/* 6-byte CDB: 21-bit LBA in CDB[1] bits 4:0 plus CDB[2..3]. */
2064 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2065 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2068 * Check that LBA range for I/O does not exceed volume's
2071 if ((virtLBA + (uint64_t)io_size - 1) <=
2074 * Check if the I/O crosses a stripe boundary. If not,
2075 * translate the virtual LBA to a physical LBA and set
2076 * the DevHandle for the PhysDisk to be used. If it
2077 * does cross a boundry, do normal I/O. To get the
2078 * right DevHandle to use, get the map number for the
2079 * column, then use that map number to look up the
2080 * DevHandle of the PhysDisk.
2082 stripe_offset = (uint32_t)virtLBA &
2083 (sc->DD_stripe_size - 1);
2084 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2085 physLBA = (uint32_t)virtLBA >>
2086 sc->DD_stripe_exponent;
2087 stripe_unit = physLBA / sc->DD_num_phys_disks;
2088 column = physLBA % sc->DD_num_phys_disks;
2089 pIO_req->DevHandle =
2090 sc->DD_column_map[column].dev_handle;
2091 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the CDB's LBA bytes in place with the physical LBA. */
2094 physLBA = (stripe_unit <<
2095 sc->DD_stripe_exponent) + stripe_offset;
2096 ptrLBA = &pIO_req->CDB.CDB32[1];
2097 physLBA_byte = (uint8_t)(physLBA >> 16);
2098 *ptrLBA = physLBA_byte;
2099 ptrLBA = &pIO_req->CDB.CDB32[2];
2100 physLBA_byte = (uint8_t)(physLBA >> 8);
2101 *ptrLBA = physLBA_byte;
2102 ptrLBA = &pIO_req->CDB.CDB32[3];
2103 physLBA_byte = (uint8_t)physLBA;
2104 *ptrLBA = physLBA_byte;
2107 * Set flag that Direct Drive I/O is
2110 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2117 * Handle 10 or 16 byte CDBs.
2119 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2120 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2121 (CDB[0] == WRITE_16))) {
2123 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2124 * are 0. If not, this is accessing beyond 2TB so handle it in
2125 * the else section. 10-byte CDB's are OK.
2127 if ((CDB[0] < READ_16) ||
2128 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2130 * Get the transfer size in blocks.
2132 io_size = (cm->cm_length >> sc->DD_block_exponent);
2135 * Get virtual LBA. Point to correct lower 4 bytes of
2136 * LBA in the CDB depending on command.
2138 lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2139 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2140 ((uint64_t)CDB[lba_idx + 1] << 16) |
2141 ((uint64_t)CDB[lba_idx + 2] << 8) |
2142 (uint64_t)CDB[lba_idx + 3];
2145 * Check that LBA range for I/O does not exceed volume's
2148 if ((virtLBA + (uint64_t)io_size - 1) <=
2151 * Check if the I/O crosses a stripe boundary.
2152 * If not, translate the virtual LBA to a
2153 * physical LBA and set the DevHandle for the
2154 * PhysDisk to be used. If it does cross a
2155 * boundry, do normal I/O. To get the right
2156 * DevHandle to use, get the map number for the
2157 * column, then use that map number to look up
2158 * the DevHandle of the PhysDisk.
2160 stripe_offset = (uint32_t)virtLBA &
2161 (sc->DD_stripe_size - 1);
2162 if ((stripe_offset + io_size) <=
2163 sc->DD_stripe_size) {
2164 physLBA = (uint32_t)virtLBA >>
2165 sc->DD_stripe_exponent;
2166 stripe_unit = physLBA /
2167 sc->DD_num_phys_disks;
2169 sc->DD_num_phys_disks;
2170 pIO_req->DevHandle =
2171 sc->DD_column_map[column].
2173 cm->cm_desc.SCSIIO.DevHandle =
2176 physLBA = (stripe_unit <<
2177 sc->DD_stripe_exponent) +
/* Patch the 4 LBA bytes of the 10/16-byte CDB in place. */
2180 &pIO_req->CDB.CDB32[lba_idx];
2181 physLBA_byte = (uint8_t)(physLBA >> 24);
2182 *ptrLBA = physLBA_byte;
2184 &pIO_req->CDB.CDB32[lba_idx + 1];
2185 physLBA_byte = (uint8_t)(physLBA >> 16);
2186 *ptrLBA = physLBA_byte;
2188 &pIO_req->CDB.CDB32[lba_idx + 2];
2189 physLBA_byte = (uint8_t)(physLBA >> 8);
2190 *ptrLBA = physLBA_byte;
2192 &pIO_req->CDB.CDB32[lba_idx + 3];
2193 physLBA_byte = (uint8_t)physLBA;
2194 *ptrLBA = physLBA_byte;
2197 * Set flag that Direct Drive I/O is
2200 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2205 * 16-byte CDB and the upper 4 bytes of the CDB are not
2206 * 0. Get the transfer size in blocks.
2208 io_size = (cm->cm_length >> sc->DD_block_exponent);
/* NOTE(review): '<< 54' below looks wrong — CDB[2] is the most
 * significant byte of a 64-bit LBA and the neighboring terms shift
 * 48/40/32/24/16/8, so this should be '<< 56'. Upstream FreeBSD
 * later fixed exactly this shift; confirm before changing. */
2213 virtLBA = ((uint64_t)CDB[2] << 54) |
2214 ((uint64_t)CDB[3] << 48) |
2215 ((uint64_t)CDB[4] << 40) |
2216 ((uint64_t)CDB[5] << 32) |
2217 ((uint64_t)CDB[6] << 24) |
2218 ((uint64_t)CDB[7] << 16) |
2219 ((uint64_t)CDB[8] << 8) |
2223 * Check that LBA range for I/O does not exceed volume's
2226 if ((virtLBA + (uint64_t)io_size - 1) <=
2229 * Check if the I/O crosses a stripe boundary.
2230 * If not, translate the virtual LBA to a
2231 * physical LBA and set the DevHandle for the
2232 * PhysDisk to be used. If it does cross a
2233 * boundry, do normal I/O. To get the right
2234 * DevHandle to use, get the map number for the
2235 * column, then use that map number to look up
2236 * the DevHandle of the PhysDisk.
2238 stripe_offset = (uint32_t)virtLBA &
2239 (sc->DD_stripe_size - 1);
2240 if ((stripe_offset + io_size) <=
2241 sc->DD_stripe_size) {
2242 physLBA = (uint32_t)(virtLBA >>
2243 sc->DD_stripe_exponent);
2244 stripe_unit = physLBA /
2245 sc->DD_num_phys_disks;
2247 sc->DD_num_phys_disks;
2248 pIO_req->DevHandle =
2249 sc->DD_column_map[column].
2251 cm->cm_desc.SCSIIO.DevHandle =
2254 physLBA = (stripe_unit <<
2255 sc->DD_stripe_exponent) +
2259 * Set upper 4 bytes of LBA to 0. We
2260 * assume that the phys disks are less
2261 * than 2 TB's in size. Then, set the
2264 pIO_req->CDB.CDB32[2] = 0;
2265 pIO_req->CDB.CDB32[3] = 0;
2266 pIO_req->CDB.CDB32[4] = 0;
2267 pIO_req->CDB.CDB32[5] = 0;
2268 ptrLBA = &pIO_req->CDB.CDB32[6];
2269 physLBA_byte = (uint8_t)(physLBA >> 24);
2270 *ptrLBA = physLBA_byte;
2271 ptrLBA = &pIO_req->CDB.CDB32[7];
2272 physLBA_byte = (uint8_t)(physLBA >> 16);
2273 *ptrLBA = physLBA_byte;
2274 ptrLBA = &pIO_req->CDB.CDB32[8];
2275 physLBA_byte = (uint8_t)(physLBA >> 8);
2276 *ptrLBA = physLBA_byte;
2277 ptrLBA = &pIO_req->CDB.CDB32[9];
2278 physLBA_byte = (uint8_t)physLBA;
2279 *ptrLBA = physLBA_byte;
2282 * Set flag that Direct Drive I/O is
2285 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2292 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request: validate the
 * reply, map IOC/SAS/SMP status to a CAM status, then sync/unload the
 * DMA map (both directions — request and response buffers) and free
 * the command. (FreeBSD >= 900026 SMP support.)
 *
 * NOTE(review): interior lines (xpt_done, braces) are elided here.
 */
2294 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2296 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2297 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2301 ccb = cm->cm_complete_data;
2304 * Currently there should be no way we can hit this case. It only
2305 * happens when we have a failure to allocate chain frames, and SMP
2306 * commands require two S/G elements only. That should be handled
2307 * in the standard request size.
2309 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2310 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2311 __func__, cm->cm_flags);
2312 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2316 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2318 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2319 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Recover the 64-bit SAS address from the request for logging. */
2323 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2324 sasaddr = le32toh(req->SASAddress.Low);
2325 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2327 if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2328 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2329 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2330 __func__, rpl->IOCStatus, rpl->SASStatus);
2331 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2335 mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2336 "%#jx completed successfully\n", __func__,
2337 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2339 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2340 ccb->ccb_h.status = CAM_REQ_CMP;
2342 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2346 * We sync in both directions because we had DMAs in the S/G list
2347 * in both directions.
2349 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2350 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2351 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2352 mps_free_command(sc, cm);
/*
 * Build and submit an MPI2 SMP passthrough request to the expander (or
 * SMP target) at 'sasaddr'.  The request and response buffers from the
 * XPT_SMP_IO CCB are mapped as a two-element uio so a single
 * mps_map_command() call produces both S/G entries.  Completion is
 * handled asynchronously by mpssas_smpio_complete().
 *
 * NOTE(review): corrupted extraction — embedded line numbers, and dropped
 * lines include the 'sc = sassc->sc' assignment, error returns after the
 * CAM_REQ_INVALID assignments, the SGLFlags lvalue on the line before
 * 2443, and the 'bailout' cleanup label.  Restore from upstream.
 */
2357 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2359 struct mps_command *cm;
2360 uint8_t *request, *response;
2361 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2362 struct mps_softc *sc;
2369 * XXX We don't yet support physical addresses here.
2371 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2372 mps_printf(sc, "%s: physical addresses not supported\n",
2374 ccb->ccb_h.status = CAM_REQ_INVALID;
2380 * If the user wants to send an S/G list, check to make sure they
2381 * have single buffers.
2383 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2385 * The chip does not support more than one buffer for the
2386 * request or response.
2388 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2389 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2390 mps_printf(sc, "%s: multiple request or response "
2391 "buffer segments not supported for SMP\n",
2393 ccb->ccb_h.status = CAM_REQ_INVALID;
2399 * The CAM_SCATTER_VALID flag was originally implemented
2400 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2401 * We have two. So, just take that flag to mean that we
2402 * might have S/G lists, and look at the S/G segment count
2403 * to figure out whether that is the case for each individual
2406 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2407 bus_dma_segment_t *req_sg;
2409 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2410 request = (uint8_t *)req_sg[0].ds_addr;
2412 request = ccb->smpio.smp_request;
2414 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2415 bus_dma_segment_t *rsp_sg;
2417 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2418 response = (uint8_t *)rsp_sg[0].ds_addr;
2420 response = ccb->smpio.smp_response;
/* No S/G lists: the CCB carries plain virtual-address buffers. */
2422 request = ccb->smpio.smp_request;
2423 response = ccb->smpio.smp_response;
2426 cm = mps_alloc_command(sc);
2428 mps_printf(sc, "%s: cannot allocate command\n", __func__);
2429 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2434 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2435 bzero(req, sizeof(*req));
2436 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2438 /* Allow the chip to use any route to this SAS address. */
2439 req->PhysicalPort = 0xff;
2441 req->RequestDataLength = ccb->smpio.smp_request_len;
2443 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2445 mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
2446 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2448 mpi_init_sge(cm, req, &req->SGL);
2451 * Set up a uio to pass into mps_map_command(). This allows us to
2452 * do one map command, and one busdma call in there.
2454 cm->cm_uio.uio_iov = cm->cm_iovec;
2455 cm->cm_uio.uio_iovcnt = 2;
2456 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2459 * The read/write flag isn't used by busdma, but set it just in
2460 * case. This isn't exactly accurate, either, since we're going in
2463 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 = SMP request (data out), iovec 1 = SMP response (data in). */
2465 cm->cm_iovec[0].iov_base = request;
2466 cm->cm_iovec[0].iov_len = req->RequestDataLength;
2467 cm->cm_iovec[1].iov_base = response;
2468 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2470 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2471 cm->cm_iovec[1].iov_len;
2474 * Trigger a warning message in mps_data_cb() for the user if we
2475 * wind up exceeding two S/G segments. The chip expects one
2476 * segment for the request and another for the response.
2478 cm->cm_max_segs = 2;
2480 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2481 cm->cm_complete = mpssas_smpio_complete;
2482 cm->cm_complete_data = ccb;
2485 * Tell the mapping code that we're using a uio, and that this is
2486 * an SMP passthrough request. There is a little special-case
2487 * logic there (in mps_data_cb()) to handle the bidirectional
2490 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2491 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2493 /* The chip data format is little endian. */
2494 req->SASAddress.High = htole32(sasaddr >> 32);
2495 req->SASAddress.Low = htole32(sasaddr);
2498 * XXX Note that we don't have a timeout/abort mechanism here.
2499 * From the manual, it looks like task management requests only
2500 * work for SCSI IO and SATA passthrough requests. We may need to
2501 * have a mechanism to retry requests in the event of a chip reset
2502 * at least. Hopefully the chip will ensure that any errors short
2503 * of that are relayed back to the driver.
2505 error = mps_map_command(sc, cm);
2506 if ((error != 0) && (error != EINPROGRESS)) {
2507 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
/* Mapping failed outright: release the command and fail the CCB. */
2515 mps_free_command(sc, cm);
2516 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * XPT_SMP_IO action handler: determine the SAS address the SMP request
 * should be routed to — either the target itself (if it has an embedded
 * SMP target, e.g. an expander/SES device) or the target's parent
 * expander — then hand off to mpssas_send_smpcmd().
 *
 * NOTE(review): corrupted extraction — embedded line numbers; dropped
 * lines include the 'sc = sassc->sc' assignment, xpt_done()/return after
 * the error statuses, and the 'else if' framing around the parent-handle
 * lookup.  Restore from upstream mps_sas.c.
 */
2523 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2525 struct mps_softc *sc;
2526 struct mpssas_target *targ;
2527 uint64_t sasaddr = 0;
2532 * Make sure the target exists.
2534 targ = &sassc->targets[ccb->ccb_h.target_id];
2535 if (targ->handle == 0x0) {
2536 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
2537 ccb->ccb_h.target_id);
2538 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2544 * If this device has an embedded SMP target, we'll talk to it
2546 * figure out what the expander's address is.
2548 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2549 sasaddr = targ->sasaddr;
2552 * If we don't have a SAS address for the expander yet, try
2553 * grabbing it from the page 0x83 information cached in the
2554 * transport layer for this target. LSI expanders report the
2555 * expander SAS address as the port-associated SAS address in
2556 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2559 * XXX KDM disable this for now, but leave it commented out so that
2560 * it is obvious that this is another possible way to get the SAS
2563 * The parent handle method below is a little more reliable, and
2564 * the other benefit is that it works for devices other than SES
2565 * devices. So you can send a SMP request to a da(4) device and it
2566 * will get routed to the expander that device is attached to.
2567 * (Assuming the da(4) device doesn't contain an SMP target...)
/* NOTE(review): line 2571 below is part of the disabled (commented-out)
 * VPD-0x83 alternative described above — verify against upstream. */
2571 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2575 * If we still don't have a SAS address for the expander, look for
2576 * the parent device of this device, which is probably the expander.
2579 #ifdef OLD_MPS_PROBE
2580 struct mpssas_target *parent_target;
2583 if (targ->parent_handle == 0x0) {
2584 mps_printf(sc, "%s: handle %d does not have a valid "
2585 "parent handle!\n", __func__, targ->handle);
2586 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Old probe path: look the parent up by handle in the target table. */
2589 #ifdef OLD_MPS_PROBE
2590 parent_target = mpssas_find_target_by_handle(sassc, 0,
2591 targ->parent_handle);
2593 if (parent_target == NULL) {
2594 mps_printf(sc, "%s: handle %d does not have a valid "
2595 "parent target!\n", __func__, targ->handle);
2596 ccb->ccb_h.status = CAM_REQ_INVALID;
2600 if ((parent_target->devinfo &
2601 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2602 mps_printf(sc, "%s: handle %d parent %d does not "
2603 "have an SMP target!\n", __func__,
2604 targ->handle, parent_target->handle);
2605 ccb->ccb_h.status = CAM_REQ_INVALID;
2610 sasaddr = parent_target->sasaddr;
2611 #else /* OLD_MPS_PROBE */
/* New probe path: parent devinfo/sasaddr are cached on the target. */
2612 if ((targ->parent_devinfo &
2613 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2614 mps_printf(sc, "%s: handle %d parent %d does not "
2615 "have an SMP target!\n", __func__,
2616 targ->handle, targ->parent_handle);
2617 ccb->ccb_h.status = CAM_REQ_INVALID;
2621 if (targ->parent_sasaddr == 0x0) {
2622 mps_printf(sc, "%s: handle %d parent handle %d does "
2623 "not have a valid SAS address!\n",
2624 __func__, targ->handle, targ->parent_handle);
2625 ccb->ccb_h.status = CAM_REQ_INVALID;
2629 sasaddr = targ->parent_sasaddr;
2630 #endif /* OLD_MPS_PROBE */
/* No usable SAS address found by any method: fail the CCB. */
2635 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
2636 __func__, targ->handle);
2637 ccb->ccb_h.status = CAM_REQ_INVALID;
2640 mpssas_send_smpcmd(sassc, ccb, sasaddr);
2648 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler: allocate a high-priority command and
 * issue an MPI2 target-reset task management request for the CCB's
 * target.  Completion is handled by mpssas_resetdev_complete().
 *
 * NOTE(review): corrupted extraction — embedded line numbers; dropped
 * lines include the 'sc = sassc->sc' assignment (so 'sc' below is used
 * uninitialized as shown), the NULL check on 'tm', and xpt_done()/return
 * in the allocation-failure path.  Restore from upstream mps_sas.c.
 */
2651 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2653 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2654 struct mps_softc *sc;
2655 struct mps_command *tm;
2656 struct mpssas_target *targ;
2658 mps_dprint(sassc->sc, MPS_TRACE, __func__);
2659 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2662 tm = mps_alloc_command(sc);
2664 mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
2665 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2670 targ = &sassc->targets[ccb->ccb_h.target_id];
2671 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2672 req->DevHandle = targ->handle;
2673 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2674 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2676 /* SAS Hard Link Reset / SATA Link Reset */
2677 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests must go out on the high-priority queue. */
2680 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2681 tm->cm_complete = mpssas_resetdev_complete;
2682 tm->cm_complete_data = ccb;
2683 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task management request sent
 * by mpssas_action_resetdev(): map the TM response code to a CAM status
 * on the CCB, announce the reset to CAM, and free the TM command.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; dropped
 * lines include the req->DevHandle argument to the printf at 2708-2710,
 * the 'bailout' label, xpt_done(ccb), and the closing brace.  Restore
 * from upstream mps_sas.c.
 */
2687 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2689 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2692 mps_dprint(sc, MPS_TRACE, __func__);
2693 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2695 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2696 ccb = tm->cm_complete_data;
2699 * Currently there should be no way we can hit this case. It only
2700 * happens when we have a failure to allocate chain frames, and
2701 * task management commands don't have S/G lists.
2703 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2704 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2706 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2708 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2709 "This should not happen!\n", __func__, tm->cm_flags,
2711 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2715 kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2716 resp->IOCStatus, resp->ResponseCode);
/* TM_COMPLETE means the firmware performed the reset successfully. */
2718 if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2719 ccb->ccb_h.status = CAM_REQ_CMP;
2720 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2724 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* TM commands are freed via the TM-specific path, not mps_free_command. */
2728 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point, used when interrupts are unavailable (e.g.
 * during a kernel dump/panic).  Clears the MPS_TRACE debug flag so
 * per-command trace messages don't drown the console, then processes
 * pending completions with the driver lock held.
 *
 * NOTE(review): corrupted extraction — embedded line numbers and missing
 * braces; restore from upstream mps_sas.c.
 */
2733 mpssas_poll(struct cam_sim *sim)
2735 struct mpssas_softc *sassc;
2737 sassc = cam_sim_softc(sim);
2739 if (sassc->sc->mps_debug & MPS_TRACE) {
2740 /* frequent debug messages during a panic just slow
2741 * everything down too much.
2743 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2744 sassc->sc->mps_debug &= ~MPS_TRACE;
2747 mps_intr_locked(sassc->sc);
/*
 * Completion callback for a queued bus/target rescan CCB: log the path
 * that finished, then free the path and the CCB.  On pre-1000006 CAM,
 * also gather EEDP information for existing LUNs before the scan is
 * considered complete.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; the
 * path_str[] declaration, early return, the #endif, and the closing
 * brace were dropped.  Restore from upstream mps_sas.c.
 */
2751 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2753 struct mpssas_softc *sassc;
2756 if (done_ccb == NULL)
/* The sassc pointer was stashed in the CCB by mpssas_rescan(). */
2759 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2761 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2763 xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2764 mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2766 xpt_free_path(done_ccb->ccb_h.path);
2767 xpt_free_ccb(done_ccb);
2769 #if __FreeBSD_version < 1000006
2771 * Before completing scan, get EEDP stuff for all of the existing
2774 mpssas_check_eedp(sassc);
2779 /* thread to handle bus rescans */
/*
 * Kernel thread that services queued bus rescans: sleeps on ccb_scanq,
 * drains queued CCBs when woken, and exits when MPSSAS_SHUTDOWN is set,
 * clearing MPSSAS_SCANTHREAD and waking any waiter on sassc->flags.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; the outer
 * for(;;) loop, the 'sc = sassc->sc' assignment, the xpt_action() call
 * that actually dispatches each dequeued CCB, and kthread_exit() were
 * dropped.  Restore from upstream mps_sas.c.
 */
2781 mpssas_scanner_thread(void *arg)
2783 struct mpssas_softc *sassc;
2784 struct mps_softc *sc;
2787 sassc = (struct mpssas_softc *)arg;
2790 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
/* Sleep until mpssas_rescan() queues work or shutdown is requested. */
2794 lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 0);
2795 if (sassc->flags & MPSSAS_SHUTDOWN) {
2796 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2799 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
2802 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
/* Tell mpssas_free() the scanner is gone before the thread exits. */
2806 sassc->flags &= ~MPSSAS_SCANTHREAD;
2807 wakeup(&sassc->flags);
2809 mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
/*
 * Queue a rescan CCB for the scanner thread: stash the softc pointer and
 * completion callback in the CCB, set it up at rescan priority, append
 * it to ccb_scanq, and wake the scanner thread.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; the
 * path_str[] declaration and the dropped lines around 2820-2825
 * (presumably the scanner-thread-exists check — verify upstream) are
 * missing.  Restore from upstream mps_sas.c.
 */
2814 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2818 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2820 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2825 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2826 mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2828 /* Prepare request */
2829 ccb->ccb_h.ppriv_ptr1 = sassc;
2830 ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2831 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2832 TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2833 wakeup(&sassc->ccb_scanq);
2836 #if __FreeBSD_version >= 1000006
/*
 * CAM async-event callback (FreeBSD >= 1000006 path).  On
 * AC_ADVINFO_CHANGED with buftype CDAI_TYPE_RCAPLONG, re-fetch the long
 * read-capacity data for the affected LUN via XPT_DEV_ADVINFO and record
 * whether the LUN is EEDP (protection-information) formatted, creating a
 * LUN list entry if one doesn't exist yet.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; dropped
 * lines include declarations (lunid, found_lun, buftype), the
 * 'sassc = sc->sassc' assignment, break statements, SLIST_FOREACH body
 * braces, the switch's default case, and the closing braces.  Restore
 * from upstream mps_sas.c.
 */
2838 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2841 struct mps_softc *sc;
2843 sc = (struct mps_softc *)callback_arg;
2846 case AC_ADVINFO_CHANGED: {
2847 struct mpssas_target *target;
2848 struct mpssas_softc *sassc;
2849 struct scsi_read_capacity_data_long rcap_buf;
2850 struct ccb_dev_advinfo cdai;
2851 struct mpssas_lun *lun;
/* The advinfo buffer type arrives packed in the callback argument. */
2856 buftype = (uintptr_t)arg;
2862 * We're only interested in read capacity data changes.
2864 if (buftype != CDAI_TYPE_RCAPLONG)
2868 * We're only interested in devices that are attached to
2871 if (xpt_path_path_id(path) != sassc->sim->path_id)
2875 * We should have a handle for this, but check to make sure.
2877 target = &sassc->targets[xpt_path_target_id(path)];
2878 if (target->handle == 0)
2881 lunid = xpt_path_lun_id(path);
/* See if we already track this LUN on the target. */
2883 SLIST_FOREACH(lun, &target->luns, lun_link) {
2884 if (lun->lun_id == lunid) {
2890 if (found_lun == 0) {
2891 lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
2894 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
2895 "LUN for EEDP support.\n");
2898 lun->lun_id = lunid;
2899 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Ask CAM for the cached long read-capacity data for this path. */
2902 bzero(&rcap_buf, sizeof(rcap_buf));
2903 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
2904 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
2905 cdai.ccb_h.flags = CAM_DIR_IN;
2906 cdai.buftype = CDAI_TYPE_RCAPLONG;
2908 cdai.bufsiz = sizeof(rcap_buf);
2909 cdai.buf = (uint8_t *)&rcap_buf;
2910 xpt_action((union ccb *)&cdai);
2911 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
2912 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN set means the LUN is formatted with protection info. */
2915 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
2916 && (rcap_buf.prot & SRC16_PROT_EN)) {
2917 lun->eedp_formatted = TRUE;
2918 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
2920 lun->eedp_formatted = FALSE;
2921 lun->eedp_block_size = 0;
2929 #else /* __FreeBSD_version >= 1000006 */
/*
 * Pre-1000006 CAM path for EEDP detection: walk every target and (via a
 * do/while over LUN ids) every LUN that has a CAM periph, create LUN
 * list entries as needed, and issue a READ CAPACITY 16 whose completion
 * (mpssas_read_cap_done) records EEDP formatting and block size.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; dropped
 * lines include declarations (lunid, found_lun, csio, ccb), 'continue'
 * in the handle==0 case, the 'lunid = 0; do {' loop opener, the rcap_buf
 * assignment target at 2960, the xpt_action() dispatch, 'lunid++', and
 * most closing braces.  Restore from upstream mps_sas.c.
 */
2932 mpssas_check_eedp(struct mpssas_softc *sassc)
2934 struct mps_softc *sc = sassc->sc;
2935 struct ccb_scsiio *csio;
2936 struct scsi_read_capacity_16 *scsi_cmd;
2937 struct scsi_read_capacity_eedp *rcap_buf;
2939 path_id_t pathid = cam_sim_path(sassc->sim);
2940 target_id_t targetid;
2942 struct cam_periph *found_periph;
2943 struct mpssas_target *target;
2944 struct mpssas_lun *lun;
2948 * Issue a READ CAPACITY 16 command to each LUN of each target. This
2949 * info is used to determine if the LUN is formatted for EEDP support.
2951 for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
2952 target = &sassc->targets[targetid];
2953 if (target->handle == 0x0) {
/* One data buffer per command; freed in mpssas_read_cap_done(). */
2960 kmalloc(sizeof(struct scsi_read_capacity_eedp),
2961 M_MPT2, M_NOWAIT | M_ZERO);
2962 if (rcap_buf == NULL) {
2963 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
2964 "capacity buffer for EEDP support.\n");
2968 ccb = kmalloc(sizeof(union ccb), M_TEMP,
2971 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2972 pathid, targetid, lunid) != CAM_REQ_CMP) {
2973 mps_dprint(sc, MPS_FAULT, "Unable to create "
2974 "path for EEDP support\n");
2975 kfree(rcap_buf, M_MPT2);
2981 * If a periph is returned, the LUN exists. Create an
2982 * entry in the target's LUN list.
2984 if ((found_periph = cam_periph_find(ccb->ccb_h.path,
2987 * If LUN is already in list, don't create a new
2991 SLIST_FOREACH(lun, &target->luns, lun_link) {
2992 if (lun->lun_id == lunid) {
2998 lun = kmalloc(sizeof(struct mpssas_lun),
2999 M_MPT2, M_WAITOK | M_ZERO);
3000 lun->lun_id = lunid;
3001 SLIST_INSERT_HEAD(&target->luns, lun,
3007 * Issue a READ CAPACITY 16 command for the LUN.
3008 * The mpssas_read_cap_done function will load
3009 * the read cap info into the LUN struct.
3012 csio->ccb_h.func_code = XPT_SCSI_IO;
3013 csio->ccb_h.flags = CAM_DIR_IN;
3014 csio->ccb_h.retry_count = 4;
3015 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3016 csio->ccb_h.timeout = 60000;
3017 csio->data_ptr = (uint8_t *)rcap_buf;
3018 csio->dxfer_len = sizeof(struct
3019 scsi_read_capacity_eedp);
3020 csio->sense_len = MPS_SENSE_LEN;
3021 csio->cdb_len = sizeof(*scsi_cmd);
3022 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the READ CAPACITY 16 CDB (opcode 0x9E / service action 0x10). */
3024 scsi_cmd = (struct scsi_read_capacity_16 *)
3025 &csio->cdb_io.cdb_bytes;
3026 bzero(scsi_cmd, sizeof(*scsi_cmd));
3027 scsi_cmd->opcode = 0x9E;
3028 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* CDB byte 13 = allocation length (fits in one byte here). */
3029 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
3030 scsi_read_capacity_eedp);
3033 * Set the path, target and lun IDs for the READ
3036 ccb->ccb_h.path_id =
3037 xpt_path_path_id(ccb->ccb_h.path);
3038 ccb->ccb_h.target_id =
3039 xpt_path_target_id(ccb->ccb_h.path);
3040 ccb->ccb_h.target_lun =
3041 xpt_path_lun_id(ccb->ccb_h.path);
3043 ccb->ccb_h.ppriv_ptr1 = sassc;
/* No periph for this LUN: discard the buffer and path. */
3046 kfree(rcap_buf, M_MPT2);
3047 xpt_free_path(ccb->ccb_h.path);
/* Keep probing successive LUN ids until one has no periph. */
3050 } while (found_periph);
/*
 * Completion callback for the READ CAPACITY 16 issued by
 * mpssas_check_eedp(): locate the matching LUN entry on the target and
 * record whether the LUN is EEDP formatted (protect bit 0) and its block
 * size, then free the data buffer, path, and CCB.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; the early
 * return, 'continue' in the FOREACH, 'break' after filling the LUN, and
 * the closing brace were dropped.  Restore from upstream mps_sas.c.
 */
3056 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3058 struct mpssas_softc *sassc;
3059 struct mpssas_target *target;
3060 struct mpssas_lun *lun;
3061 struct scsi_read_capacity_eedp *rcap_buf;
3063 if (done_ccb == NULL)
3066 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3069 * Get the LUN ID for the path and look it up in the LUN list for the
3072 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3073 target = &sassc->targets[done_ccb->ccb_h.target_id];
3074 SLIST_FOREACH(lun, &target->luns, lun_link) {
3075 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3079 * Got the LUN in the target's LUN list. Fill it in
3080 * with EEDP info. If the READ CAP 16 command had some
3081 * SCSI error (common if command is not supported), mark
3082 * the lun as not supporting EEDP and set the block size
3085 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3086 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3087 lun->eedp_formatted = FALSE;
3088 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte = PROT_EN: EEDP formatting enabled. */
3092 if (rcap_buf->protect & 0x01) {
3093 lun->eedp_formatted = TRUE;
3094 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3099 // Finished with this CCB and path.
3100 kfree(rcap_buf, M_MPT2);
3101 xpt_free_path(done_ccb->ccb_h.path);
3102 xpt_free_ccb(done_ccb);
3104 #endif /* __FreeBSD_version >= 1000006 */
/*
 * Kick off SAS topology discovery: bump the startup refcount (keeping
 * the simq frozen until discovery completes), set wait_for_port_enable,
 * and send the MPI2 port-enable request.
 *
 * NOTE(review): corrupted extraction — embedded line numbers and missing
 * braces / 'sassc = sc->sassc' assignment; restore from upstream.
 */
3107 mpssas_startup(struct mps_softc *sc)
3109 struct mpssas_softc *sassc;
3112 * Send the port enable message and set the wait_for_port_enable flag.
3113 * This flag helps to keep the simq frozen until all discovery events
3117 mpssas_startup_increment(sassc);
3118 sc->wait_for_port_enable = 1;
3119 mpssas_send_portenable(sc);
/*
 * Allocate a command and submit an MPI2 PORT_ENABLE request to the IOC;
 * completion is handled by mpssas_portenable_complete().  Returns an
 * error (EBUSY upstream) if no command could be allocated.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; the error
 * return value, 'return (0);', and closing brace were dropped.
 */
3124 mpssas_send_portenable(struct mps_softc *sc)
3126 MPI2_PORT_ENABLE_REQUEST *request;
3127 struct mps_command *cm;
3129 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3131 if ((cm = mps_alloc_command(sc)) == NULL)
3133 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3134 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3135 request->MsgFlags = 0;
3137 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3138 cm->cm_complete = mpssas_portenable_complete;
/* Port enable carries no data; map just queues the request frame. */
3142 mps_map_command(sc, cm);
3143 mps_dprint(sc, MPS_TRACE,
3144 "mps_send_portenable finished cm %p req %p complete %p\n",
3145 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT_ENABLE request: log any failure, free
 * the command, tear down the config intrhook, optionally hide WarpDrive
 * physical disks before the bus scan, then clear wait_for_port_enable,
 * drop the startup refcount, and release the simq freeze taken before
 * port enable.
 *
 * NOTE(review): corrupted extraction — embedded line numbers; dropped
 * lines include declarations (reply pointer checks framing, loop index
 * i), the 'sassc = sc->sassc' assignment, target->parent/phy clears
 * around 3203, and the closing braces.  Restore from upstream.
 */
3150 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3152 MPI2_PORT_ENABLE_REPLY *reply;
3153 struct mpssas_softc *sassc;
3154 struct mpssas_target *target;
3157 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3161 * Currently there should be no way we can hit this case. It only
3162 * happens when we have a failure to allocate chain frames, and
3163 * port enable commands don't have S/G lists.
3165 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3166 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3167 "This should not happen!\n", __func__, cm->cm_flags);
/* A NULL reply or a non-success IOCStatus is logged but not fatal. */
3170 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3172 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3173 else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3174 MPI2_IOCSTATUS_SUCCESS)
3175 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3177 mps_free_command(sc, cm);
3178 if (sc->mps_ich.ich_arg != NULL) {
3179 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3180 config_intrhook_disestablish(&sc->mps_ich);
3181 sc->mps_ich.ich_arg = NULL;
3185 * Get WarpDrive info after discovery is complete but before the scan
3186 * starts. At this point, all devices are ready to be exposed to the
3187 * OS. If devices should be hidden instead, take them out of the
3188 * 'targets' array before the scan. The devinfo for a disk will have
3189 * some info and a volume's will be 0. Use that to remove disks.
3191 mps_wd_config_pages(sc);
3192 if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
3193 && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
3194 || (sc->WD_valid_config && (sc->WD_hide_expose ==
3195 MPS_WD_HIDE_IF_VOLUME))) {
3196 for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
3197 target = &sassc->targets[i];
3198 if (target->devinfo) {
/* Zeroing the entry hides the WarpDrive phys disk from the scan. */
3199 target->devinfo = 0x0;
3200 target->encl_handle = 0x0;
3201 target->encl_slot = 0x0;
3202 target->handle = 0x0;
3204 target->linkrate = 0x0;
3205 target->flags = 0x0;
3211 * Done waiting for port enable to complete. Decrement the refcount.
3212 * If refcount is 0, discovery is complete and a rescan of the bus can
3213 * take place. Since the simq was explicitly frozen before port
3214 * enable, it must be explicitly released here to keep the
3215 * freeze/release count in sync.
3217 sc->wait_for_port_enable = 0;
3218 sc->port_enable_complete = 1;
3219 mpssas_startup_decrement(sassc);
3220 xpt_release_simq(sassc->sim, 1);