2 * Copyright (c) 2009 Yahoo! Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2011 LSI Corp.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
39 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * LSI MPT-Fusion Host Adapter FreeBSD
53 * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
56 /* Communications core for LSI MPT2 */
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
69 #include <sys/malloc.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
80 #include <machine/stdarg.h>
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
108 #define MPSSAS_DISCOVERY_TIMEOUT 20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
112 * static array to check SCSI OpCode for EEDP protection bits
114 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
115 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
116 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Per-opcode EEDP protection-flag lookup table, indexed by SCSI CDB
 * opcode (0x00-0xff).  Non-zero entries carry the MPI2 EEDP flag set
 * (PRO_R = check/remove on reads, PRO_W = insert on writes, PRO_V =
 * insert for verify-class opcodes) applied when building protected
 * SCSI I/O requests.  The populated rows correspond to the 6-, 10-
 * and 16-byte READ/WRITE/VERIFY opcode groups (0x28/0x2A/0x2E/0x2F,
 * 0x88/0x8A/0x8E/0x8F, 0xA8/0xAA/0xAE/0xAF) plus WRITE(6) 0x41 —
 * NOTE(review): 0x41 and 0x93 look like WRITE SAME variants; confirm
 * against the SCSI opcode list before relying on this mapping.
 */
117 static uint8_t op_code_prot[256] = {
118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
120 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
122 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
123 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
124 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
125 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
126 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
127 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
128 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
136 static void mpssas_log_command(struct mps_command *, const char *, ...)
138 #if 0 /* XXX unused */
139 static void mpssas_discovery_timeout(void *data);
141 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
142 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
143 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
144 static void mpssas_poll(struct cam_sim *sim);
145 static void mpssas_scsiio_timeout(void *data);
146 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
147 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
148 struct mps_command *cm, union ccb *ccb);
149 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
150 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
151 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
152 #if __FreeBSD_version >= 900026
153 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
154 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
156 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
157 #endif //FreeBSD_version >= 900026
158 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
159 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
160 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
161 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
162 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
163 static void mpssas_scanner_thread(void *arg);
164 #if __FreeBSD_version >= 1000006
165 static void mpssas_async(void *callback_arg, uint32_t code,
166 struct cam_path *path, void *arg);
168 static void mpssas_check_eedp(struct mpssas_softc *sassc);
169 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
171 static int mpssas_send_portenable(struct mps_softc *sc);
172 static void mpssas_portenable_complete(struct mps_softc *sc,
173 struct mps_command *cm);
/*
 * Linear scan of the target array for the entry whose firmware device
 * handle matches 'handle', beginning at index 'start'.  Returns the
 * matching target (continuation past the visible lines presumably
 * returns NULL when no entry matches — truncated here).
 */
175 struct mpssas_target *
176 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
178 struct mpssas_target *target;
181 for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
182 target = &sassc->targets[i];
183 if (target->handle == handle)
190 /* we need to freeze the simq during attach and diag reset, to avoid failing
191 * commands before device handles have been found by discovery. Since
192 * discovery involves reading config pages and possibly sending commands,
193 * discovery actions may continue even after we receive the end of discovery
194 * event, so refcount discovery actions instead of assuming we can unfreeze
195 * the simq when we get the event.
/*
 * Take a reference on discovery-related activity.  On the 0->1
 * transition while MPSSAS_IN_STARTUP is set, freeze the CAM simq so
 * no commands are dispatched before device handles are known (see
 * the refcounting rationale in the comment above).
 */
198 mpssas_startup_increment(struct mpssas_softc *sassc)
200 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
201 if (sassc->startup_refcount++ == 0) {
202 /* just starting, freeze the simq */
203 mps_dprint(sassc->sc, MPS_INFO,
204 "%s freezing simq\n", __func__);
205 xpt_freeze_simq(sassc->sim, 1);
207 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
208 sassc->startup_refcount);
/*
 * Drop a discovery reference.  When the last reference goes away
 * during startup, clear MPSSAS_IN_STARTUP, release the simq frozen by
 * mpssas_startup_increment(), and rescan the whole bus (NULL target =
 * wildcard) to pick up the final topology.
 */
213 mpssas_startup_decrement(struct mpssas_softc *sassc)
215 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
216 if (--sassc->startup_refcount == 0) {
217 /* finished all discovery-related actions, release
218 * the simq and rescan for the latest topology.
220 mps_dprint(sassc->sc, MPS_INFO,
221 "%s releasing simq\n", __func__);
222 sassc->flags &= ~MPSSAS_IN_STARTUP;
223 xpt_release_simq(sassc->sim, 1);
224 mpssas_rescan_target(sassc->sc, NULL);
226 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
227 sassc->startup_refcount);
231 /* LSI's firmware requires us to stop sending commands when we're doing task
232 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command frame for a task-management (TM)
 * request.  The first outstanding TM (tm_count 0->1) freezes the simq,
 * since the firmware requires normal I/O to be quiesced while task
 * management is in flight (see comment above).  Presumably returns
 * 'tm' past the visible lines — truncated here.
 */
236 mpssas_alloc_tm(struct mps_softc *sc)
238 struct mps_command *tm;
240 tm = mps_alloc_high_priority_command(sc);
242 if (sc->sassc->tm_count++ == 0) {
243 mps_printf(sc, "%s freezing simq\n", __func__);
244 xpt_freeze_simq(sc->sassc->sim, 1);
246 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
247 sc->sassc->tm_count);
/*
 * Release a TM command allocated by mpssas_alloc_tm().  Dropping the
 * last outstanding TM (tm_count -> 0) releases the simq frozen at
 * allocation time; a private refcount (rather than CAM's) makes diag
 * reset cleanup simpler.
 */
253 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
258 /* if there are no TMs in use, we can release the simq. We use our
259 * own refcount so that it's easier for a diag reset to cleanup and
262 if (--sc->sassc->tm_count == 0) {
263 mps_printf(sc, "%s releasing simq\n", __func__);
264 xpt_release_simq(sc->sassc->sim, 1);
266 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
267 sc->sassc->tm_count);
269 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when
 * 'targ' is NULL (targetid becomes CAM_TARGET_WILDCARD).  The target
 * id is derived from the target's index in the sassc->targets array.
 * Allocates a CCB and hands it to mpssas_rescan(); note the func_code
 * is hardwired to XPT_SCAN_BUS regardless of targetid (see XXX).
 */
274 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
276 struct mpssas_softc *sassc = sc->sassc;
278 target_id_t targetid;
281 pathid = cam_sim_path(sassc->sim);
283 targetid = CAM_TARGET_WILDCARD;
285 targetid = targ - sassc->targets;
288 * Allocate a CCB and schedule a rescan.
290 ccb = xpt_alloc_ccb();
292 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
293 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
294 mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
299 /* XXX Hardwired to scan the bus for now */
300 ccb->ccb_h.func_code = XPT_SCAN_BUS;
301 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
302 mpssas_rescan(sassc, ccb);
/*
 * printf-style logger that prefixes the message with context about the
 * command: the CAM path and decoded CDB when a CCB is attached,
 * otherwise a "(noperiph:...)" sim/target tuple, always followed by
 * the request SMID.  Builds the line in an on-stack sbuf and emits it
 * with a single kprintf so it isn't interleaved with other output.
 */
306 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
316 sbuf_new(&sb, str, sizeof(str), 0);
320 if (cm->cm_ccb != NULL) {
321 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
323 sbuf_cat(&sb, path_str);
324 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
325 scsi_command_string(&cm->cm_ccb->csio, &sb);
326 sbuf_printf(&sb, "length %d ",
327 cm->cm_ccb->csio.dxfer_len);
331 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
332 cam_sim_name(cm->cm_sc->sassc->sim),
333 cam_sim_unit(cm->cm_sc->sassc->sim),
334 cam_sim_bus(cm->cm_sc->sassc->sim),
335 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
339 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
340 sbuf_vprintf(&sb, fmt, ap);
342 kprintf("%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  Unlike bare-drive removal, no
 * follow-up SAS_OP_REMOVE_DEVICE is needed for a volume (see comment
 * below at mpssas_prepare_volume_remove), so on success the target
 * slot is cleared here directly.  A NULL reply indicates a diag
 * reset raced with the TM; an IOCStatus failure leaves the target
 * intact so the id isn't accidentally reused.
 */
348 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
350 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
351 struct mpssas_target *targ;
354 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
356 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* device handle was stashed in cm_complete_data by the submitter */
357 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
361 /* XXX retry the remove after the diag reset completes? */
362 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
364 mpssas_free_tm(sc, tm);
368 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
369 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
370 reply->IOCStatus, handle);
371 mpssas_free_tm(sc, tm);
375 mps_printf(sc, "Reset aborted %u commands\n", reply->TerminationCount);
376 mps_free_reply(sc, tm->cm_reply_data);
377 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
379 mps_printf(sc, "clearing target %u handle 0x%04x\n", targ->tid, handle);
382 * Don't clear target if remove fails because things will get confusing.
383 * Leave the devname and sasaddr intact so that we know to avoid reusing
384 * this target id if possible, and so we can assign the same target id
385 * to this device if it comes back in the future.
387 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
390 targ->encl_handle = 0x0;
391 targ->encl_slot = 0x0;
392 targ->exp_dev_handle = 0x0;
394 targ->linkrate = 0x0;
399 mpssas_free_tm(sc, tm);
403 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
404 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin tearing down a RAID volume identified by firmware 'handle':
 * mark the target MPSSAS_TARGET_INREMOVAL, rescan so CAM notices,
 * then issue a high-priority target-reset TM whose completion
 * (mpssas_remove_volume) clears the target.  Skipped entirely on WD
 * controllers configured to always expose the disk.
 */
407 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
409 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
410 struct mps_softc *sc;
411 struct mps_command *cm;
412 struct mpssas_target *targ = NULL;
414 mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);
419 * If this is a WD controller, determine if the disk should be exposed
420 * to the OS or not. If disk should be exposed, return from this
421 * function without doing anything.
423 if (sc->WD_available && (sc->WD_hide_expose ==
424 MPS_WD_EXPOSE_ALWAYS)) {
429 targ = mpssas_find_target_by_handle(sassc, 0, handle);
431 /* FIXME: what is the action? */
432 /* We don't know about this device? */
433 kprintf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
437 targ->flags |= MPSSAS_TARGET_INREMOVAL;
/* TM allocation freezes the simq until the TM completes */
439 cm = mpssas_alloc_tm(sc);
441 mps_printf(sc, "%s: command alloc failure\n", __func__);
445 mpssas_rescan_target(sc, targ);
447 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
448 req->DevHandle = targ->handle;
449 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
450 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
452 /* SAS Hard Link Reset / SATA Link Reset */
453 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
457 cm->cm_desc.HighPriority.RequestFlags =
458 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
459 cm->cm_complete = mpssas_remove_volume;
/* stash the handle for the completion handler to recover */
460 cm->cm_complete_data = (void *)(uintptr_t)handle;
461 mps_map_command(sc, cm);
465 * The MPT2 firmware performs debounce on the link to avoid transient link
466 * errors and false removals. When it does decide that link has been lost
467 * and a device need to go away, it expects that the host will perform a
468 * target reset and then an op remove. The reset has the side-effect of
469 * aborting any outstanding requests for the device, which is required for
470 * the op-remove to succeed. It's not clear if the host should check for
471 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device identified by firmware 'handle'.
 * Per the firmware contract described above: issue a target reset
 * first (aborting outstanding I/O), and let the completion handler
 * (mpssas_remove_device) follow up with the SAS_OP_REMOVE_DEVICE.
 * Skipped on WD controllers configured to always expose the disk —
 * NOTE(review): this variant tests sc->mps_flags & MPS_FLAGS_WD_AVAILABLE
 * while the volume variant tests sc->WD_available; confirm both fields
 * track the same condition.
 */
474 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
476 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
477 struct mps_softc *sc;
478 struct mps_command *cm;
479 struct mpssas_target *targ = NULL;
481 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
484 * If this is a WD controller, determine if the disk should be exposed
485 * to the OS or not. If disk should be exposed, return from this
486 * function without doing anything.
489 if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
490 MPS_WD_EXPOSE_ALWAYS)) {
494 targ = mpssas_find_target_by_handle(sassc, 0, handle);
496 /* FIXME: what is the action? */
497 /* We don't know about this device? */
498 kprintf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
502 targ->flags |= MPSSAS_TARGET_INREMOVAL;
/* TM allocation freezes the simq until the TM completes */
504 cm = mpssas_alloc_tm(sc);
506 mps_printf(sc, "%s: command alloc failure\n", __func__);
510 mpssas_rescan_target(sc, targ);
512 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
513 memset(req, 0, sizeof(*req));
514 req->DevHandle = targ->handle;
515 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
516 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
518 /* SAS Hard Link Reset / SATA Link Reset */
519 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
523 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
524 cm->cm_complete = mpssas_remove_device;
/* stash the handle for the completion handler to recover */
525 cm->cm_complete_data = (void *)(uintptr_t)handle;
526 mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM from
 * mpssas_prepare_remove().  On success it reuses the same command
 * frame to send the second stage, a SAS IO UNIT CONTROL
 * REMOVE_DEVICE operation (completing in mpssas_remove_complete),
 * then fails any commands still queued on the target with
 * CAM_DEV_NOT_THERE.  A NULL reply means a diag reset intervened;
 * cm_flags errors should be impossible for S/G-less TMs.
 */
530 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
532 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
533 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
534 struct mpssas_target *targ;
535 struct mps_command *next_cm;
538 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
540 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
541 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
545 * Currently there should be no way we can hit this case. It only
546 * happens when we have a failure to allocate chain frames, and
547 * task management commands don't have S/G lists.
549 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
550 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
551 "This should not happen!\n", __func__, tm->cm_flags,
553 mpssas_free_tm(sc, tm);
558 /* XXX retry the remove after the diag reset completes? */
559 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
561 mpssas_free_tm(sc, tm);
565 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
566 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
567 reply->IOCStatus, handle);
568 mpssas_free_tm(sc, tm);
572 mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
573 reply->TerminationCount);
/* free the TM reply now; the frame is reused for the op-remove below */
574 mps_free_reply(sc, tm->cm_reply_data);
575 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
577 /* Reuse the existing command */
578 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
579 memset(req, 0, sizeof(*req));
580 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
581 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
582 req->DevHandle = handle;
584 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
585 tm->cm_complete = mpssas_remove_complete;
586 tm->cm_complete_data = (void *)(uintptr_t)handle;
588 mps_map_command(sc, tm);
590 mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
/* fail anything the reset should have aborted but that still lingers */
592 TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
595 mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
596 ccb = tm->cm_complete_data;
597 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
598 mpssas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the
 * SAS_OP_REMOVE_DEVICE sent by mpssas_remove_device().  On success,
 * clear the target's handles/slot/linkrate so the slot can be
 * reassigned; on failure, leave the target intact (devname/sasaddr
 * preserved) so the id is not confusingly reused.
 */
603 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
605 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
607 struct mpssas_target *targ;
609 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
611 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
612 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
615 * Currently there should be no way we can hit this case. It only
616 * happens when we have a failure to allocate chain frames, and
617 * task management commands don't have S/G lists.
619 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
620 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
621 "This should not happen!\n", __func__, tm->cm_flags,
623 mpssas_free_tm(sc, tm);
628 /* most likely a chip reset */
629 mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
631 mpssas_free_tm(sc, tm);
635 mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
636 handle, reply->IOCStatus);
639 * Don't clear target if remove fails because things will get confusing.
640 * Leave the devname and sasaddr intact so that we know to avoid reusing
641 * this target id if possible, and so we can assign the same target id
642 * to this device if it comes back in the future.
644 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
647 targ->encl_handle = 0x0;
648 targ->encl_slot = 0x0;
649 targ->exp_dev_handle = 0x0;
651 targ->linkrate = 0x0;
656 mpssas_free_tm(sc, tm);
/*
 * Build the event-notification mask the SAS layer cares about (device
 * status, discovery, topology, enclosure, IR/RAID and log events) and
 * register mpssas_evt_handler for them, saving the handle in
 * sc->sassc->mpssas_eh for later deregistration in mps_detach_sas().
 */
660 mpssas_register_events(struct mps_softc *sc)
665 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
666 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
667 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
668 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
669 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
670 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
671 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
672 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
673 setbit(events, MPI2_EVENT_IR_VOLUME);
674 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
675 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
676 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
678 mps_register_events(sc, events, mpssas_evt_handler, NULL,
679 &sc->sassc->mpssas_eh);
/*
 * Attach the CAM SAS transport layer: allocate the softc and target
 * array, create the SIM and its devq, spin up the firmware-event
 * taskqueue and the rescan kthread, register the bus with CAM, enter
 * startup/discovery mode with the simq frozen, and finally hook
 * async notifications (on new-enough CAM) and firmware events.
 * Error paths between the visible lines are truncated from this view.
 */
685 mps_attach_sas(struct mps_softc *sc)
687 struct mpssas_softc *sassc;
688 #if __FreeBSD_version >= 1000006
693 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
695 sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
/* one target slot per firmware-reported MaxTargets */
696 sassc->targets = kmalloc(sizeof(struct mpssas_target) *
697 sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
701 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
702 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
707 unit = device_get_unit(sc->mps_dev);
708 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
709 unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
/* cam_sim_alloc took its own devq reference; drop ours */
710 cam_simq_release(sassc->devq);
711 if (sassc->sim == NULL) {
712 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
717 TAILQ_INIT(&sassc->ev_queue);
719 /* Initialize taskqueue for Event Handling */
720 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
721 sassc->ev_tq = taskqueue_create("mps_taskq", M_INTWAIT | M_ZERO,
722 taskqueue_thread_enqueue, &sassc->ev_tq);
724 /* Run the task queue with lowest priority */
725 taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
726 device_get_nameunit(sc->mps_dev));
728 TAILQ_INIT(&sassc->ccb_scanq);
729 error = mps_kproc_create(mpssas_scanner_thread, sassc,
730 &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
732 mps_printf(sc, "Error %d starting rescan thread\n", error);
737 sassc->flags |= MPSSAS_SCANTHREAD;
740 * XXX There should be a bus for every port on the adapter, but since
741 * we're just going to fake the topology for now, we'll pretend that
742 * everything is just a target on a single bus.
744 if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
745 mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
752 * Assume that discovery events will start right away. Freezing
753 * the simq will prevent the CAM boottime scanner from running
754 * before discovery is complete.
756 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
757 xpt_freeze_simq(sassc->sim, 1);
758 sc->sassc->startup_refcount = 0;
760 callout_init_mp(&sassc->discovery_callout);
761 sassc->discovery_timeouts = 0;
765 #if __FreeBSD_version >= 1000006
766 status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
767 if (status != CAM_REQ_CMP) {
768 mps_printf(sc, "Error %#x registering async handler for "
769 "AC_ADVINFO_CHANGED events\n", status);
775 mpssas_register_events(sc);
/*
 * Tear down the CAM SAS layer in reverse order of attach: deregister
 * firmware events, drain/free the event taskqueue (with the lock
 * unheld to avoid deadlock), release a startup simq freeze if still
 * pending, deregister the bus and free the SIM, signal the scanner
 * thread to exit and wait up to 30s for it, then free the target
 * array and softc.  No-op if attach never populated sc->sassc.
 */
783 mps_detach_sas(struct mps_softc *sc)
785 struct mpssas_softc *sassc;
787 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
789 if (sc->sassc == NULL)
793 mps_deregister_events(sc, sassc->mpssas_eh);
796 * Drain and free the event handling taskqueue with the lock
797 * unheld so that any parallel processing tasks drain properly
798 * without deadlocking.
800 if (sassc->ev_tq != NULL)
801 taskqueue_free(sassc->ev_tq);
803 /* Make sure CAM doesn't wedge if we had to bail out early. */
806 /* Deregister our async handler */
807 #if __FreeBSD_version >= 1000006
/* code 0 deregisters the previously registered callback */
808 xpt_register_async(0, mpssas_async, sc, NULL);
811 if (sassc->flags & MPSSAS_IN_STARTUP)
812 xpt_release_simq(sassc->sim, 1);
814 if (sassc->sim != NULL) {
815 xpt_bus_deregister(cam_sim_path(sassc->sim));
816 cam_sim_free(sassc->sim);
819 if (sassc->flags & MPSSAS_SCANTHREAD) {
820 sassc->flags |= MPSSAS_SHUTDOWN;
821 wakeup(&sassc->ccb_scanq);
/* wait (bounded) for the scanner thread to observe SHUTDOWN and exit */
823 if (sassc->flags & MPSSAS_SCANTHREAD) {
824 lksleep(&sassc->flags, &sc->mps_lock, 0,
825 "mps_shutdown", 30 * hz);
830 kfree(sassc->targets, M_MPT2);
831 kfree(sassc, M_MPT2);
/*
 * Called when firmware discovery finishes: cancel the pending
 * discovery watchdog callout, if one was armed.
 */
838 mpssas_discovery_end(struct mpssas_softc *sassc)
840 struct mps_softc *sc = sassc->sc;
842 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
844 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
845 callout_stop(&sassc->discovery_callout);
849 #if 0 /* XXX unused */
/*
 * (Compiled out — see the #if 0 above.)  Watchdog for discovery:
 * if discovery stalls, poll the hardware in case interrupts are
 * broken, then either finish discovery, re-arm the callout (up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts), or give up and proceed.
 */
851 mpssas_discovery_timeout(void *data)
853 struct mpssas_softc *sassc = data;
854 struct mps_softc *sc;
857 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
861 "Timeout waiting for discovery, interrupts may not be working!\n");
862 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
864 /* Poll the hardware for events in case interrupts aren't working */
867 mps_printf(sassc->sc,
868 "Finished polling after discovery timeout at %d\n", ticks);
870 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
871 mpssas_discovery_end(sassc);
873 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
874 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
875 callout_reset(&sassc->discovery_callout,
876 MPSSAS_DISCOVERY_TIMEOUT * hz,
877 mpssas_discovery_timeout, sassc);
878 sassc->discovery_timeouts++;
880 mps_dprint(sassc->sc, MPS_FAULT,
881 "Discovery timed out, continuing.\n");
882 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
883 mpssas_discovery_end(sassc);
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB
 * function code: fills in path inquiry (XPT_PATH_INQ) and transport
 * settings (XPT_GET_TRAN_SETTINGS, reporting per-target SAS link
 * rate), computes geometry, and routes resets, SCSI I/O and (when
 * available) SMP I/O to their handlers.  Must be called with the
 * mps lock held (asserted below).
 */
892 mpssas_action(struct cam_sim *sim, union ccb *ccb)
894 struct mpssas_softc *sassc;
896 sassc = cam_sim_softc(sim);
898 mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
899 ccb->ccb_h.func_code);
900 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
902 switch (ccb->ccb_h.func_code) {
905 struct ccb_pathinq *cpi = &ccb->cpi;
907 cpi->version_num = 1;
908 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
909 cpi->target_sprt = 0;
910 cpi->hba_misc = PIM_NOBUSRESET;
911 cpi->hba_eng_cnt = 0;
912 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
/* the HBA itself claims the highest target id as initiator */
914 cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
915 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
916 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
917 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
918 cpi->unit_number = cam_sim_unit(sim);
919 cpi->bus_id = cam_sim_bus(sim);
920 cpi->base_transfer_speed = 150000;
921 cpi->transport = XPORT_SAS;
922 cpi->transport_version = 0;
923 cpi->protocol = PROTO_SCSI;
924 cpi->protocol_version = SCSI_REV_SPC;
925 #if __FreeBSD_version >= 800001
927 * XXX KDM where does this number come from?
929 cpi->maxio = 256 * 1024;
931 cpi->ccb_h.status = CAM_REQ_CMP;
934 case XPT_GET_TRAN_SETTINGS:
936 struct ccb_trans_settings *cts;
937 struct ccb_trans_settings_sas *sas;
938 struct ccb_trans_settings_scsi *scsi;
939 struct mpssas_target *targ;
942 sas = &cts->xport_specific.sas;
943 scsi = &cts->proto_specific.scsi;
/* handle 0 means no device was discovered at this target id */
945 targ = &sassc->targets[cts->ccb_h.target_id];
946 if (targ->handle == 0x0) {
947 cts->ccb_h.status = CAM_TID_INVALID;
951 cts->protocol_version = SCSI_REV_SPC2;
952 cts->transport = XPORT_SAS;
953 cts->transport_version = 0;
955 sas->valid = CTS_SAS_VALID_SPEED;
/* map the MPI2 negotiated link rate to a CAM bitrate in kb/s */
956 switch (targ->linkrate) {
958 sas->bitrate = 150000;
961 sas->bitrate = 300000;
964 sas->bitrate = 600000;
970 cts->protocol = PROTO_SCSI;
971 scsi->valid = CTS_SCSI_VALID_TQ;
972 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
974 cts->ccb_h.status = CAM_REQ_CMP;
977 case XPT_CALC_GEOMETRY:
978 cam_calc_geometry(&ccb->ccg, /*extended*/1);
981 mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
982 mpssas_action_resetdev(sassc, ccb);
987 mps_printf(sassc->sc, "mpssas_action faking success for "
989 ccb->ccb_h.status = CAM_REQ_CMP;
992 mpssas_action_scsiio(sassc, ccb);
994 #if __FreeBSD_version >= 900026
996 mpssas_action_smpio(sassc, ccb);
1000 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Broadcast a reset async event (e.g. AC_BUS_RESET, AC_SENT_BDR) to
 * CAM for the given target/lun on this SIM's path, creating and
 * freeing a temporary path for the notification.
 */
1008 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1009 target_id_t target_id, lun_id_t lun_id)
1011 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1012 struct cam_path *path;
1014 mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
1015 ac_code, target_id, lun_id);
1017 if (xpt_create_path(&path, NULL,
1018 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1019 mps_printf(sc, "unable to create path for reset "
1024 xpt_async(ac_code, path, NULL);
1025 xpt_free_path(path);
/*
 * Diag-reset sweep: walk every command slot (index 0 is skipped —
 * presumably reserved) and force-complete each with a NULL reply.
 * Polled commands are flagged complete, callback-style commands have
 * cm_complete invoked, sleeping waiters are woken, and anything left
 * in a non-free state with no completion path is logged as an
 * anomaly.  Requires the mps lock (asserted).
 */
1029 mpssas_complete_all_commands(struct mps_softc *sc)
1031 struct mps_command *cm;
1035 mps_printf(sc, "%s\n", __func__);
1036 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1038 /* complete all commands with a NULL reply */
1039 for (i = 1; i < sc->num_reqs; i++) {
1040 cm = &sc->commands[i];
1041 cm->cm_reply = NULL;
1044 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1045 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1047 if (cm->cm_complete != NULL) {
1048 mpssas_log_command(cm,
1049 "completing cm %p state %x ccb %p for diag reset\n",
1050 cm, cm->cm_state, cm->cm_ccb);
1052 cm->cm_complete(sc, cm);
1056 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1057 mpssas_log_command(cm,
1058 "waking up cm %p state %x ccb %p for diag reset\n",
1059 cm, cm->cm_state, cm->cm_ccb);
/* no completion path matched and the slot isn't free: log it */
1064 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1065 /* this should never happen, but if it does, log */
1066 mpssas_log_command(cm,
1067 "cm %p state %x flags 0x%x ccb %p during diag "
1068 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset recovery: re-enter startup/discovery mode with the
 * simq frozen, announce a bus reset to CAM, force-complete all
 * outstanding commands, zero the startup refcount (the freeze above
 * is released later by port enable), and invalidate every target's
 * handles so rediscovery assigns fresh ones.  Targets are flagged
 * MPSSAS_TARGET_INDIAGRESET during the window.
 */
1075 mpssas_handle_reinit(struct mps_softc *sc)
1079 /* Go back into startup mode and freeze the simq, so that CAM
1080 * doesn't send any commands until after we've rediscovered all
1081 * targets and found the proper device handles for them.
1083 * After the reset, portenable will trigger discovery, and after all
1084 * discovery-related activities have finished, the simq will be
1087 mps_printf(sc, "%s startup\n", __func__);
1088 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1089 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1090 xpt_freeze_simq(sc->sassc->sim, 1);
1092 /* notify CAM of a bus reset */
1093 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1096 /* complete and cleanup after all outstanding commands */
1097 mpssas_complete_all_commands(sc);
1099 mps_printf(sc, "%s startup %u tm %u after command completion\n",
1100 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1103 * The simq was explicitly frozen above, so set the refcount to 0.
1104 * The simq will be explicitly released after port enable completes.
1106 sc->sassc->startup_refcount = 0;
1108 /* zero all the target handles, since they may change after the
1109 * reset, and we have to rediscover all the targets and use the new
1112 for (i = 0; i < sc->facts->MaxTargets; i++) {
1113 if (sc->sassc->targets[i].outstanding != 0)
1114 mps_printf(sc, "target %u outstanding %u\n",
1115 i, sc->sassc->targets[i].outstanding);
1116 sc->sassc->targets[i].handle = 0x0;
1117 sc->sassc->targets[i].exp_dev_handle = 0x0;
1118 sc->sassc->targets[i].outstanding = 0;
1119 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management request itself times out;
 * logs the stuck TM (recovery action is past the visible lines —
 * truncated here, presumably a diag reset).
 */
1123 mpssas_tm_timeout(void *data)
1125 struct mps_command *tm = data;
1126 struct mps_softc *sc = tm->cm_sc;
1129 mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL UNIT RESET TM.  Stops the TM
 * timeout callout, then: if commands are still queued for the same
 * LUN the reset is considered failed and recovery escalates to a
 * TARGET RESET; otherwise recovery for this LUN is done — announce
 * AC_SENT_BDR, and either reuse the TM frame to abort the next
 * timed-out command on another LUN of this target, or free the TM.
 * A NULL reply during diag reset just cleans up.
 */
1135 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1137 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1138 unsigned int cm_count = 0;
1139 struct mps_command *cm;
1140 struct mpssas_target *targ;
1142 callout_stop(&tm->cm_callout);
1144 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1148 * Currently there should be no way we can hit this case. It only
1149 * happens when we have a failure to allocate chain frames, and
1150 * task management commands don't have S/G lists.
1152 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1153 mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1154 "This should not happen!\n", __func__, tm->cm_flags);
1155 mpssas_free_tm(sc, tm);
1159 if (reply == NULL) {
1160 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1161 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1162 /* this completion was due to a reset, just cleanup */
1163 targ->flags &= ~MPSSAS_TARGET_INRESET;
1165 mpssas_free_tm(sc, tm);
1168 /* we should have gotten a reply. */
1174 mpssas_log_command(tm,
1175 "logical unit reset status 0x%x code 0x%x count %u\n",
1176 reply->IOCStatus, reply->ResponseCode,
1177 reply->TerminationCount);
1179 /* See if there are any outstanding commands for this LUN.
1180 * This could be made more efficient by using a per-LU data
1181 * structure of some sort.
1183 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1184 if (cm->cm_lun == tm->cm_lun)
1188 if (cm_count == 0) {
1189 mpssas_log_command(tm,
1190 "logical unit %u finished recovery after reset\n",
1193 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1196 /* we've finished recovery for this logical unit. check and
1197 * see if some other logical unit has a timedout command
1198 * that needs to be processed.
1200 cm = TAILQ_FIRST(&targ->timedout_commands);
1202 mpssas_send_abort(sc, tm, cm);
1206 mpssas_free_tm(sc, tm);
1210 /* if we still have commands for this LUN, the reset
1211 * effectively failed, regardless of the status reported.
1212 * Escalate to a target reset.
1214 mpssas_log_command(tm,
1215 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1217 mpssas_send_reset(sc, tm,
1218 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET TM command.
 *
 * Mirrors mpssas_logical_unit_reset_complete(): handles the
 * chain-frame-error and NULL-reply-during-diag-reset cases, then clears
 * MPSSAS_TARGET_INRESET.  If the target has no outstanding commands,
 * recovery is complete (announce AC_SENT_BDR, free the TM); otherwise
 * the reset effectively failed and the (elided) tail of this function
 * escalates further.
 */
1223 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1225 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1226 struct mpssas_target *targ;
/* NOTE(review): targ's assignment (presumably tm->cm_targ) is in lines
 * elided from this excerpt -- confirm. */
1228 callout_stop(&tm->cm_callout);
1230 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1234 * Currently there should be no way we can hit this case. It only
1235 * happens when we have a failure to allocate chain frames, and
1236 * task management commands don't have S/G lists.
1238 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1239 mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1240 "This should not happen!\n", __func__, tm->cm_flags);
1241 mpssas_free_tm(sc, tm);
1245 if (reply == NULL) {
1246 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1247 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1248 /* this completion was due to a reset, just cleanup */
1249 targ->flags &= ~MPSSAS_TARGET_INRESET;
1251 mpssas_free_tm(sc, tm);
1254 /* we should have gotten a reply. */
1260 mpssas_log_command(tm,
1261 "target reset status 0x%x code 0x%x count %u\n",
1262 reply->IOCStatus, reply->ResponseCode,
1263 reply->TerminationCount);
1265 targ->flags &= ~MPSSAS_TARGET_INRESET;
1267 if (targ->outstanding == 0) {
1268 /* we've finished recovery for this target and all
1269 * of its logical units.
1271 mpssas_log_command(tm,
1272 "recovery finished after target reset\n");
1274 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1278 mpssas_free_tm(sc, tm);
1281 /* after a target reset, if this target still has
1282 * outstanding commands, the reset effectively failed,
1283 * regardless of the status reported. escalate.
1285 mpssas_log_command(tm,
1286 "target reset complete for tm %p, but still have %u command(s)\n",
1287 tm, targ->outstanding);
1292 #define MPS_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task management reset of the given type
 * (logical unit reset or target reset) for tm->cm_targ.
 *
 * Fills in the MPI2 TM request, selects the matching completion
 * handler, arms the MPS_RESET_TIMEOUT callout, and maps the command.
 * Bails out early if the target has no devhandle or the type is
 * unrecognized (return statements are in lines elided here).
 */
1295 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1297 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1298 struct mpssas_target *target;
1301 target = tm->cm_targ;
1302 if (target->handle == 0) {
1303 mps_printf(sc, "%s null devhandle for target_id %d\n",
1304 __func__, target->tid);
1308 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1309 req->DevHandle = target->handle;
1310 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1311 req->TaskType = type;
1313 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1314 /* XXX Need to handle invalid LUNs */
1315 MPS_SET_LUN(req->LUN, tm->cm_lun);
1316 tm->cm_targ->logical_unit_resets++;
1317 mpssas_log_command(tm, "sending logical unit reset\n");
1318 tm->cm_complete = mpssas_logical_unit_reset_complete;
1320 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1321 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1322 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1323 tm->cm_targ->target_resets++;
1324 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1325 mpssas_log_command(tm, "sending target reset\n");
1326 tm->cm_complete = mpssas_target_reset_complete;
1329 mps_printf(sc, "unexpected reset type 0x%x\n", type);
/* TM requests are sent on the high-priority queue. */
1334 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1335 tm->cm_complete_data = (void *)tm;
1337 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1338 mpssas_tm_timeout, tm);
1340 err = mps_map_command(sc, tm);
1342 mpssas_log_command(tm,
1343 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK TM command.
 *
 * After the usual chain-frame-error and NULL-reply checks, inspects the
 * target's timedout_commands list:
 *  - empty: recovery done, free the TM;
 *  - head differs from the TaskMID we just aborted: that abort worked,
 *    continue by aborting the next timed-out command;
 *  - head still matches: the abort failed, escalate to a logical unit
 *    reset (reusing this TM).
 */
1351 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1353 struct mps_command *cm;
1354 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1355 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1356 struct mpssas_target *targ;
/* NOTE(review): targ is declared but any use/assignment here falls in
 * lines elided from this excerpt. */
1358 callout_stop(&tm->cm_callout);
1360 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1361 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1365 * Currently there should be no way we can hit this case. It only
1366 * happens when we have a failure to allocate chain frames, and
1367 * task management commands don't have S/G lists.
1369 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1370 mpssas_log_command(tm,
1371 "cm_flags = %#x for abort %p TaskMID %u!\n",
1372 tm->cm_flags, tm, req->TaskMID);
1373 mpssas_free_tm(sc, tm);
1377 if (reply == NULL) {
1378 mpssas_log_command(tm,
1379 "NULL abort reply for tm %p TaskMID %u\n",
1381 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1382 /* this completion was due to a reset, just cleanup */
1384 mpssas_free_tm(sc, tm);
1387 /* we should have gotten a reply. */
1393 mpssas_log_command(tm,
1394 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1396 reply->IOCStatus, reply->ResponseCode,
1397 reply->TerminationCount);
1399 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1401 /* if there are no more timedout commands, we're done with
1402 * error recovery for this target.
1404 mpssas_log_command(tm,
1405 "finished recovery after aborting TaskMID %u\n",
1409 mpssas_free_tm(sc, tm);
1411 else if (req->TaskMID != cm->cm_desc.Default.SMID) {
1412 /* abort success, but we have more timedout commands to abort */
1413 mpssas_log_command(tm,
1414 "continuing recovery after aborting TaskMID %u\n",
1417 mpssas_send_abort(sc, tm, cm);
1420 /* we didn't get a command completion, so the abort
1421 * failed as far as we're concerned. escalate.
1423 mpssas_log_command(tm,
1424 "abort failed for TaskMID %u tm %p\n",
1427 mpssas_send_reset(sc, tm,
1428 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1432 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK TM for the timed-out command cm, using
 * tm as the carrier for the task management request.
 *
 * Copies the target/LUN/SMID of cm into the TM request, arms the
 * MPS_ABORT_TIMEOUT callout, and maps the command.  Completion is
 * handled by mpssas_abort_complete().
 */
1435 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1437 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1438 struct mpssas_target *targ;
/* NOTE(review): targ's assignment (presumably cm->cm_targ) is in lines
 * elided from this excerpt -- confirm. */
1442 if (targ->handle == 0) {
1443 mps_printf(sc, "%s null devhandle for target_id %d\n",
1444 __func__, cm->cm_ccb->ccb_h.target_id);
1448 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1449 req->DevHandle = targ->handle;
1450 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1451 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1453 /* XXX Need to handle invalid LUNs */
1454 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the specific outstanding request to abort. */
1456 req->TaskMID = cm->cm_desc.Default.SMID;
1459 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1460 tm->cm_complete = mpssas_abort_complete;
1461 tm->cm_complete_data = (void *)tm;
1462 tm->cm_targ = cm->cm_targ;
1463 tm->cm_lun = cm->cm_lun;
1465 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1466 mpssas_tm_timeout, tm);
1470 err = mps_map_command(sc, tm);
1472 mpssas_log_command(tm,
1473 "error %d sending abort for cm %p SMID %u\n",
1474 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O command.
 *
 * Runs the interrupt handler first to catch a completion that raced
 * with the timeout.  If the command really is stuck, marks it
 * MPS_CM_STATE_TIMEDOUT, queues it on the target's timedout_commands
 * list, and either lets an in-progress TM pick it up or allocates a new
 * TM and starts recovery with an abort.
 */
1480 mpssas_scsiio_timeout(void *data)
1482 struct mps_softc *sc;
1483 struct mps_command *cm;
1484 struct mpssas_target *targ;
1486 cm = (struct mps_command *)data;
/* NOTE(review): the assignments of sc and targ (presumably cm->cm_sc
 * and cm->cm_targ) are in lines elided from this excerpt -- confirm. */
1489 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1491 mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1494 * Run the interrupt handler to make sure it's not pending. This
1495 * isn't perfect because the command could have already completed
1496 * and been re-used, though this is unlikely.
1498 mps_intr_locked(sc);
1499 if (cm->cm_state == MPS_CM_STATE_FREE) {
1500 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1504 if (cm->cm_ccb == NULL) {
1505 mps_printf(sc, "command timeout with NULL ccb\n");
1509 mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1515 /* XXX first, check the firmware state, to see if it's still
1516 * operational. if not, do a diag reset.
1519 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1520 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1521 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1523 if (targ->tm != NULL) {
1524 /* target already in recovery, just queue up another
1525 * timedout command to be processed later.
1527 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1530 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1531 mps_printf(sc, "timedout cm %p allocated tm %p\n",
1534 /* start recovery by aborting the first timedout command */
1535 mpssas_send_abort(sc, targ->tm, cm);
1538 /* XXX queue this target up for recovery once a TM becomes
1539 * available. The firmware only has a limited number of
1540 * HighPriority credits for the high priority requests used
1541 * for task management, and we ran out.
1543 * Isilon: don't worry about this for now, since we have
1544 * more credits than disks in an enclosure, and limit
1545 * ourselves to one TM per target for recovery.
1547 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
/*
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI I/O CCB into an
 * MPI2 SCSI IO request and hand it to the hardware.
 *
 * Rejects the request early for an invalid target handle, a RAID
 * component target, a target being removed, or a shutting-down
 * controller.  On command-pool exhaustion it freezes the SIM queue and
 * requeues the CCB.  Otherwise it fills in direction, tagging, LUN,
 * CDB, optional EEDP protection fields, wires up the completion handler
 * and timeout, optionally attempts a WarpDrive direct-drive
 * translation, and maps the command for DMA.
 */
1554 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1556 MPI2_SCSI_IO_REQUEST *req;
1557 struct ccb_scsiio *csio;
1558 struct mps_softc *sc;
1559 struct mpssas_target *targ;
1560 struct mpssas_lun *lun;
1561 struct mps_command *cm;
1562 uint8_t i, lba_byte, *ref_tag_addr;
1563 uint16_t eedp_flags;
/* NOTE(review): sc and csio assignments (presumably sassc->sc and
 * &ccb->csio) are in lines elided from this excerpt -- confirm. */
1566 mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1567 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1570 targ = &sassc->targets[csio->ccb_h.target_id];
1571 if (targ->handle == 0x0) {
1572 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1573 __func__, csio->ccb_h.target_id);
1574 csio->ccb_h.status = CAM_TID_INVALID;
1578 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1579 mps_dprint(sc, MPS_TRACE, "%s Raid component no SCSI IO supported %u\n",
1580 __func__, csio->ccb_h.target_id);
1581 csio->ccb_h.status = CAM_TID_INVALID;
1587 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1588 * that the volume has timed out. We want volumes to be enumerated
1589 * until they are deleted/removed, not just failed.
1591 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1592 if (targ->devinfo == 0)
1593 csio->ccb_h.status = CAM_REQ_CMP;
1595 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1600 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1601 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1602 csio->ccb_h.status = CAM_TID_INVALID;
1607 cm = mps_alloc_command(sc);
/* Out of commands: freeze the SIM queue and ask CAM to requeue. */
1609 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1610 xpt_freeze_simq(sassc->sim, 1);
1611 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1613 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1614 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1619 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1620 bzero(req, sizeof(*req));
1621 req->DevHandle = targ->handle;
1622 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1624 req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1625 req->SenseBufferLength = MPS_SENSE_LEN;
1627 req->ChainOffset = 0;
1628 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1633 req->DataLength = csio->dxfer_len;
1634 req->BidirectionalDataLength = 0;
1635 req->IoFlags = csio->cdb_len;
1638 /* Note: BiDirectional transfers are not supported */
1639 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1641 req->Control = MPI2_SCSIIO_CONTROL_READ;
1642 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1645 req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1646 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1650 req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1655 * It looks like the hardware doesn't require an explicit tag
1656 * number for each transaction. SAM Task Management not supported
1659 switch (csio->tag_action) {
1660 case MSG_HEAD_OF_Q_TAG:
1661 req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1663 case MSG_ORDERED_Q_TAG:
1664 req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1667 req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1669 case CAM_TAG_ACTION_NONE:
1670 case MSG_SIMPLE_Q_TAG:
1672 req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1675 req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1677 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1678 mps_free_command(sc, cm);
1679 ccb->ccb_h.status = CAM_LUN_INVALID;
1684 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1685 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1687 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
/* NOTE(review): IoFlags was already set to csio->cdb_len above (orig
 * line 1635); this second assignment is redundant but harmless. */
1688 req->IoFlags = csio->cdb_len;
1691 * Check if EEDP is supported and enabled. If it is then check if the
1692 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1693 * is formatted for EEDP support. If all of this is true, set CDB up
1694 * for EEDP transfer.
1696 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1697 if (sc->eedp_enabled && eedp_flags) {
1698 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1699 if (lun->lun_id == csio->ccb_h.target_lun) {
1704 if ((lun != NULL) && (lun->eedp_formatted)) {
1705 req->EEDPBlockSize = lun->eedp_block_size;
1706 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1707 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1708 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1709 req->EEDPFlags = eedp_flags;
1712 * If CDB less than 32, fill in Primary Ref Tag with
1713 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1714 * already there. Also, set protection bit. FreeBSD
1715 * currently does not support CDBs bigger than 16, but
1716 * the code doesn't hurt, and will be here for the
1719 if (csio->cdb_len != 32) {
1720 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1721 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1722 PrimaryReferenceTag;
1723 for (i = 0; i < 4; i++) {
1725 req->CDB.CDB32[lba_byte + i];
1728 req->CDB.EEDP32.PrimaryApplicationTagMask =
1730 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1734 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1735 req->EEDPFlags = eedp_flags;
1736 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1742 cm->cm_data = csio->data_ptr;
1743 cm->cm_length = csio->dxfer_len;
1744 cm->cm_sge = &req->SGL;
/* SGL space available in the request frame: words 24..31. */
1745 cm->cm_sglsize = (32 - 24) * 4;
1746 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1747 cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1748 cm->cm_complete = mpssas_scsiio_complete;
1749 cm->cm_complete_data = ccb;
1751 cm->cm_lun = csio->ccb_h.target_lun;
1755 * If HBA is a WD and the command is not for a retry, try to build a
1756 * direct I/O message. If failed, or the command is for a retry, send
1757 * the I/O to the IR volume itself.
1759 if (sc->WD_valid_config) {
1760 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1761 mpssas_direct_drive_io(sassc, cm, ccb);
1763 ccb->ccb_h.status = CAM_REQ_INPROG;
1767 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1768 mpssas_scsiio_timeout, cm);
1771 targ->outstanding++;
1772 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1774 if ((sc->mps_debug & MPS_TRACE) != 0)
1775 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1776 __func__, cm, ccb, targ->outstanding);
1778 mps_map_command(sc, cm);
/*
 * Completion handler for SCSI I/O commands.
 *
 * Responsibilities, in order:
 *  - stop the timeout callout, sync/unload DMA for the data buffer;
 *  - update per-target accounting and recovery bookkeeping (timed-out
 *    commands, in-progress TM);
 *  - handle the no-reply "fast path" (success, diag reset, or a
 *    chain-frame allocation error that freezes the SIM queue);
 *  - retry WarpDrive direct-drive I/O against the IR volume on error;
 *  - map the MPI2 IOCStatus/SCSIStatus onto a CAM status, including
 *    autosense copy-out and TLR renegotiation on INQUIRY page-0 data;
 *  - unfreeze the SIM queue if we froze it, freeze the device queue on
 *    error, complete the CCB and free the command.
 */
1783 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1785 MPI2_SCSI_IO_REPLY *rep;
1787 struct ccb_scsiio *csio;
1788 struct mpssas_softc *sassc;
1789 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1790 u8 *TLR_bits, TLR_on;
/* NOTE(review): several locals used below (ccb, dir, alloc_len, i,
 * sassc/csio assignments) are declared or assigned in lines elided
 * from this excerpt. */
1794 mps_dprint(sc, MPS_TRACE,
1795 "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1796 __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1797 cm->cm_targ->outstanding);
1799 callout_stop(&cm->cm_callout);
1800 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1803 ccb = cm->cm_complete_data;
1805 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1807 * XXX KDM if the chain allocation fails, does it matter if we do
1808 * the sync and unload here? It is simpler to do it in every case,
1809 * assuming it doesn't cause problems.
1811 if (cm->cm_data != NULL) {
1812 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1813 dir = BUS_DMASYNC_POSTREAD;
1814 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1815 dir = BUS_DMASYNC_POSTWRITE;
1816 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1817 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1820 cm->cm_targ->completed++;
1821 cm->cm_targ->outstanding--;
1822 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1824 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1825 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1826 if (cm->cm_reply != NULL)
1827 mpssas_log_command(cm,
1828 "completed timedout cm %p ccb %p during recovery "
1829 "ioc %x scsi %x state %x xfer %u\n",
1831 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1832 rep->TransferCount);
1834 mpssas_log_command(cm,
1835 "completed timedout cm %p ccb %p during recovery\n",
1837 } else if (cm->cm_targ->tm != NULL) {
1838 if (cm->cm_reply != NULL)
1839 mpssas_log_command(cm,
1840 "completed cm %p ccb %p during recovery "
1841 "ioc %x scsi %x state %x xfer %u\n",
1843 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1844 rep->TransferCount);
1846 mpssas_log_command(cm,
1847 "completed cm %p ccb %p during recovery\n",
1849 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1850 mpssas_log_command(cm,
1851 "reset completed cm %p ccb %p\n",
1855 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1857 * We ran into an error after we tried to map the command,
1858 * so we're getting a callback without queueing the command
1859 * to the hardware. So we set the status here, and it will
1860 * be retained below. We'll go through the "fast path",
1861 * because there can be no reply when we haven't actually
1862 * gone out to the hardware.
1864 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1867 * Currently the only error included in the mask is
1868 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1869 * chain frames. We need to freeze the queue until we get
1870 * a command that completed without this error, which will
1871 * hopefully have some chain frames attached that we can
1872 * use. If we wanted to get smarter about it, we would
1873 * only unfreeze the queue in this condition when we're
1874 * sure that we're getting some chain frames back. That's
1875 * probably unnecessary.
1877 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1878 xpt_freeze_simq(sassc->sim, 1);
1879 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1880 mps_dprint(sc, MPS_INFO, "Error sending command, "
1881 "freezing SIM queue\n");
1885 /* Take the fast path to completion */
1886 if (cm->cm_reply == NULL) {
1887 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1888 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1889 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1891 ccb->ccb_h.status = CAM_REQ_CMP;
1892 ccb->csio.scsi_status = SCSI_STATUS_OK;
1894 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1895 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1896 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1897 mps_dprint(sc, MPS_INFO,
1898 "Unfreezing SIM queue\n");
1903 * There are two scenarios where the status won't be
1904 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
1905 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1907 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1909 * Freeze the dev queue so that commands are
1910 * executed in the correct order with after error
1913 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1914 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1916 mps_free_command(sc, cm);
1921 if (sc->mps_debug & MPS_TRACE)
1922 mpssas_log_command(cm,
1923 "ioc %x scsi %x state %x xfer %u\n",
1924 rep->IOCStatus, rep->SCSIStatus,
1925 rep->SCSIState, rep->TransferCount);
1928 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1929 * Volume if an error occurred (normal I/O retry). Use the original
1930 * CCB, but set a flag that this will be a retry so that it's sent to
1931 * the original volume. Free the command but reuse the CCB.
1933 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1934 mps_free_command(sc, cm);
1935 ccb->ccb_h.status = MPS_WD_RETRY;
1936 mpssas_action_scsiio(sassc, ccb);
1940 switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1941 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1942 csio->resid = cm->cm_length - rep->TransferCount;
1944 case MPI2_IOCSTATUS_SUCCESS:
1945 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1947 if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1948 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1949 mpssas_log_command(cm, "recovered error\n");
1951 /* Completion failed at the transport level. */
1952 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1953 MPI2_SCSI_STATE_TERMINATED)) {
1954 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1958 /* In a modern packetized environment, an autosense failure
1959 * implies that there's not much else that can be done to
1960 * recover the command.
1962 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1963 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1968 * CAM doesn't care about SAS Response Info data, but if this is
1969 * the state check if TLR should be done. If not, clear the
1970 * TLR_bits for the target.
1972 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1973 ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1974 MPS_SCSI_RI_INVALID_FRAME)) {
1975 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1976 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1980 * Intentionally override the normal SCSI status reporting
1981 * for these two cases. These are likely to happen in a
1982 * multi-initiator environment, and we want to make sure that
1983 * CAM retries these commands rather than fail them.
1985 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1986 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1987 ccb->ccb_h.status = CAM_REQ_ABORTED;
1991 /* Handle normal status and sense */
1992 csio->scsi_status = rep->SCSIStatus;
1993 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1994 ccb->ccb_h.status = CAM_REQ_CMP;
1996 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1998 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1999 int sense_len, returned_sense_len;
2001 returned_sense_len = min(rep->SenseCount,
2002 sizeof(struct scsi_sense_data));
2003 if (returned_sense_len < ccb->csio.sense_len)
2004 ccb->csio.sense_resid = ccb->csio.sense_len -
2007 ccb->csio.sense_resid = 0;
2009 sense_len = min(returned_sense_len,
2010 ccb->csio.sense_len - ccb->csio.sense_resid);
2011 bzero(&ccb->csio.sense_data,
2012 sizeof(ccb->csio.sense_data));
2013 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2014 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2018 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2019 * and it's page code 0 (Supported Page List), and there is
2020 * inquiry data, and this is for a sequential access device, and
2021 * the device is an SSP target, and TLR is supported by the
2022 * controller, turn the TLR_bits value ON if page 0x90 is
2025 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2026 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2027 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2028 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2029 T_SEQUENTIAL) && (sc->control_TLR) &&
2030 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2031 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2032 vpd_list = (struct scsi_vpd_supported_page_list *)
2034 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2036 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2037 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2038 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2039 csio->cdb_io.cdb_bytes[4];
2040 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2041 if (vpd_list->list[i] == 0x90) {
2048 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2049 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2051 * If devinfo is 0 this will be a volume. In that case don't
2052 * tell CAM that the volume is not there. We want volumes to
2053 * be enumerated until they are deleted/removed, not just
2056 if (cm->cm_targ->devinfo == 0)
2057 ccb->ccb_h.status = CAM_REQ_CMP;
2059 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2061 case MPI2_IOCSTATUS_INVALID_SGL:
2062 mps_print_scsiio_cmd(sc, cm);
2063 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2065 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2067 * This is one of the responses that comes back when an I/O
2068 * has been aborted. If it is because of a timeout that we
2069 * initiated, just set the status to CAM_CMD_TIMEOUT.
2070 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2071 * command is the same (it gets retried, subject to the
2072 * retry counter), the only difference is what gets printed
2075 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2076 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2078 ccb->ccb_h.status = CAM_REQ_ABORTED;
2080 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2081 /* resid is ignored for this condition */
2083 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2085 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2086 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2088 * Since these are generally external (i.e. hopefully
2089 * transient transport-related) errors, retry these without
2090 * decrementing the retry count.
2092 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2093 mpssas_log_command(cm,
2094 "terminated ioc %x scsi %x state %x xfer %u\n",
2095 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2096 rep->TransferCount);
2098 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2099 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2100 case MPI2_IOCSTATUS_INVALID_VPID:
2101 case MPI2_IOCSTATUS_INVALID_FIELD:
2102 case MPI2_IOCSTATUS_INVALID_STATE:
2103 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2104 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2105 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2106 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2107 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2109 mpssas_log_command(cm,
2110 "completed ioc %x scsi %x state %x xfer %u\n",
2111 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2112 rep->TransferCount);
2113 csio->resid = cm->cm_length;
2114 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2118 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2119 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2120 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2121 mps_dprint(sc, MPS_INFO, "Command completed, "
2122 "unfreezing SIM queue\n");
2125 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2126 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2127 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2130 mps_free_command(sc, cm);
2135 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2137 pMpi2SCSIIORequest_t pIO_req;
2138 struct mps_softc *sc = sassc->sc;
2140 uint32_t physLBA, stripe_offset, stripe_unit;
2141 uint32_t io_size, column;
2142 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2145 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2146 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2147 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2148 * bit different than the 10/16 CDBs, handle them separately.
2150 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2151 CDB = pIO_req->CDB.CDB32;
2154 * Handle 6 byte CDBs.
2156 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2157 (CDB[0] == WRITE_6))) {
2159 * Get the transfer size in blocks.
2161 io_size = (cm->cm_length >> sc->DD_block_exponent);
2164 * Get virtual LBA given in the CDB.
2166 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2167 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2170 * Check that LBA range for I/O does not exceed volume's
2173 if ((virtLBA + (uint64_t)io_size - 1) <=
2176 * Check if the I/O crosses a stripe boundary. If not,
2177 * translate the virtual LBA to a physical LBA and set
2178 * the DevHandle for the PhysDisk to be used. If it
2179 * does cross a boundry, do normal I/O. To get the
2180 * right DevHandle to use, get the map number for the
2181 * column, then use that map number to look up the
2182 * DevHandle of the PhysDisk.
2184 stripe_offset = (uint32_t)virtLBA &
2185 (sc->DD_stripe_size - 1);
2186 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2187 physLBA = (uint32_t)virtLBA >>
2188 sc->DD_stripe_exponent;
2189 stripe_unit = physLBA / sc->DD_num_phys_disks;
2190 column = physLBA % sc->DD_num_phys_disks;
2191 pIO_req->DevHandle =
2192 sc->DD_column_map[column].dev_handle;
2193 cm->cm_desc.SCSIIO.DevHandle =
2196 physLBA = (stripe_unit <<
2197 sc->DD_stripe_exponent) + stripe_offset;
2198 ptrLBA = &pIO_req->CDB.CDB32[1];
2199 physLBA_byte = (uint8_t)(physLBA >> 16);
2200 *ptrLBA = physLBA_byte;
2201 ptrLBA = &pIO_req->CDB.CDB32[2];
2202 physLBA_byte = (uint8_t)(physLBA >> 8);
2203 *ptrLBA = physLBA_byte;
2204 ptrLBA = &pIO_req->CDB.CDB32[3];
2205 physLBA_byte = (uint8_t)physLBA;
2206 *ptrLBA = physLBA_byte;
2209 * Set flag that Direct Drive I/O is
2212 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2219 * Handle 10 or 16 byte CDBs.
2221 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2222 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2223 (CDB[0] == WRITE_16))) {
2225 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2226 * are 0. If not, this is accessing beyond 2TB so handle it in
2227 * the else section. 10-byte CDB's are OK.
2229 if ((CDB[0] < READ_16) ||
2230 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2232 * Get the transfer size in blocks.
2234 io_size = (cm->cm_length >> sc->DD_block_exponent);
2237 * Get virtual LBA. Point to correct lower 4 bytes of
2238 * LBA in the CDB depending on command.
2240 lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2241 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2242 ((uint64_t)CDB[lba_idx + 1] << 16) |
2243 ((uint64_t)CDB[lba_idx + 2] << 8) |
2244 (uint64_t)CDB[lba_idx + 3];
2247 * Check that LBA range for I/O does not exceed volume's
2250 if ((virtLBA + (uint64_t)io_size - 1) <=
2253 * Check if the I/O crosses a stripe boundary.
2254 * If not, translate the virtual LBA to a
2255 * physical LBA and set the DevHandle for the
2256 * PhysDisk to be used. If it does cross a
2257 * boundry, do normal I/O. To get the right
2258 * DevHandle to use, get the map number for the
2259 * column, then use that map number to look up
2260 * the DevHandle of the PhysDisk.
2262 stripe_offset = (uint32_t)virtLBA &
2263 (sc->DD_stripe_size - 1);
2264 if ((stripe_offset + io_size) <=
2265 sc->DD_stripe_size) {
2266 physLBA = (uint32_t)virtLBA >>
2267 sc->DD_stripe_exponent;
2268 stripe_unit = physLBA /
2269 sc->DD_num_phys_disks;
2271 sc->DD_num_phys_disks;
2272 pIO_req->DevHandle =
2273 sc->DD_column_map[column].
2275 cm->cm_desc.SCSIIO.DevHandle =
2278 physLBA = (stripe_unit <<
2279 sc->DD_stripe_exponent) +
2282 &pIO_req->CDB.CDB32[lba_idx];
2283 physLBA_byte = (uint8_t)(physLBA >> 24);
2284 *ptrLBA = physLBA_byte;
2286 &pIO_req->CDB.CDB32[lba_idx + 1];
2287 physLBA_byte = (uint8_t)(physLBA >> 16);
2288 *ptrLBA = physLBA_byte;
2290 &pIO_req->CDB.CDB32[lba_idx + 2];
2291 physLBA_byte = (uint8_t)(physLBA >> 8);
2292 *ptrLBA = physLBA_byte;
2294 &pIO_req->CDB.CDB32[lba_idx + 3];
2295 physLBA_byte = (uint8_t)physLBA;
2296 *ptrLBA = physLBA_byte;
2299 * Set flag that Direct Drive I/O is
2302 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2307 * 16-byte CDB and the upper 4 bytes of the CDB are not
2308 * 0. Get the transfer size in blocks.
2310 io_size = (cm->cm_length >> sc->DD_block_exponent);
2315 virtLBA = ((uint64_t)CDB[2] << 56) |	/* byte 2 is the MSB of the 64-bit LBA: shift 56, not 54 */
2316 ((uint64_t)CDB[3] << 48) |
2317 ((uint64_t)CDB[4] << 40) |
2318 ((uint64_t)CDB[5] << 32) |
2319 ((uint64_t)CDB[6] << 24) |
2320 ((uint64_t)CDB[7] << 16) |
2321 ((uint64_t)CDB[8] << 8) |
2325 * Check that LBA range for I/O does not exceed volume's
2328 if ((virtLBA + (uint64_t)io_size - 1) <=
2331 * Check if the I/O crosses a stripe boundary.
2332 * If not, translate the virtual LBA to a
2333 * physical LBA and set the DevHandle for the
2334 * PhysDisk to be used. If it does cross a
2335 * boundry, do normal I/O. To get the right
2336 * DevHandle to use, get the map number for the
2337 * column, then use that map number to look up
2338 * the DevHandle of the PhysDisk.
2340 stripe_offset = (uint32_t)virtLBA &
2341 (sc->DD_stripe_size - 1);
2342 if ((stripe_offset + io_size) <=
2343 sc->DD_stripe_size) {
2344 physLBA = (uint32_t)(virtLBA >>
2345 sc->DD_stripe_exponent);
2346 stripe_unit = physLBA /
2347 sc->DD_num_phys_disks;
2349 sc->DD_num_phys_disks;
2350 pIO_req->DevHandle =
2351 sc->DD_column_map[column].
2353 cm->cm_desc.SCSIIO.DevHandle =
2356 physLBA = (stripe_unit <<
2357 sc->DD_stripe_exponent) +
2361 * Set upper 4 bytes of LBA to 0. We
2362 * assume that the phys disks are less
2363 * than 2 TB's in size. Then, set the
2366 pIO_req->CDB.CDB32[2] = 0;
2367 pIO_req->CDB.CDB32[3] = 0;
2368 pIO_req->CDB.CDB32[4] = 0;
2369 pIO_req->CDB.CDB32[5] = 0;
2370 ptrLBA = &pIO_req->CDB.CDB32[6];
2371 physLBA_byte = (uint8_t)(physLBA >> 24);
2372 *ptrLBA = physLBA_byte;
2373 ptrLBA = &pIO_req->CDB.CDB32[7];
2374 physLBA_byte = (uint8_t)(physLBA >> 16);
2375 *ptrLBA = physLBA_byte;
2376 ptrLBA = &pIO_req->CDB.CDB32[8];
2377 physLBA_byte = (uint8_t)(physLBA >> 8);
2378 *ptrLBA = physLBA_byte;
2379 ptrLBA = &pIO_req->CDB.CDB32[9];
2380 physLBA_byte = (uint8_t)physLBA;
2381 *ptrLBA = physLBA_byte;
2384 * Set flag that Direct Drive I/O is
2387 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2394 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command.  Validates driver-side
 * error flags and the firmware reply, maps the outcome onto the originating
 * CCB's CAM status, then syncs/unloads the data DMA map and frees the
 * command.  Runs with the driver lock held (completion context).
 */
2396 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2398 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2399 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2403 ccb = cm->cm_complete_data;
2406 * Currently there should be no way we can hit this case. It only
2407 * happens when we have a failure to allocate chain frames, and SMP
2408 * commands require two S/G elements only. That should be handled
2409 * in the standard request size.
2411 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2412 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2413 __func__, cm->cm_flags);
2414 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* A NULL reply means the firmware never produced a reply frame. */
2418 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2420 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2421 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/*
 * Reassemble the 64-bit SAS address from the little-endian request
 * frame; it is only used for the diagnostic messages below.
 */
2425 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2426 sasaddr = le32toh(req->SASAddress.Low);
2427 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2429 if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2430 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2431 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2432 __func__, rpl->IOCStatus, rpl->SASStatus);
2433 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2437 mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2438 "%#jx completed successfully\n", __func__,
2439 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2441 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2442 ccb->ccb_h.status = CAM_REQ_CMP;
2444 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2448 * We sync in both directions because we had DMAs in the S/G list
2449 * in both directions.
2451 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2452 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2453 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2454 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * device at 'sasaddr'.  Rejects physical addresses and multi-segment S/G
 * lists (the chip allows exactly one buffer each for request and response),
 * then packs both buffers into a two-element uio so a single
 * mps_map_command() call maps the bidirectional transfer.  Completion is
 * handled asynchronously by mpssas_smpio_complete().
 */
2459 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2461 struct mps_command *cm;
2462 uint8_t *request, *response;
2463 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2464 struct mps_softc *sc;
2471 * XXX We don't yet support physical addresses here.
2473 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2474 mps_printf(sc, "%s: physical addresses not supported\n",
2476 ccb->ccb_h.status = CAM_REQ_INVALID;
2482 * If the user wants to send an S/G list, check to make sure they
2483 * have single buffers.
2485 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2487 * The chip does not support more than one buffer for the
2488 * request or response.
2490 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2491 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2492 mps_printf(sc, "%s: multiple request or response "
2493 "buffer segments not supported for SMP\n",
2495 ccb->ccb_h.status = CAM_REQ_INVALID;
2501 * The CAM_SCATTER_VALID flag was originally implemented
2502 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2503 * We have two. So, just take that flag to mean that we
2504 * might have S/G lists, and look at the S/G segment count
2505 * to figure out whether that is the case for each individual
2508 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2509 bus_dma_segment_t *req_sg;
2511 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2512 request = (uint8_t *)req_sg[0].ds_addr;
2514 request = ccb->smpio.smp_request;
2516 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2517 bus_dma_segment_t *rsp_sg;
2519 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2520 response = (uint8_t *)rsp_sg[0].ds_addr;
2522 response = ccb->smpio.smp_response;
/* No S/G lists at all: use the plain buffer pointers. */
2524 request = ccb->smpio.smp_request;
2525 response = ccb->smpio.smp_response;
2528 cm = mps_alloc_command(sc);
2530 mps_printf(sc, "%s: cannot allocate command\n", __func__);
2531 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2536 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2537 bzero(req, sizeof(*req));
2538 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2540 /* Allow the chip to use any route to this SAS address. */
2541 req->PhysicalPort = 0xff;
2543 req->RequestDataLength = ccb->smpio.smp_request_len;
2545 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2547 mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
2548 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2550 mpi_init_sge(cm, req, &req->SGL);
2553 * Set up a uio to pass into mps_map_command(). This allows us to
2554 * do one map command, and one busdma call in there.
2556 cm->cm_uio.uio_iov = cm->cm_iovec;
2557 cm->cm_uio.uio_iovcnt = 2;
2558 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2561 * The read/write flag isn't used by busdma, but set it just in
2562 * case. This isn't exactly accurate, either, since we're going in
2565 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 carries the outbound request, iovec 1 the inbound response. */
2567 cm->cm_iovec[0].iov_base = request;
2568 cm->cm_iovec[0].iov_len = req->RequestDataLength;
2569 cm->cm_iovec[1].iov_base = response;
2570 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2572 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2573 cm->cm_iovec[1].iov_len;
2576 * Trigger a warning message in mps_data_cb() for the user if we
2577 * wind up exceeding two S/G segments. The chip expects one
2578 * segment for the request and another for the response.
2580 cm->cm_max_segs = 2;
2582 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2583 cm->cm_complete = mpssas_smpio_complete;
2584 cm->cm_complete_data = ccb;
2587 * Tell the mapping code that we're using a uio, and that this is
2588 * an SMP passthrough request. There is a little special-case
2589 * logic there (in mps_data_cb()) to handle the bidirectional
2592 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2593 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2595 /* The chip data format is little endian. */
2596 req->SASAddress.High = htole32(sasaddr >> 32);
2597 req->SASAddress.Low = htole32(sasaddr);
2600 * XXX Note that we don't have a timeout/abort mechanism here.
2601 * From the manual, it looks like task management requests only
2602 * work for SCSI IO and SATA passthrough requests. We may need to
2603 * have a mechanism to retry requests in the event of a chip reset
2604 * at least. Hopefully the chip will insure that any errors short
2605 * of that are relayed back to the driver.
2607 error = mps_map_command(sc, cm);
2608 if ((error != 0) && (error != EINPROGRESS)) {
2609 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
/* Mapping failed outright: release the command and fail the CCB. */
2617 mps_free_command(sc, cm);
2618 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: work out which SAS address should receive the
 * SMP request, then hand off to mpssas_send_smpcmd().  If the target itself
 * is an SMP target (an expander) its own address is used; otherwise the
 * parent device's address is used, on the assumption that the parent is the
 * expander the device hangs off.  Fails the CCB with CAM_REQ_INVALID (or
 * CAM_SEL_TIMEOUT for a nonexistent target) when no address can be found.
 */
2625 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2627 struct mps_softc *sc;
2628 struct mpssas_target *targ;
2629 uint64_t sasaddr = 0;
2634 * Make sure the target exists.
2636 targ = &sassc->targets[ccb->ccb_h.target_id];
2637 if (targ->handle == 0x0) {
2638 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
2639 ccb->ccb_h.target_id);
2640 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2646 * If this device has an embedded SMP target, we'll talk to it
2648 * figure out what the expander's address is.
2650 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2651 sasaddr = targ->sasaddr;
2654 * If we don't have a SAS address for the expander yet, try
2655 * grabbing it from the page 0x83 information cached in the
2656 * transport layer for this target. LSI expanders report the
2657 * expander SAS address as the port-associated SAS address in
2658 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2661 * XXX KDM disable this for now, but leave it commented out so that
2662 * it is obvious that this is another possible way to get the SAS
2665 * The parent handle method below is a little more reliable, and
2666 * the other benefit is that it works for devices other than SES
2667 * devices. So you can send a SMP request to a da(4) device and it
2668 * will get routed to the expander that device is attached to.
2669 * (Assuming the da(4) device doesn't contain an SMP target...)
2673 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2677 * If we still don't have a SAS address for the expander, look for
2678 * the parent device of this device, which is probably the expander.
2681 #ifdef OLD_MPS_PROBE
2682 struct mpssas_target *parent_target;
2685 if (targ->parent_handle == 0x0) {
2686 mps_printf(sc, "%s: handle %d does not have a valid "
2687 "parent handle!\n", __func__, targ->handle);
2688 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Legacy probe path: look the parent target up by its handle. */
2691 #ifdef OLD_MPS_PROBE
2692 parent_target = mpssas_find_target_by_handle(sassc, 0,
2693 targ->parent_handle);
2695 if (parent_target == NULL) {
2696 mps_printf(sc, "%s: handle %d does not have a valid "
2697 "parent target!\n", __func__, targ->handle);
2698 ccb->ccb_h.status = CAM_REQ_INVALID;
2702 if ((parent_target->devinfo &
2703 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2704 mps_printf(sc, "%s: handle %d parent %d does not "
2705 "have an SMP target!\n", __func__,
2706 targ->handle, parent_target->handle);
2707 ccb->ccb_h.status = CAM_REQ_INVALID;
2712 sasaddr = parent_target->sasaddr;
2713 #else /* OLD_MPS_PROBE */
/* Current probe path: parent info is cached on the target itself. */
2714 if ((targ->parent_devinfo &
2715 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2716 mps_printf(sc, "%s: handle %d parent %d does not "
2717 "have an SMP target!\n", __func__,
2718 targ->handle, targ->parent_handle);
2719 ccb->ccb_h.status = CAM_REQ_INVALID;
2723 if (targ->parent_sasaddr == 0x0) {
2724 mps_printf(sc, "%s: handle %d parent handle %d does "
2725 "not have a valid SAS address!\n",
2726 __func__, targ->handle, targ->parent_handle);
2727 ccb->ccb_h.status = CAM_REQ_INVALID;
2731 sasaddr = targ->parent_sasaddr;
2732 #endif /* OLD_MPS_PROBE */
2737 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
2738 __func__, targ->handle);
2739 ccb->ccb_h.status = CAM_REQ_INVALID;
2742 mpssas_send_smpcmd(sassc, ccb, sasaddr);
2750 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB by issuing a SCSI task-management Target
 * Reset (with link-reset message flags) to the target's device handle.
 * The request is sent as a high-priority command; completion is handled by
 * mpssas_resetdev_complete().  Caller must hold the driver lock (asserted).
 */
2753 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2755 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2756 struct mps_softc *sc;
2757 struct mps_command *tm;
2758 struct mpssas_target *targ;
2760 mps_dprint(sassc->sc, MPS_TRACE, __func__);
2761 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2764 tm = mps_alloc_command(sc);
2766 mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
2767 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2772 targ = &sassc->targets[ccb->ccb_h.target_id];
2773 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2774 req->DevHandle = targ->handle;
2775 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2776 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2778 /* SAS Hard Link Reset / SATA Link Reset */
2779 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests must use the high-priority descriptor. */
2782 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2783 tm->cm_complete = mpssas_resetdev_complete;
2784 tm->cm_complete_data = ccb;
2786 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task-management command issued
 * by mpssas_action_resetdev().  Translates the TM response code into a CAM
 * status on the originating CCB, announces the reset to CAM on success,
 * and frees the task-management command.  Runs with the driver lock held
 * (asserted).
 */
2790 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2792 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2795 mps_dprint(sc, MPS_TRACE, __func__);
2796 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2798 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2799 ccb = tm->cm_complete_data;
2802 * Currently there should be no way we can hit this case. It only
2803 * happens when we have a failure to allocate chain frames, and
2804 * task management commands don't have S/G lists.
2806 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2807 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2809 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2811 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2812 "This should not happen!\n", __func__, tm->cm_flags,
2814 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2818 kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2819 resp->IOCStatus, resp->ResponseCode);
2821 if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2822 ccb->ccb_h.status = CAM_REQ_CMP;
/* Tell CAM a bus-device reset was delivered to this target. */
2823 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2827 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2831 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point (used e.g. while dumping/panicked, when
 * interrupts are unavailable): disables MPS_TRACE debugging to avoid
 * drowning the console, then services the hardware synchronously.
 */
2836 mpssas_poll(struct cam_sim *sim)
2838 struct mpssas_softc *sassc;
2840 sassc = cam_sim_softc(sim);
2842 if (sassc->sc->mps_debug & MPS_TRACE) {
2843 /* frequent debug messages during a panic just slow
2844 * everything down too much.
2846 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2847 sassc->sc->mps_debug &= ~MPS_TRACE;
/* Process pending completions as if an interrupt had fired. */
2850 mps_intr_locked(sassc->sc);
/*
 * Completion callback for a queued bus rescan CCB: logs the completed
 * path, then frees the path and the CCB.  On pre-1000006 FreeBSD versions
 * it also refreshes EEDP information for existing LUNs, since the
 * AC_ADVINFO_CHANGED async path is not available there.  Runs with the
 * driver lock held (asserted).
 */
2854 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2856 struct mpssas_softc *sassc;
2859 if (done_ccb == NULL)
2862 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2864 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2866 xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2867 mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2869 xpt_free_path(done_ccb->ccb_h.path);
2870 xpt_free_ccb(done_ccb);
2872 #if __FreeBSD_version < 1000006
2874 * Before completing scan, get EEDP stuff for all of the existing
2877 mpssas_check_eedp(sassc);
2882 /* thread to handle bus rescans */
/*
 * Kernel thread body: periodically wakes (or is woken via the ccb_scanq
 * wait channel), drains queued rescan CCBs, and exits cleanly when the
 * MPSSAS_SHUTDOWN flag is set.  Clears MPSSAS_SCANTHREAD and wakes any
 * waiter on sassc->flags before exiting so teardown can proceed.
 */
2884 mpssas_scanner_thread(void *arg)
2886 struct mpssas_softc *sassc;
2887 struct mps_softc *sc;
2890 sassc = (struct mpssas_softc *)arg;
2893 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2897 /* Sleep for 1 second and check the queue status*/
2898 lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 1 * hz);
2899 if (sassc->flags & MPSSAS_SHUTDOWN) {
2900 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2904 /* Get first work */
2905 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
2908 /* Got first work */
2909 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
/* Re-check shutdown after each dequeue so we stop promptly. */
2911 if (sassc->flags & MPSSAS_SHUTDOWN) {
2912 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2918 sassc->flags &= ~MPSSAS_SCANTHREAD;
2919 wakeup(&sassc->flags);
2921 mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
/*
 * Queue a rescan CCB for the scanner thread: stamps the CCB with the
 * softc and the mpssas_rescan_done callback, re-initializes its header at
 * MPS_PRIORITY_XPT, appends it to ccb_scanq, and wakes the scanner thread.
 * Caller must hold the driver lock (asserted).
 */
2926 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2930 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2932 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2937 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2938 mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2940 /* Prepare request */
2941 ccb->ccb_h.ppriv_ptr1 = sassc;
2942 ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2943 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2944 TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2945 wakeup(&sassc->ccb_scanq);
2948 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback (FreeBSD >= 1000006 path).  On
 * AC_ADVINFO_CHANGED with buftype CDAI_TYPE_RCAPLONG for a device on our
 * SIM, fetches the long read-capacity data via an XPT_DEV_ADVINFO CCB and
 * records per-LUN EEDP (protection-information) state and block size,
 * creating the LUN list entry if it does not exist yet.
 */
2950 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2953 struct mps_softc *sc;
2955 sc = (struct mps_softc *)callback_arg;
2958 case AC_ADVINFO_CHANGED: {
2959 struct mpssas_target *target;
2960 struct mpssas_softc *sassc;
2961 struct scsi_read_capacity_data_long rcap_buf;
2962 struct ccb_dev_advinfo cdai;
2963 struct mpssas_lun *lun;
2968 buftype = (uintptr_t)arg;
2974 * We're only interested in read capacity data changes.
2976 if (buftype != CDAI_TYPE_RCAPLONG)
2980 * We're only interested in devices that are attached to
2983 if (xpt_path_path_id(path) != sassc->sim->path_id)
2987 * We should have a handle for this, but check to make sure.
2989 target = &sassc->targets[xpt_path_target_id(path)];
2990 if (target->handle == 0)
2993 lunid = xpt_path_lun_id(path);
/* Look for an existing entry for this LUN; create one if absent. */
2995 SLIST_FOREACH(lun, &target->luns, lun_link) {
2996 if (lun->lun_id == lunid) {
3002 if (found_lun == 0) {
3003 lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
3004 M_INTWAIT | M_ZERO);
3006 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
3007 "LUN for EEDP support.\n");
3010 lun->lun_id = lunid;
3011 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Pull the cached long read-capacity data from the transport layer. */
3014 bzero(&rcap_buf, sizeof(rcap_buf));
3015 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3016 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3017 cdai.ccb_h.flags = CAM_DIR_IN;
3018 cdai.buftype = CDAI_TYPE_RCAPLONG;
3020 cdai.bufsiz = sizeof(rcap_buf);
3021 cdai.buf = (uint8_t *)&rcap_buf;
3022 xpt_action((union ccb *)&cdai);
3023 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3024 cam_release_devq(cdai.ccb_h.path,
3027 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3028 && (rcap_buf.prot & SRC16_PROT_EN)) {
3029 lun->eedp_formatted = TRUE;
3030 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3032 lun->eedp_formatted = FALSE;
3033 lun->eedp_block_size = 0;
3041 #else /* __FreeBSD_version >= 1000006 */
/*
 * Legacy EEDP discovery (FreeBSD < 1000006 path).  Walks every valid
 * target and, for each LUN that CAM knows a periph for, issues a
 * READ CAPACITY(16) so mpssas_read_cap_done() can record whether the LUN
 * is formatted with protection information.  The rcap buffer and CCB are
 * allocated here and released by the completion path (or on error below).
 */
3044 mpssas_check_eedp(struct mpssas_softc *sassc)
3046 struct mps_softc *sc = sassc->sc;
3047 struct ccb_scsiio *csio;
3048 struct scsi_read_capacity_16 *scsi_cmd;
3049 struct scsi_read_capacity_eedp *rcap_buf;
3051 path_id_t pathid = cam_sim_path(sassc->sim);
3052 target_id_t targetid;
3054 struct cam_periph *found_periph;
3055 struct mpssas_target *target;
3056 struct mpssas_lun *lun;
3060 * Issue a READ CAPACITY 16 command to each LUN of each target. This
3061 * info is used to determine if the LUN is formatted for EEDP support.
3063 for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
3064 target = &sassc->targets[targetid];
3065 if (target->handle == 0x0) {
3072 kmalloc(sizeof(struct scsi_read_capacity_eedp),
3073 M_MPT2, M_INTWAIT | M_ZERO);
3074 if (rcap_buf == NULL) {
3075 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
3076 "capacity buffer for EEDP support.\n");
3080 ccb = kmalloc(sizeof(union ccb), M_TEMP,
3083 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
3084 pathid, targetid, lunid) != CAM_REQ_CMP) {
3085 mps_dprint(sc, MPS_FAULT, "Unable to create "
3086 "path for EEDP support\n");
3087 kfree(rcap_buf, M_MPT2);
3093 * If a periph is returned, the LUN exists. Create an
3094 * entry in the target's LUN list.
3096 if ((found_periph = cam_periph_find(ccb->ccb_h.path,
3099 * If LUN is already in list, don't create a new
3103 SLIST_FOREACH(lun, &target->luns, lun_link) {
3104 if (lun->lun_id == lunid) {
3110 lun = kmalloc(sizeof(struct mpssas_lun),
3111 M_MPT2, M_INTWAIT | M_ZERO);
3112 lun->lun_id = lunid;
3113 SLIST_INSERT_HEAD(&target->luns, lun,
3119 * Issue a READ CAPACITY 16 command for the LUN.
3120 * The mpssas_read_cap_done function will load
3121 * the read cap info into the LUN struct.
3124 csio->ccb_h.func_code = XPT_SCSI_IO;
3125 csio->ccb_h.flags = CAM_DIR_IN;
3126 csio->ccb_h.retry_count = 4;
3127 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3128 csio->ccb_h.timeout = 60000;
3129 csio->data_ptr = (uint8_t *)rcap_buf;
3130 csio->dxfer_len = sizeof(struct
3131 scsi_read_capacity_eedp);
3132 csio->sense_len = MPS_SENSE_LEN;
3133 csio->cdb_len = sizeof(*scsi_cmd);
3134 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the READ CAPACITY(16) CDB (opcode 0x9E, service action 0x10). */
3136 scsi_cmd = (struct scsi_read_capacity_16 *)
3137 &csio->cdb_io.cdb_bytes;
3138 bzero(scsi_cmd, sizeof(*scsi_cmd));
3139 scsi_cmd->opcode = 0x9E;
3140 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3141 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
3142 scsi_read_capacity_eedp);
3145 * Set the path, target and lun IDs for the READ
3148 ccb->ccb_h.path_id =
3149 xpt_path_path_id(ccb->ccb_h.path);
3150 ccb->ccb_h.target_id =
3151 xpt_path_target_id(ccb->ccb_h.path);
3152 ccb->ccb_h.target_lun =
3153 xpt_path_lun_id(ccb->ccb_h.path);
3155 ccb->ccb_h.ppriv_ptr1 = sassc;
/* No periph for this LUN: free the buffer and path we allocated. */
3158 kfree(rcap_buf, M_MPT2);
3159 xpt_free_path(ccb->ccb_h.path);
3162 } while (found_periph);
/*
 * Completion callback for the internally-generated READ CAPACITY(16)
 * issued by mpssas_check_eedp().  Releases the device queue (internal
 * commands never return through CAM's normal path), then records the
 * LUN's EEDP state and block size from the returned data, and finally
 * frees the rcap buffer, the path, and the CCB.
 */
3168 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3170 struct mpssas_softc *sassc;
3171 struct mpssas_target *target;
3172 struct mpssas_lun *lun;
3173 struct scsi_read_capacity_eedp *rcap_buf;
3175 if (done_ccb == NULL)
3179 * Driver need to release devq, it Scsi command is
3180 * generated by driver internally.
3181 * Currently there is a single place where driver
3182 * calls scsi command internally. In future if driver
3183 * calls more scsi command internally, it needs to release
3184 * devq internally, since those command will not go back to
3187 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3188 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3189 xpt_release_devq(done_ccb->ccb_h.path,
3190 /*count*/ 1, /*run_queue*/TRUE);
3193 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3196 * Get the LUN ID for the path and look it up in the LUN list for the
3199 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3200 target = &sassc->targets[done_ccb->ccb_h.target_id];
3201 SLIST_FOREACH(lun, &target->luns, lun_link) {
3202 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3206 * Got the LUN in the target's LUN list. Fill it in
3207 * with EEDP info. If the READ CAP 16 command had some
3208 * SCSI error (common if command is not supported), mark
3209 * the lun as not supporting EEDP and set the block size
3212 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3213 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3214 lun->eedp_formatted = FALSE;
3215 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte is PROT_EN: PI is enabled on the medium. */
3219 if (rcap_buf->protect & 0x01) {
3220 lun->eedp_formatted = TRUE;
3221 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3226 // Finished with this CCB and path.
3227 kfree(rcap_buf, M_MPT2);
3228 xpt_free_path(done_ccb->ccb_h.path);
3229 xpt_free_ccb(done_ccb);
3231 #endif /* __FreeBSD_version >= 1000006 */
/*
 * Kick off SAS topology discovery: bump the startup refcount (keeps the
 * simq frozen until discovery completes), set wait_for_port_enable, and
 * send the firmware Port Enable request.
 */
3234 mpssas_startup(struct mps_softc *sc)
3236 struct mpssas_softc *sassc;
3239 * Send the port enable message and set the wait_for_port_enable flag.
3240 * This flag helps to keep the simq frozen until all discovery events
3244 mpssas_startup_increment(sassc);
3245 sc->wait_for_port_enable = 1;
3246 mpssas_send_portenable(sc);
/*
 * Build and submit an MPI2 Port Enable request to the IOC.  Completion is
 * handled by mpssas_portenable_complete().  Fails silently (via the
 * caller-visible return) if no command can be allocated.
 */
3251 mpssas_send_portenable(struct mps_softc *sc)
3253 MPI2_PORT_ENABLE_REQUEST *request;
3254 struct mps_command *cm;
3256 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3258 if ((cm = mps_alloc_command(sc)) == NULL)
3260 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3261 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3262 request->MsgFlags = 0;
3264 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3265 cm->cm_complete = mpssas_portenable_complete;
3269 mps_map_command(sc, cm);
3270 mps_dprint(sc, MPS_TRACE,
3271 "mps_send_portenable finished cm %p req %p complete %p\n",
3272 cm, cm->cm_req, cm->cm_complete);
3277 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3279 MPI2_PORT_ENABLE_REPLY *reply;
3280 struct mpssas_softc *sassc;
3281 struct mpssas_target *target;
3284 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3288 * Currently there should be no way we can hit this case. It only
3289 * happens when we have a failure to allocate chain frames, and
3290 * port enable commands don't have S/G lists.
3292 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3293 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3294 "This should not happen!\n", __func__, cm->cm_flags);
3297 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3299 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3300 else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3301 MPI2_IOCSTATUS_SUCCESS)
3302 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3304 mps_free_command(sc, cm);
3305 if (sc->mps_ich.ich_arg != NULL) {
3306 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3307 config_intrhook_disestablish(&sc->mps_ich);
3308 sc->mps_ich.ich_arg = NULL;
3312 * Get WarpDrive info after discovery is complete but before the scan
3313 * starts. At this point, all devices are ready to be exposed to the
3314 * OS. If devices should be hidden instead, take them out of the
3315 * 'targets' array before the scan. The devinfo for a disk will have
3316 * some info and a volume's will be 0. Use that to remove disks.
3318 mps_wd_config_pages(sc);
3319 if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
3320 && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
3321 || (sc->WD_valid_config && (sc->WD_hide_expose ==
3322 MPS_WD_HIDE_IF_VOLUME))) {
3323 for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
3324 target = &sassc->targets[i];
3325 if (target->devinfo) {
3326 target->devinfo = 0x0;
3327 target->encl_handle = 0x0;
3328 target->encl_slot = 0x0;
3329 target->handle = 0x0;
3331 target->linkrate = 0x0;
3332 target->flags = 0x0;
3338 * Done waiting for port enable to complete. Decrement the refcount.
3339 * If refcount is 0, discovery is complete and a rescan of the bus can
3340 * take place. Since the simq was explicitly frozen before port
3341 * enable, it must be explicitly released here to keep the
3342 * freeze/release count in sync.
3344 sc->wait_for_port_enable = 0;
3345 sc->port_enable_complete = 1;
3346 mpssas_startup_decrement(sassc);
3347 xpt_release_simq(sassc->sim, 1);