2 * Copyright (c) 2009 Yahoo! Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2011 LSI Corp.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
39 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * LSI MPT-Fusion Host Adapter FreeBSD
53 * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
56 /* Communications core for LSI MPT2 */
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
69 #include <sys/malloc.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
80 #include <machine/stdarg.h>
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
108 #define MPSSAS_DISCOVERY_TIMEOUT 20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
112 * static array to check SCSI OpCode for EEDP protection bits
/*
 * EEDP (End-to-End Data Protection) flag lookup table, indexed by SCSI
 * CDB opcode (0x00-0xff).  Non-zero entries select the EEDP operation
 * to program into the SCSI IO request for that opcode:
 *   PRO_R - reads  (check and remove protection info)
 *   PRO_W - writes (insert protection info)
 *   PRO_V - write-and-verify (insert protection info)
 * e.g. 0x28/0x2A (READ/WRITE(10)), 0x88/0x8A (READ/WRITE(16)),
 * 0xA8/0xAA (READ/WRITE(12)) are marked; all other opcodes get 0.
 */
114 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
115 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
116 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
117 static uint8_t op_code_prot[256] = {
118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
120 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
122 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
123 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
124 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
125 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
126 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
127 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
128 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
136 static void mpssas_log_command(struct mps_command *, const char *, ...)
138 #if 0 /* XXX unused */
139 static void mpssas_discovery_timeout(void *data);
141 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
142 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
143 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
144 static void mpssas_poll(struct cam_sim *sim);
145 static void mpssas_scsiio_timeout(void *data);
146 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
147 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
148 struct mps_command *cm, union ccb *ccb);
149 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
150 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
151 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
152 #if __FreeBSD_version >= 900026
153 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
154 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
156 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
157 #endif //FreeBSD_version >= 900026
158 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
159 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
160 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
161 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
162 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
163 static void mpssas_scanner_thread(void *arg);
164 #if __FreeBSD_version >= 1000006
165 static void mpssas_async(void *callback_arg, uint32_t code,
166 struct cam_path *path, void *arg);
168 static void mpssas_check_eedp(struct mpssas_softc *sassc);
169 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
171 static int mpssas_send_portenable(struct mps_softc *sc);
172 static void mpssas_portenable_complete(struct mps_softc *sc,
173 struct mps_command *cm);
/*
 * Look up a target by its firmware device handle, scanning the sassc
 * target array linearly starting at index 'start'.
 * NOTE(review): the loop's return path is not visible in this extract;
 * presumably it returns the matching target, or NULL when no entry in
 * [start, MaxTargets) has this handle — confirm against full source.
 */
175 struct mpssas_target *
176 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
178 struct mpssas_target *target;
181 for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
182 target = &sassc->targets[i];
183 if (target->handle == handle)
190 /* we need to freeze the simq during attach and diag reset, to avoid failing
191 * commands before device handles have been found by discovery. Since
192 * discovery involves reading config pages and possibly sending commands,
193 * discovery actions may continue even after we receive the end of discovery
194 * event, so refcount discovery actions instead of assuming we can unfreeze
195 * the simq when we get the event.
/*
 * Take a reference on in-progress discovery work.  The first reference
 * taken while MPSSAS_IN_STARTUP is set freezes the SIM queue so CAM
 * holds off commands until device handles are known (see the refcount
 * rationale in the comment above).
 */
198 mpssas_startup_increment(struct mpssas_softc *sassc)
200 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
201 if (sassc->startup_refcount++ == 0) {
202 /* just starting, freeze the simq */
203 mps_dprint(sassc->sc, MPS_INFO,
204 "%s freezing simq\n", __func__);
205 xpt_freeze_simq(sassc->sim, 1);
207 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
208 sassc->startup_refcount);
/*
 * Drop a discovery reference taken by mpssas_startup_increment().
 * When the last reference goes away, startup is over: clear
 * MPSSAS_IN_STARTUP, release the SIM queue, and rescan the whole bus
 * (NULL target == wildcard) for the now-final topology.
 */
213 mpssas_startup_decrement(struct mpssas_softc *sassc)
215 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
216 if (--sassc->startup_refcount == 0) {
217 /* finished all discovery-related actions, release
218 * the simq and rescan for the latest topology.
220 mps_dprint(sassc->sc, MPS_INFO,
221 "%s releasing simq\n", __func__);
222 sassc->flags &= ~MPSSAS_IN_STARTUP;
223 xpt_release_simq(sassc->sim, 1);
224 mpssas_rescan_target(sassc->sc, NULL);
226 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
227 sassc->startup_refcount);
231 /* LSI's firmware requires us to stop sending commands when we're doing task
232 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command frame for a task-management (TM)
 * request.  The first outstanding TM freezes the SIM queue, because the
 * firmware requires normal I/O to be quiesced while TMs are in flight
 * (see the comment above); tm_count tracks how many are outstanding.
 */
236 mpssas_alloc_tm(struct mps_softc *sc)
238 struct mps_command *tm;
240 tm = mps_alloc_high_priority_command(sc);
242 if (sc->sassc->tm_count++ == 0) {
243 mps_printf(sc, "%s freezing simq\n", __func__);
244 xpt_freeze_simq(sc->sassc->sim, 1);
246 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
247 sc->sassc->tm_count);
/*
 * Release a task-management command allocated by mpssas_alloc_tm().
 * Dropping the last outstanding TM releases the SIM queue; a private
 * refcount (tm_count) is used rather than CAM's so that diag-reset
 * cleanup can reason about it independently.
 */
253 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
258 /* if there are no TMs in use, we can release the simq. We use our
259 * own refcount so that it's easier for a diag reset to cleanup and
262 if (--sc->sassc->tm_count == 0) {
263 mps_printf(sc, "%s releasing simq\n", __func__);
264 xpt_release_simq(sc->sassc->sim, 1);
266 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
267 sc->sassc->tm_count);
269 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when
 * targ == NULL (wildcard target id).  The target id is recovered from
 * the target's position in the sassc->targets array.  Allocates a CCB,
 * builds a path to it, and hands it to the scanner thread via
 * mpssas_rescan().  XXX scan is currently hardwired to XPT_SCAN_BUS.
 */
274 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
276 struct mpssas_softc *sassc = sc->sassc;
278 target_id_t targetid;
281 pathid = cam_sim_path(sassc->sim);
283 targetid = CAM_TARGET_WILDCARD;
285 targetid = targ - sassc->targets;
288 * Allocate a CCB and schedule a rescan.
290 ccb = xpt_alloc_ccb();
292 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
293 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
294 mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
299 /* XXX Hardwired to scan the bus for now */
300 ccb->ccb_h.func_code = XPT_SCAN_BUS;
301 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
302 mpssas_rescan(sassc, ccb);
/*
 * printf-style logging helper for a command: formats a CAM path prefix
 * (the command's CCB path when available, otherwise a "noperiph"
 * sim:bus:target:lun tuple), the SCSI CDB and transfer length for
 * XPT_SCSI_IO requests, the SMID, and finally the caller's message,
 * then emits the whole line via kprintf().
 */
306 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
316 sbuf_new(&sb, str, sizeof(str), 0);
320 if (cm->cm_ccb != NULL) {
321 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
323 sbuf_cat(&sb, path_str);
324 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
325 scsi_command_string(&cm->cm_ccb->csio, &sb);
326 sbuf_printf(&sb, "length %d ",
327 cm->cm_ccb->csio.dxfer_len);
331 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
332 cam_sim_name(cm->cm_sc->sassc->sim),
333 cam_sim_unit(cm->cm_sc->sassc->sim),
334 cam_sim_bus(cm->cm_sc->sassc->sim),
335 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
339 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
340 sbuf_vprintf(&sb, fmt, ap);
342 kprintf("%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  Unlike bare-drive removal, volume
 * removal does NOT chain an MPI2_SAS_OP_REMOVE_DEVICE request: on
 * success the target's identifying fields are simply cleared here.
 * On failure the target is left intact (devname/sasaddr preserved) so
 * the same target id can be re-used if the volume reappears.
 * A NULL reply means a diag reset intervened; just free the TM.
 */
348 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
350 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
351 struct mpssas_target *targ;
354 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
356 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
357 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
361 /* XXX retry the remove after the diag reset completes? */
362 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
364 mpssas_free_tm(sc, tm);
368 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
369 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
370 reply->IOCStatus, handle);
371 mpssas_free_tm(sc, tm);
375 mps_printf(sc, "Reset aborted %u commands\n", reply->TerminationCount);
376 mps_free_reply(sc, tm->cm_reply_data);
377 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
379 mps_printf(sc, "clearing target %u handle 0x%04x\n", targ->tid, handle);
382 * Don't clear target if remove fails because things will get confusing.
383 * Leave the devname and sasaddr intact so that we know to avoid reusing
384 * this target id if possible, and so we can assign the same target id
385 * to this device if it comes back in the future.
387 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
390 targ->encl_handle = 0x0;
391 targ->encl_slot = 0x0;
392 targ->exp_dev_handle = 0x0;
394 targ->linkrate = 0x0;
399 mpssas_free_tm(sc, tm);
403 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
404 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by firmware handle:
 * mark the target MPSSAS_TARGET_INREMOVAL, kick off a CAM rescan, and
 * issue a target-reset TM (with SAS hard-link / SATA link reset flags)
 * whose completion routine is mpssas_remove_volume().
 * WD (WarpDrive) controllers configured to always expose disks return
 * early without doing anything.  Unknown handles are logged and
 * ignored (FIXME in original: proper action undetermined).
 */
407 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
409 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
410 struct mps_softc *sc;
411 struct mps_command *cm;
412 struct mpssas_target *targ = NULL;
414 mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);
419 * If this is a WD controller, determine if the disk should be exposed
420 * to the OS or not. If disk should be exposed, return from this
421 * function without doing anything.
423 if (sc->WD_available && (sc->WD_hide_expose ==
424 MPS_WD_EXPOSE_ALWAYS)) {
429 targ = mpssas_find_target_by_handle(sassc, 0, handle);
431 /* FIXME: what is the action? */
432 /* We don't know about this device? */
433 kprintf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
437 targ->flags |= MPSSAS_TARGET_INREMOVAL;
439 cm = mpssas_alloc_tm(sc);
441 mps_printf(sc, "%s: command alloc failure\n", __func__);
445 mpssas_rescan_target(sc, targ);
447 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
448 req->DevHandle = targ->handle;
449 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
450 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
452 /* SAS Hard Link Reset / SATA Link Reset */
453 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
457 cm->cm_desc.HighPriority.RequestFlags =
458 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
459 cm->cm_complete = mpssas_remove_volume;
460 cm->cm_complete_data = (void *)(uintptr_t)handle;
461 mps_map_command(sc, cm);
465 * The MPT2 firmware performs debounce on the link to avoid transient link
466 * errors and false removals. When it does decide that link has been lost
467 * and a device need to go away, it expects that the host will perform a
468 * target reset and then an op remove. The reset has the side-effect of
469 * aborting any outstanding requests for the device, which is required for
470 * the op-remove to succeed. It's not clear if the host should check for
471 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device identified by firmware handle.  Per
 * the firmware contract described above: first a target reset (which
 * aborts outstanding I/O), then — from the completion routine
 * mpssas_remove_device() — the actual op-remove.  Marks the target
 * INREMOVAL and triggers a rescan before sending the TM.  WD
 * controllers set to always expose disks return early; unknown handles
 * are logged and ignored (FIXME in original).
 */
474 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
476 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
477 struct mps_softc *sc;
478 struct mps_command *cm;
479 struct mpssas_target *targ = NULL;
481 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
484 * If this is a WD controller, determine if the disk should be exposed
485 * to the OS or not. If disk should be exposed, return from this
486 * function without doing anything.
489 if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
490 MPS_WD_EXPOSE_ALWAYS)) {
494 targ = mpssas_find_target_by_handle(sassc, 0, handle);
496 /* FIXME: what is the action? */
497 /* We don't know about this device? */
498 kprintf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
502 targ->flags |= MPSSAS_TARGET_INREMOVAL;
504 cm = mpssas_alloc_tm(sc);
506 mps_printf(sc, "%s: command alloc failure\n", __func__);
510 mpssas_rescan_target(sc, targ);
512 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
513 memset(req, 0, sizeof(*req));
514 req->DevHandle = targ->handle;
515 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
516 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
518 /* SAS Hard Link Reset / SATA Link Reset */
519 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
523 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
524 cm->cm_complete = mpssas_remove_device;
525 cm->cm_complete_data = (void *)(uintptr_t)handle;
526 mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  On success, the TM command frame is reused
 * in place to send the second stage: an SAS IO-unit-control request
 * with MPI2_SAS_OP_REMOVE_DEVICE, completing in
 * mpssas_remove_complete().  Any commands still queued on the target
 * (missed by the reset) are completed back to CAM with
 * CAM_DEV_NOT_THERE.  A NULL reply (diag reset) or error IOCStatus
 * aborts the sequence and frees the TM.
 */
530 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
532 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
533 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
534 struct mpssas_target *targ;
535 struct mps_command *next_cm;
538 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
540 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
541 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
545 * Currently there should be no way we can hit this case. It only
546 * happens when we have a failure to allocate chain frames, and
547 * task management commands don't have S/G lists.
549 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
550 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
551 "This should not happen!\n", __func__, tm->cm_flags,
553 mpssas_free_tm(sc, tm);
558 /* XXX retry the remove after the diag reset completes? */
559 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
561 mpssas_free_tm(sc, tm);
565 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
566 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
567 reply->IOCStatus, handle);
568 mpssas_free_tm(sc, tm);
572 mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
573 reply->TerminationCount);
574 mps_free_reply(sc, tm->cm_reply_data);
575 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
577 /* Reuse the existing command */
578 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
579 memset(req, 0, sizeof(*req));
580 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
581 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
582 req->DevHandle = handle;
584 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
585 tm->cm_complete = mpssas_remove_complete;
586 tm->cm_complete_data = (void *)(uintptr_t)handle;
588 mps_map_command(sc, tm);
590 mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
592 TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
595 mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
596 ccb = tm->cm_complete_data;
597 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
598 mpssas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion handler for the
 * MPI2_SAS_OP_REMOVE_DEVICE request issued by mpssas_remove_device().
 * On success the target's identifying fields are zeroed; on failure
 * the target is deliberately left intact (devname/sasaddr kept) so the
 * same target id can be re-assigned if the device returns.  A NULL
 * reply most likely means a chip reset occurred; just free the TM.
 */
603 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
605 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
607 struct mpssas_target *targ;
609 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
611 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
612 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
615 * Currently there should be no way we can hit this case. It only
616 * happens when we have a failure to allocate chain frames, and
617 * task management commands don't have S/G lists.
619 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
620 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
621 "This should not happen!\n", __func__, tm->cm_flags,
623 mpssas_free_tm(sc, tm);
628 /* most likely a chip reset */
629 mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
631 mpssas_free_tm(sc, tm);
635 mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
636 handle, reply->IOCStatus);
639 * Don't clear target if remove fails because things will get confusing.
640 * Leave the devname and sasaddr intact so that we know to avoid reusing
641 * this target id if possible, and so we can assign the same target id
642 * to this device if it comes back in the future.
644 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
647 targ->encl_handle = 0x0;
648 targ->encl_slot = 0x0;
649 targ->exp_dev_handle = 0x0;
651 targ->linkrate = 0x0;
656 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (SAS device /
 * topology / enclosure status changes, discovery, broadcast
 * primitives, IR volume and physical-disk events, log entries) and
 * register mpssas_evt_handler for them with the core event code.
 */
660 mpssas_register_events(struct mps_softc *sc)
665 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
666 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
667 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
668 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
669 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
670 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
671 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
672 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
673 setbit(events, MPI2_EVENT_IR_VOLUME);
674 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
675 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
676 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
678 mps_register_events(sc, events, mpssas_evt_handler, NULL,
679 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the per-SAS softc and target
 * array, create the CAM SIM and its request queue, start the firmware
 * event taskqueue and the rescan kthread, register the (single, faked)
 * bus with CAM, freeze the SIM queue until discovery finishes, set up
 * the discovery timeout callout, optionally hook AC_ADVINFO_CHANGED
 * async events (FreeBSD >= 1000006), and register for firmware events.
 * NOTE(review): error-unwind paths are not fully visible in this
 * extract — confirm cleanup ordering against the full source.
 */
685 mps_attach_sas(struct mps_softc *sc)
687 struct mpssas_softc *sassc;
688 #if __FreeBSD_version >= 1000006
693 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
695 sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
696 sassc->targets = kmalloc(sizeof(struct mpssas_target) *
697 sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
701 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
702 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
707 unit = device_get_unit(sc->mps_dev);
708 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
709 unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
710 cam_simq_release(sassc->devq);
711 if (sassc->sim == NULL) {
712 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
717 TAILQ_INIT(&sassc->ev_queue);
719 /* Initialize taskqueue for Event Handling */
720 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
721 sassc->ev_tq = taskqueue_create("mps_taskq", M_INTWAIT | M_ZERO,
722 taskqueue_thread_enqueue, &sassc->ev_tq);
724 /* Run the task queue with lowest priority */
725 taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
726 device_get_nameunit(sc->mps_dev));
728 TAILQ_INIT(&sassc->ccb_scanq);
729 error = mps_kproc_create(mpssas_scanner_thread, sassc,
730 &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
732 mps_printf(sc, "Error %d starting rescan thread\n", error);
737 sassc->flags |= MPSSAS_SCANTHREAD;
740 * XXX There should be a bus for every port on the adapter, but since
741 * we're just going to fake the topology for now, we'll pretend that
742 * everything is just a target on a single bus.
744 if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
745 mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
752 * Assume that discovery events will start right away. Freezing
753 * the simq will prevent the CAM boottime scanner from running
754 * before discovery is complete.
756 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
757 xpt_freeze_simq(sassc->sim, 1);
758 sc->sassc->startup_refcount = 0;
760 callout_init_mp(&sassc->discovery_callout);
761 sassc->discovery_timeouts = 0;
765 #if __FreeBSD_version >= 1000006
766 status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
767 if (status != CAM_REQ_CMP) {
768 mps_printf(sc, "Error %#x registering async handler for "
769 "AC_ADVINFO_CHANGED events\n", status);
775 mpssas_register_events(sc);
/*
 * Tear down the SAS/CAM layer in reverse of mps_attach_sas():
 * deregister firmware events, drain/free the event taskqueue (with the
 * lock unheld to avoid deadlocking against running tasks), release a
 * still-frozen SIM queue if attach bailed early, deregister and free
 * the SIM, ask the scanner thread to shut down (MPSSAS_SHUTDOWN +
 * wakeup, then lksleep up to 30s for it to exit), deregister the async
 * handler (by re-registering with code 0), and free the target array
 * and softc.  No-op when sc->sassc is NULL.
 */
783 mps_detach_sas(struct mps_softc *sc)
785 struct mpssas_softc *sassc;
787 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
789 if (sc->sassc == NULL)
793 mps_deregister_events(sc, sassc->mpssas_eh);
796 * Drain and free the event handling taskqueue with the lock
797 * unheld so that any parallel processing tasks drain properly
798 * without deadlocking.
800 if (sassc->ev_tq != NULL)
801 taskqueue_free(sassc->ev_tq);
803 /* Make sure CAM doesn't wedge if we had to bail out early. */
806 /* Deregister our async handler */
807 #if __FreeBSD_version >= 1000006
808 xpt_register_async(0, mpssas_async, sc, NULL);
811 if (sassc->flags & MPSSAS_IN_STARTUP)
812 xpt_release_simq(sassc->sim, 1);
814 if (sassc->sim != NULL) {
815 xpt_bus_deregister(cam_sim_path(sassc->sim));
816 cam_sim_free(sassc->sim);
819 if (sassc->flags & MPSSAS_SCANTHREAD) {
820 sassc->flags |= MPSSAS_SHUTDOWN;
821 wakeup(&sassc->ccb_scanq);
823 if (sassc->flags & MPSSAS_SCANTHREAD) {
824 lksleep(&sassc->flags, &sc->mps_lock, 0,
825 "mps_shutdown", 30 * hz);
830 kfree(sassc->targets, M_MPT2);
831 kfree(sassc, M_MPT2);
/*
 * Discovery has finished: cancel the pending discovery-timeout callout
 * if one was armed (MPSSAS_DISCOVERY_TIMEOUT_PENDING).
 */
838 mpssas_discovery_end(struct mpssas_softc *sassc)
840 struct mps_softc *sc = sassc->sc;
842 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
844 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
845 callout_stop(&sassc->discovery_callout);
/*
 * (Compiled out — kept for reference.)  Callout fired when discovery
 * takes too long, usually a sign interrupts aren't working: poll the
 * hardware for events, and if discovery still isn't done, re-arm the
 * callout up to MPSSAS_MAX_DISCOVERY_TIMEOUTS times before giving up
 * and forcing MPSSAS_IN_DISCOVERY clear.
 */
849 #if 0 /* XXX unused */
851 mpssas_discovery_timeout(void *data)
853 struct mpssas_softc *sassc = data;
854 struct mps_softc *sc;
857 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
861 "Timeout waiting for discovery, interrupts may not be working!\n");
862 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
864 /* Poll the hardware for events in case interrupts aren't working */
867 mps_printf(sassc->sc,
868 "Finished polling after discovery timeout at %d\n", ticks);
870 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
871 mpssas_discovery_end(sassc);
873 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
874 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
875 callout_reset(&sassc->discovery_callout,
876 MPSSAS_DISCOVERY_TIMEOUT * hz,
877 mpssas_discovery_timeout, sassc);
878 sassc->discovery_timeouts++;
880 mps_dprint(sassc->sc, MPS_FAULT,
881 "Discovery timed out, continuing.\n");
882 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
883 mpssas_discovery_end(sassc);
/*
 * Main CAM action entry point for the SIM (must be called with the
 * mps lock held — asserted below).  Dispatches on the CCB function
 * code: answers XPT_PATH_INQ with the HBA's capabilities, fills
 * XPT_GET_TRAN_SETTINGS from the target's negotiated link rate,
 * computes geometry, and routes reset/SCSI-IO/SMP-IO requests to
 * their handlers.  Unknown function codes get CAM_FUNC_NOTAVAIL.
 */
892 mpssas_action(struct cam_sim *sim, union ccb *ccb)
894 struct mpssas_softc *sassc;
896 sassc = cam_sim_softc(sim);
898 mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
899 ccb->ccb_h.func_code);
900 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
902 switch (ccb->ccb_h.func_code) {
905 struct ccb_pathinq *cpi = &ccb->cpi;
907 cpi->version_num = 1;
908 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
909 cpi->target_sprt = 0;
910 cpi->hba_misc = PIM_NOBUSRESET;
911 cpi->hba_eng_cnt = 0;
912 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
914 cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
915 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
916 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
917 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
918 cpi->unit_number = cam_sim_unit(sim);
919 cpi->bus_id = cam_sim_bus(sim);
920 cpi->base_transfer_speed = 150000;
921 cpi->transport = XPORT_SAS;
922 cpi->transport_version = 0;
923 cpi->protocol = PROTO_SCSI;
924 cpi->protocol_version = SCSI_REV_SPC;
925 #if __FreeBSD_version >= 800001
927 * XXX KDM where does this number come from?
929 cpi->maxio = 256 * 1024;
931 cpi->ccb_h.status = CAM_REQ_CMP;
934 case XPT_GET_TRAN_SETTINGS:
936 struct ccb_trans_settings *cts;
937 struct ccb_trans_settings_sas *sas;
938 struct ccb_trans_settings_scsi *scsi;
939 struct mpssas_target *targ;
942 sas = &cts->xport_specific.sas;
943 scsi = &cts->proto_specific.scsi;
945 targ = &sassc->targets[cts->ccb_h.target_id];
946 if (targ->handle == 0x0) {
947 cts->ccb_h.status = CAM_TID_INVALID;
951 cts->protocol_version = SCSI_REV_SPC2;
952 cts->transport = XPORT_SAS;
953 cts->transport_version = 0;
955 sas->valid = CTS_SAS_VALID_SPEED;
956 switch (targ->linkrate) {
958 sas->bitrate = 150000;
961 sas->bitrate = 300000;
964 sas->bitrate = 600000;
970 cts->protocol = PROTO_SCSI;
971 scsi->valid = CTS_SCSI_VALID_TQ;
972 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
974 cts->ccb_h.status = CAM_REQ_CMP;
977 case XPT_CALC_GEOMETRY:
978 cam_calc_geometry(&ccb->ccg, /*extended*/1);
979 ccb->ccb_h.status = CAM_REQ_CMP;
982 mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
983 mpssas_action_resetdev(sassc, ccb);
988 mps_printf(sassc->sc, "mpssas_action faking success for "
990 ccb->ccb_h.status = CAM_REQ_CMP;
993 mpssas_action_scsiio(sassc, ccb);
995 #if __FreeBSD_version >= 900026
997 mpssas_action_smpio(sassc, ccb);
1001 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Notify CAM of a reset: build a path to the given target/lun (either
 * may be a wildcard) on our SIM and broadcast the async code (e.g.
 * AC_BUS_RESET, AC_SENT_BDR) via xpt_async().  Logs and returns if the
 * path can't be created.
 */
1009 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1010 target_id_t target_id, lun_id_t lun_id)
1012 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1013 struct cam_path *path;
1015 mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
1016 ac_code, target_id, lun_id);
1018 if (xpt_create_path(&path, NULL,
1019 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1020 mps_printf(sc, "unable to create path for reset "
1025 xpt_async(ac_code, path, NULL);
1026 xpt_free_path(path);
/*
 * After a diag reset, force-complete every outstanding command with a
 * NULL reply (must hold the mps lock).  SMID 0 is skipped (loop starts
 * at 1).  Polled commands are marked COMPLETE so their busy-wait
 * exits; commands with a completion callback get it invoked; sleepers
 * flagged MPS_CM_FLAGS_WAKEUP are woken.  Anything still not FREE
 * afterwards is logged — that case should never happen.
 */
1030 mpssas_complete_all_commands(struct mps_softc *sc)
1032 struct mps_command *cm;
1036 mps_printf(sc, "%s\n", __func__);
1037 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1039 /* complete all commands with a NULL reply */
1040 for (i = 1; i < sc->num_reqs; i++) {
1041 cm = &sc->commands[i];
1042 cm->cm_reply = NULL;
1045 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1046 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1048 if (cm->cm_complete != NULL) {
1049 mpssas_log_command(cm,
1050 "completing cm %p state %x ccb %p for diag reset\n",
1051 cm, cm->cm_state, cm->cm_ccb);
1053 cm->cm_complete(sc, cm);
1057 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1058 mpssas_log_command(cm,
1059 "waking up cm %p state %x ccb %p for diag reset\n",
1060 cm, cm->cm_state, cm->cm_ccb);
1065 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1066 /* this should never happen, but if it does, log */
1067 mpssas_log_command(cm,
1068 "cm %p state %x flags 0x%x ccb %p during diag "
1069 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Re-initialize SAS state after a diag reset: re-enter startup mode
 * (freeze simq until port enable / rediscovery completes), announce a
 * bus reset to CAM, force-complete all outstanding commands, reset the
 * startup refcount to 0 (the freeze above is the one explicit freeze),
 * and invalidate every target's firmware handles — handles may change
 * across the reset, so targets are flagged INDIAGRESET until
 * rediscovered.  Targets with outstanding I/O are logged.
 */
1076 mpssas_handle_reinit(struct mps_softc *sc)
1080 /* Go back into startup mode and freeze the simq, so that CAM
1081 * doesn't send any commands until after we've rediscovered all
1082 * targets and found the proper device handles for them.
1084 * After the reset, portenable will trigger discovery, and after all
1085 * discovery-related activities have finished, the simq will be
1088 mps_printf(sc, "%s startup\n", __func__);
1089 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1090 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1091 xpt_freeze_simq(sc->sassc->sim, 1);
1093 /* notify CAM of a bus reset */
1094 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1097 /* complete and cleanup after all outstanding commands */
1098 mpssas_complete_all_commands(sc);
1100 mps_printf(sc, "%s startup %u tm %u after command completion\n",
1101 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1104 * The simq was explicitly frozen above, so set the refcount to 0.
1105 * The simq will be explicitly released after port enable completes.
1107 sc->sassc->startup_refcount = 0;
1109 /* zero all the target handles, since they may change after the
1110 * reset, and we have to rediscover all the targets and use the new
1113 for (i = 0; i < sc->facts->MaxTargets; i++) {
1114 if (sc->sassc->targets[i].outstanding != 0)
1115 mps_printf(sc, "target %u outstanding %u\n",
1116 i, sc->sassc->targets[i].outstanding);
1117 sc->sassc->targets[i].handle = 0x0;
1118 sc->sassc->targets[i].exp_dev_handle = 0x0;
1119 sc->sassc->targets[i].outstanding = 0;
1120 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management command itself times
 * out.  NOTE(review): the recovery action is not visible in this
 * extract (only the log line survives) — presumably it escalates to a
 * diag reset; confirm against the full source.
 */
1124 mpssas_tm_timeout(void *data)
1126 struct mps_command *tm = data;
1127 struct mps_softc *sc = tm->cm_sc;
1130 mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL UNIT RESET task-management request
 * issued during error recovery.  Stops the TM timeout callout, then:
 * NULL reply during a diag reset -> just clean up; otherwise count
 * commands still outstanding on the same LUN.  Zero remaining means
 * recovery for this LU is done — announce AC_SENT_BDR, and if another
 * LU on the target has a timed-out command, start aborting it;
 * otherwise free the TM.  Commands still pending means the LUN reset
 * effectively failed, so escalate to a full target reset.
 */
1136 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1138 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1139 unsigned int cm_count = 0;
1140 struct mps_command *cm;
1141 struct mpssas_target *targ;
1143 callout_stop(&tm->cm_callout);
1145 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1149 * Currently there should be no way we can hit this case. It only
1150 * happens when we have a failure to allocate chain frames, and
1151 * task management commands don't have S/G lists.
1153 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1154 mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1155 "This should not happen!\n", __func__, tm->cm_flags);
1156 mpssas_free_tm(sc, tm);
1160 if (reply == NULL) {
1161 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1162 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1163 /* this completion was due to a reset, just cleanup */
1164 targ->flags &= ~MPSSAS_TARGET_INRESET;
1166 mpssas_free_tm(sc, tm);
1169 /* we should have gotten a reply. */
1175 mpssas_log_command(tm,
1176 "logical unit reset status 0x%x code 0x%x count %u\n",
1177 reply->IOCStatus, reply->ResponseCode,
1178 reply->TerminationCount);
1180 /* See if there are any outstanding commands for this LUN.
1181 * This could be made more efficient by using a per-LU data
1182 * structure of some sort.
1184 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1185 if (cm->cm_lun == tm->cm_lun)
1189 if (cm_count == 0) {
1190 mpssas_log_command(tm,
1191 "logical unit %u finished recovery after reset\n",
1194 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1197 /* we've finished recovery for this logical unit. check and
1198 * see if some other logical unit has a timedout command
1199 * that needs to be processed.
1201 cm = TAILQ_FIRST(&targ->timedout_commands);
1203 mpssas_send_abort(sc, tm, cm);
1207 mpssas_free_tm(sc, tm);
1211 /* if we still have commands for this LUN, the reset
1212 * effectively failed, regardless of the status reported.
1213 * Escalate to a target reset.
1215 mpssas_log_command(tm,
1216 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1218 mpssas_send_reset(sc, tm,
1219 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management command — the
 * escalation step above LU reset.  Cancels the TM timeout, validates
 * the reply, clears the target's INRESET flag, and declares recovery
 * finished only when the target has zero outstanding commands;
 * otherwise the reset is treated as failed regardless of the reported
 * IOCStatus and recovery escalates further (escalation call itself is
 * outside the visible lines of this chunk).
 *
 * NOTE(review): `targ` is dereferenced below but its assignment
 * (presumably `targ = tm->cm_targ;`) is not visible here — lines seem
 * to have been dropped from this chunk; verify against the full file.
 */
1224 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1226 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1227 struct mpssas_target *targ;
/* TM completed (or errored); stop the mpssas_tm_timeout callout. */
1229 callout_stop(&tm->cm_callout);
1231 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1235 * Currently there should be no way we can hit this case. It only
1236 * happens when we have a failure to allocate chain frames, and
1237 * task management commands don't have S/G lists.
1239 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1240 mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1241 "This should not happen!\n", __func__, tm->cm_flags);
1242 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the controller was diag-reset underneath us. */
1246 if (reply == NULL) {
1247 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1248 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1249 /* this completion was due to a reset, just cleanup */
1250 targ->flags &= ~MPSSAS_TARGET_INRESET;
1252 mpssas_free_tm(sc, tm);
1255 /* we should have gotten a reply. */
1261 mpssas_log_command(tm,
1262 "target reset status 0x%x code 0x%x count %u\n",
1263 reply->IOCStatus, reply->ResponseCode,
1264 reply->TerminationCount);
1266 targ->flags &= ~MPSSAS_TARGET_INRESET;
1268 if (targ->outstanding == 0) {
1269 /* we've finished recovery for this target and all
1270 * of its logical units.
1272 mpssas_log_command(tm,
1273 "recovery finished after target reset\n");
/* Tell CAM a BDR-equivalent was delivered to this target. */
1275 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1279 mpssas_free_tm(sc, tm);
1282 /* after a target reset, if this target still has
1283 * outstanding commands, the reset effectively failed,
1284 * regardless of the status reported. escalate.
1286 mpssas_log_command(tm,
1287 "target reset complete for tm %p, but still have %u command(s)\n",
1288 tm, targ->outstanding);
/* Seconds a LU/target reset TM may run before mpssas_tm_timeout fires. */
1293 #define MPS_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset of the given `type`
 * (LOGICAL_UNIT_RESET or TARGET_RESET) using the pre-allocated TM
 * command `tm`.  Sets the type-specific completion handler, arms the
 * MPS_RESET_TIMEOUT callout, and maps/submits the request as a
 * high-priority descriptor.  Bails out early if the target has no
 * valid device handle.  NOTE(review): several lines (returns, closing
 * braces) are missing from this extracted chunk.
 */
1296 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1298 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1299 struct mpssas_target *target;
1302 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1303 if (target->handle == 0) {
1304 mps_printf(sc, "%s null devhandle for target_id %d\n",
1305 __func__, target->tid);
/* Fill in the MPI2 task-management request frame. */
1309 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1310 req->DevHandle = target->handle;
1311 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1312 req->TaskType = type;
1314 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1315 /* XXX Need to handle invalid LUNs */
1316 MPS_SET_LUN(req->LUN, tm->cm_lun);
1317 tm->cm_targ->logical_unit_resets++;
1318 mpssas_log_command(tm, "sending logical unit reset\n");
1319 tm->cm_complete = mpssas_logical_unit_reset_complete;
1321 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1322 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1323 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1324 tm->cm_targ->target_resets++;
1325 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1326 mpssas_log_command(tm, "sending target reset\n");
1327 tm->cm_complete = mpssas_target_reset_complete;
1330 mps_printf(sc, "unexpected reset type 0x%x\n", type);
/* TM requests go on the firmware's high-priority queue. */
1335 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1336 tm->cm_complete_data = (void *)tm;
/* Arm the recovery timeout before submitting. */
1338 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1339 mpssas_tm_timeout, tm);
1341 err = mps_map_command(sc, tm);
1343 mpssas_log_command(tm,
1344 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task-management command.
 * Cancels the TM timeout and validates the reply, then inspects the
 * target's timedout_commands queue: empty means recovery is finished
 * (free the TM); a different SMID at the head means the abort worked
 * and the next timed-out command is aborted with the same TM; the same
 * SMID still queued means the abort failed, so escalate to a logical
 * unit reset.
 */
1352 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1354 struct mps_command *cm;
1355 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1356 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1357 struct mpssas_target *targ;
/* TM completed (or errored); stop the mpssas_tm_timeout callout. */
1359 callout_stop(&tm->cm_callout);
1361 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1362 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1366 * Currently there should be no way we can hit this case. It only
1367 * happens when we have a failure to allocate chain frames, and
1368 * task management commands don't have S/G lists.
1370 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1371 mpssas_log_command(tm,
1372 "cm_flags = %#x for abort %p TaskMID %u!\n",
1373 tm->cm_flags, tm, req->TaskMID);
1374 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the controller was diag-reset underneath us. */
1378 if (reply == NULL) {
1379 mpssas_log_command(tm,
1380 "NULL abort reply for tm %p TaskMID %u\n",
1382 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1383 /* this completion was due to a reset, just cleanup */
1385 mpssas_free_tm(sc, tm);
1388 /* we should have gotten a reply. */
1394 mpssas_log_command(tm,
1395 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1397 reply->IOCStatus, reply->ResponseCode,
1398 reply->TerminationCount);
/* Peek at the next timed-out command, if any, for this target. */
1400 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1402 /* if there are no more timedout commands, we're done with
1403 * error recovery for this target.
1405 mpssas_log_command(tm,
1406 "finished recovery after aborting TaskMID %u\n",
1410 mpssas_free_tm(sc, tm);
1412 else if (req->TaskMID != cm->cm_desc.Default.SMID) {
1413 /* abort success, but we have more timedout commands to abort */
1414 mpssas_log_command(tm,
1415 "continuing recovery after aborting TaskMID %u\n",
1418 mpssas_send_abort(sc, tm, cm);
1421 /* we didn't get a command completion, so the abort
1422 * failed as far as we're concerned. escalate.
1424 mpssas_log_command(tm,
1425 "abort failed for TaskMID %u tm %p\n",
1428 mpssas_send_reset(sc, tm,
1429 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds an ABORT TASK TM may run before mpssas_tm_timeout fires. */
1433 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK task-management request for the
 * timed-out command `cm`, using the pre-allocated TM command `tm`.
 * Arms the MPS_ABORT_TIMEOUT callout and submits the request as a
 * high-priority descriptor; completion is handled by
 * mpssas_abort_complete().
 *
 * NOTE(review): `targ` is dereferenced below but its assignment
 * (presumably `targ = cm->cm_targ;`) is not visible in this chunk —
 * lines appear to have been dropped; verify against the full source.
 */
1436 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1438 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1439 struct mpssas_target *targ;
/* A zero devhandle means the target is gone; nothing to abort against. */
1443 if (targ->handle == 0) {
1444 mps_printf(sc, "%s null devhandle for target_id %d\n",
1445 __func__, cm->cm_ccb->ccb_h.target_id);
/* Fill in the MPI2 task-management request frame. */
1449 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1450 req->DevHandle = targ->handle;
1451 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1452 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1454 /* XXX Need to handle invalid LUNs */
1455 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the specific outstanding request to abort. */
1457 req->TaskMID = cm->cm_desc.Default.SMID;
/* TM requests go on the firmware's high-priority queue. */
1460 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1461 tm->cm_complete = mpssas_abort_complete;
1462 tm->cm_complete_data = (void *)tm;
1463 tm->cm_targ = cm->cm_targ;
1464 tm->cm_lun = cm->cm_lun;
/* Arm the abort timeout before submitting. */
1466 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1467 mpssas_tm_timeout, tm);
1471 err = mps_map_command(sc, tm);
1473 mpssas_log_command(tm,
1474 "error %d sending abort for cm %p SMID %u\n",
1475 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O.  First re-runs the
 * interrupt handler in case the completion is merely pending; if the
 * command really is stuck, marks it TIMEDOUT, queues it on the
 * target's timedout_commands list, and starts recovery by allocating
 * a TM command and sending an ABORT TASK (or piggybacks on an
 * already-running recovery for the same target).
 *
 * NOTE(review): `sc` and `targ` are used below but their assignments
 * (presumably `sc = cm->cm_sc;` / `targ = cm->cm_targ;`) are not
 * visible in this chunk — lines appear to have been dropped; verify
 * against the full source.
 */
1481 mpssas_scsiio_timeout(void *data)
1483 struct mps_softc *sc;
1484 struct mps_command *cm;
1485 struct mpssas_target *targ;
1487 cm = (struct mps_command *)data;
1490 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1492 mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1495 * Run the interrupt handler to make sure it's not pending. This
1496 * isn't perfect because the command could have already completed
1497 * and been re-used, though this is unlikely.
1499 mps_intr_locked(sc);
1500 if (cm->cm_state == MPS_CM_STATE_FREE) {
1501 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1505 if (cm->cm_ccb == NULL) {
1506 mps_printf(sc, "command timeout with NULL ccb\n");
1510 mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1516 /* XXX first, check the firmware state, to see if it's still
1517 * operational. if not, do a diag reset.
1520 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1521 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1522 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1524 if (targ->tm != NULL) {
1525 /* target already in recovery, just queue up another
1526 * timedout command to be processed later.
1528 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1531 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1532 mps_printf(sc, "timedout cm %p allocated tm %p\n",
1535 /* start recovery by aborting the first timedout command */
1536 mpssas_send_abort(sc, targ->tm, cm);
1539 /* XXX queue this target up for recovery once a TM becomes
1540 * available. The firmware only has a limited number of
1541 * HighPriority credits for the high priority requests used
1542 * for task management, and we ran out.
1544 * Isilon: don't worry about this for now, since we have
1545 * more credits than disks in an enclosure, and limit
1546 * ourselves to one TM per target for recovery.
1548 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
/*
 * XPT_SCSI_IO action handler: translate a CAM SCSI I/O CCB into an
 * MPI2 SCSI IO request and submit it to the controller.  Validates the
 * target (handle present, not a bare RAID component, not mid-removal,
 * adapter not shutting down), allocates a command frame (requeueing
 * and freezing the SIM queue if none are free), fills in the request
 * (direction, tag action, TLR bits, LUN, CDB, optional EEDP/DIF
 * setup), optionally attempts WarpDrive direct-drive I/O, arms the
 * per-command timeout, and maps/submits the command.
 */
1555 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1557 MPI2_SCSI_IO_REQUEST *req;
1558 struct ccb_scsiio *csio;
1559 struct mps_softc *sc;
1560 struct mpssas_target *targ;
1561 struct mpssas_lun *lun;
1562 struct mps_command *cm;
1563 uint8_t i, lba_byte, *ref_tag_addr;
1564 uint16_t eedp_flags;
1567 mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1568 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
/* A zero handle means no device is mapped at this target id. */
1571 targ = &sassc->targets[csio->ccb_h.target_id];
1572 if (targ->handle == 0x0) {
1573 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1574 __func__, csio->ccb_h.target_id);
1575 csio->ccb_h.status = CAM_TID_INVALID;
/* Raw RAID components are not addressable via normal SCSI I/O. */
1579 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1580 mps_dprint(sc, MPS_TRACE, "%s Raid component no SCSI IO supported %u\n",
1581 __func__, csio->ccb_h.target_id);
1582 csio->ccb_h.status = CAM_TID_INVALID;
1588 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1589 * that the volume has timed out. We want volumes to be enumerated
1590 * until they are deleted/removed, not just failed.
1592 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1593 if (targ->devinfo == 0)
1594 csio->ccb_h.status = CAM_REQ_CMP;
1596 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1601 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1602 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1603 csio->ccb_h.status = CAM_TID_INVALID;
/* Out of command frames: freeze the SIM queue and ask CAM to requeue. */
1608 cm = mps_alloc_command(sc);
1610 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1611 xpt_freeze_simq(sassc->sim, 1);
1612 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1614 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1615 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI IO request frame from the CCB. */
1620 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1621 bzero(req, sizeof(*req));
1622 req->DevHandle = targ->handle;
1623 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1625 req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1626 req->SenseBufferLength = MPS_SENSE_LEN;
1628 req->ChainOffset = 0;
1629 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1634 req->DataLength = csio->dxfer_len;
1635 req->BidirectionalDataLength = 0;
1636 req->IoFlags = csio->cdb_len;
1639 /* Note: BiDirectional transfers are not supported */
1640 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1642 req->Control = MPI2_SCSIIO_CONTROL_READ;
1643 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1646 req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1647 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1651 req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1656 * It looks like the hardware doesn't require an explicit tag
1657 * number for each transaction. SAM Task Management not supported
1660 switch (csio->tag_action) {
1661 case MSG_HEAD_OF_Q_TAG:
1662 req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1664 case MSG_ORDERED_Q_TAG:
1665 req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1668 req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1670 case CAM_TAG_ACTION_NONE:
1671 case MSG_SIMPLE_Q_TAG:
1673 req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Apply the per-target Transport Layer Retries setting. */
1676 req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1678 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1679 mps_free_command(sc, cm);
1680 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Copy the CDB from wherever the CCB keeps it (pointer vs. inline). */
1685 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1686 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1688 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1689 req->IoFlags = csio->cdb_len;
1692 * Check if EEDP is supported and enabled. If it is then check if the
1693 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1694 * is formatted for EEDP support. If all of this is true, set CDB up
1695 * for EEDP transfer.
1697 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1698 if (sc->eedp_enabled && eedp_flags) {
1699 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1700 if (lun->lun_id == csio->ccb_h.target_lun) {
1705 if ((lun != NULL) && (lun->eedp_formatted)) {
1706 req->EEDPBlockSize = lun->eedp_block_size;
1707 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1708 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1709 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1710 req->EEDPFlags = eedp_flags;
1713 * If CDB less than 32, fill in Primary Ref Tag with
1714 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1715 * already there. Also, set protection bit. FreeBSD
1716 * currently does not support CDBs bigger than 16, but
1717 * the code doesn't hurt, and will be here for the
1720 if (csio->cdb_len != 32) {
1721 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1722 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1723 PrimaryReferenceTag;
1724 for (i = 0; i < 4; i++) {
1726 req->CDB.CDB32[lba_byte + i];
1729 req->CDB.EEDP32.PrimaryApplicationTagMask =
1731 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1735 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1736 req->EEDPFlags = eedp_flags;
1737 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Attach CCB data buffer and descriptor fields to the command. */
1743 cm->cm_data = csio->data_ptr;
1744 cm->cm_length = csio->dxfer_len;
1745 cm->cm_sge = &req->SGL;
1746 cm->cm_sglsize = (32 - 24) * 4;
1747 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1748 cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1749 cm->cm_complete = mpssas_scsiio_complete;
1750 cm->cm_complete_data = ccb;
1752 cm->cm_lun = csio->ccb_h.target_lun;
1756 * If HBA is a WD and the command is not for a retry, try to build a
1757 * direct I/O message. If failed, or the command is for a retry, send
1758 * the I/O to the IR volume itself.
1760 if (sc->WD_valid_config) {
1761 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1762 mpssas_direct_drive_io(sassc, cm, ccb);
1764 ccb->ccb_h.status = CAM_REQ_INPROG;
/* CCB timeout is in milliseconds; convert to ticks for the callout. */
1768 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1769 mpssas_scsiio_timeout, cm);
1772 targ->outstanding++;
1773 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1775 if ((sc->mps_debug & MPS_TRACE) != 0)
1776 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1777 __func__, cm, ccb, targ->outstanding);
1779 mps_map_command(sc, cm);
/*
 * Completion handler for SCSI I/O commands.  Stops the per-command
 * timeout, syncs/unloads the DMA map, updates per-target accounting,
 * handles recovery bookkeeping for timed-out commands, and translates
 * the MPI2 IOCStatus/SCSIStatus in the reply into a CAM ccb status
 * (including autosense copy-out, TLR probing via INQUIRY VPD page 0,
 * and WarpDrive direct-drive retry back to the IR volume).  Finally
 * unfreezes the SIM queue if it was frozen, freezes the device queue
 * on error, completes the CCB, and frees the command.
 */
1784 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1786 MPI2_SCSI_IO_REPLY *rep;
1788 struct ccb_scsiio *csio;
1789 struct mpssas_softc *sassc;
1790 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1791 u8 *TLR_bits, TLR_on;
1795 mps_dprint(sc, MPS_TRACE,
1796 "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1797 __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1798 cm->cm_targ->outstanding);
/* The command completed; disarm mpssas_scsiio_timeout. */
1800 callout_stop(&cm->cm_callout);
1801 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1804 ccb = cm->cm_complete_data;
1806 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1808 * XXX KDM if the chain allocation fails, does it matter if we do
1809 * the sync and unload here? It is simpler to do it in every case,
1810 * assuming it doesn't cause problems.
1812 if (cm->cm_data != NULL) {
1813 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1814 dir = BUS_DMASYNC_POSTREAD;
1815 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1816 dir = BUS_DMASYNC_POSTWRITE;
1817 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1818 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Per-target accounting; remove from the active-command list. */
1821 cm->cm_targ->completed++;
1822 cm->cm_targ->outstanding--;
1823 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
/* Recovery bookkeeping: log completions that race with TM recovery. */
1825 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1826 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1827 if (cm->cm_reply != NULL)
1828 mpssas_log_command(cm,
1829 "completed timedout cm %p ccb %p during recovery "
1830 "ioc %x scsi %x state %x xfer %u\n",
1832 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1833 rep->TransferCount);
1835 mpssas_log_command(cm,
1836 "completed timedout cm %p ccb %p during recovery\n",
1838 } else if (cm->cm_targ->tm != NULL) {
1839 if (cm->cm_reply != NULL)
1840 mpssas_log_command(cm,
1841 "completed cm %p ccb %p during recovery "
1842 "ioc %x scsi %x state %x xfer %u\n",
1844 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1845 rep->TransferCount);
1847 mpssas_log_command(cm,
1848 "completed cm %p ccb %p during recovery\n",
1850 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1851 mpssas_log_command(cm,
1852 "reset completed cm %p ccb %p\n",
1856 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1858 * We ran into an error after we tried to map the command,
1859 * so we're getting a callback without queueing the command
1860 * to the hardware. So we set the status here, and it will
1861 * be retained below. We'll go through the "fast path",
1862 * because there can be no reply when we haven't actually
1863 * gone out to the hardware.
1865 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1868 * Currently the only error included in the mask is
1869 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1870 * chain frames. We need to freeze the queue until we get
1871 * a command that completed without this error, which will
1872 * hopefully have some chain frames attached that we can
1873 * use. If we wanted to get smarter about it, we would
1874 * only unfreeze the queue in this condition when we're
1875 * sure that we're getting some chain frames back. That's
1876 * probably unnecessary.
1878 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1879 xpt_freeze_simq(sassc->sim, 1);
1880 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1881 mps_dprint(sc, MPS_INFO, "Error sending command, "
1882 "freezing SIM queue\n");
1886 /* Take the fast path to completion */
1887 if (cm->cm_reply == NULL) {
1888 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1889 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1890 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1892 ccb->ccb_h.status = CAM_REQ_CMP;
1893 ccb->csio.scsi_status = SCSI_STATUS_OK;
1895 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1896 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1897 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1898 mps_dprint(sc, MPS_INFO,
1899 "Unfreezing SIM queue\n");
1904 * There are two scenarios where the status won't be
1905 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
1906 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1908 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1910 * Freeze the dev queue so that commands are
1911 * executed in the correct order with after error
1914 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1915 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1917 mps_free_command(sc, cm);
1922 if (sc->mps_debug & MPS_TRACE)
1923 mpssas_log_command(cm,
1924 "ioc %x scsi %x state %x xfer %u\n",
1925 rep->IOCStatus, rep->SCSIStatus,
1926 rep->SCSIState, rep->TransferCount);
1929 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1930 * Volume if an error occurred (normal I/O retry). Use the original
1931 * CCB, but set a flag that this will be a retry so that it's sent to
1932 * the original volume. Free the command but reuse the CCB.
1934 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1935 mps_free_command(sc, cm);
1936 ccb->ccb_h.status = MPS_WD_RETRY;
1937 mpssas_action_scsiio(sassc, ccb);
/* Map the firmware IOCStatus to a CAM status. */
1941 switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1942 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1943 csio->resid = cm->cm_length - rep->TransferCount;
1945 case MPI2_IOCSTATUS_SUCCESS:
1946 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1948 if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1949 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1950 mpssas_log_command(cm, "recovered error\n");
1952 /* Completion failed at the transport level. */
1953 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1954 MPI2_SCSI_STATE_TERMINATED)) {
1955 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1959 /* In a modern packetized environment, an autosense failure
1960 * implies that there's not much else that can be done to
1961 * recover the command.
1963 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1964 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1969 * CAM doesn't care about SAS Response Info data, but if this is
1970 * the state check if TLR should be done. If not, clear the
1971 * TLR_bits for the target.
1973 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1974 ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1975 MPS_SCSI_RI_INVALID_FRAME)) {
1976 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1977 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1981 * Intentionally override the normal SCSI status reporting
1982 * for these two cases. These are likely to happen in a
1983 * multi-initiator environment, and we want to make sure that
1984 * CAM retries these commands rather than fail them.
1986 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1987 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1988 ccb->ccb_h.status = CAM_REQ_ABORTED;
1992 /* Handle normal status and sense */
1993 csio->scsi_status = rep->SCSIStatus;
1994 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1995 ccb->ccb_h.status = CAM_REQ_CMP;
1997 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
/* Copy autosense data into the CCB, clamped to both buffer sizes. */
1999 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2000 int sense_len, returned_sense_len;
2002 returned_sense_len = min(rep->SenseCount,
2003 sizeof(struct scsi_sense_data));
2004 if (returned_sense_len < ccb->csio.sense_len)
2005 ccb->csio.sense_resid = ccb->csio.sense_len -
2008 ccb->csio.sense_resid = 0;
2010 sense_len = min(returned_sense_len,
2011 ccb->csio.sense_len - ccb->csio.sense_resid);
2012 bzero(&ccb->csio.sense_data,
2013 sizeof(ccb->csio.sense_data));
2014 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2015 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2019 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2020 * and it's page code 0 (Supported Page List), and there is
2021 * inquiry data, and this is for a sequential access device, and
2022 * the device is an SSP target, and TLR is supported by the
2023 * controller, turn the TLR_bits value ON if page 0x90 is
2026 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2027 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2028 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2029 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2030 T_SEQUENTIAL) && (sc->control_TLR) &&
2031 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2032 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2033 vpd_list = (struct scsi_vpd_supported_page_list *)
2035 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2037 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2038 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2039 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2040 csio->cdb_io.cdb_bytes[4];
2041 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2042 if (vpd_list->list[i] == 0x90) {
2049 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2050 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2052 * If devinfo is 0 this will be a volume. In that case don't
2053 * tell CAM that the volume is not there. We want volumes to
2054 * be enumerated until they are deleted/removed, not just
2057 if (cm->cm_targ->devinfo == 0)
2058 ccb->ccb_h.status = CAM_REQ_CMP;
2060 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2062 case MPI2_IOCSTATUS_INVALID_SGL:
2063 mps_print_scsiio_cmd(sc, cm);
2064 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2066 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2068 * This is one of the responses that comes back when an I/O
2069 * has been aborted. If it is because of a timeout that we
2070 * initiated, just set the status to CAM_CMD_TIMEOUT.
2071 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2072 * command is the same (it gets retried, subject to the
2073 * retry counter), the only difference is what gets printed
2076 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2077 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2079 ccb->ccb_h.status = CAM_REQ_ABORTED;
2081 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2082 /* resid is ignored for this condition */
2084 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2086 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2087 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2089 * Since these are generally external (i.e. hopefully
2090 * transient transport-related) errors, retry these without
2091 * decrementing the retry count.
2093 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2094 mpssas_log_command(cm,
2095 "terminated ioc %x scsi %x state %x xfer %u\n",
2096 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2097 rep->TransferCount);
2099 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2100 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2101 case MPI2_IOCSTATUS_INVALID_VPID:
2102 case MPI2_IOCSTATUS_INVALID_FIELD:
2103 case MPI2_IOCSTATUS_INVALID_STATE:
2104 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2105 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2106 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2107 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2108 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2110 mpssas_log_command(cm,
2111 "completed ioc %x scsi %x state %x xfer %u\n",
2112 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2113 rep->TransferCount);
2114 csio->resid = cm->cm_length;
2115 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* A successful completion means chain frames are available again. */
2119 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2120 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2121 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2122 mps_dprint(sc, MPS_INFO, "Command completed, "
2123 "unfreezing SIM queue\n");
/* On error, freeze the device queue so ordering is preserved. */
2126 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2127 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2128 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2131 mps_free_command(sc, cm);
/*
 * WarpDrive direct-drive I/O: for READ/WRITE 6/10/16 CDBs aimed at the
 * IR volume, if the transfer lies entirely within one stripe, rewrite
 * the request to go straight to the underlying PhysDisk (translating
 * the virtual LBA to a physical LBA and swapping in the PhysDisk's
 * DevHandle) and mark it MPS_CM_FLAGS_DD_IO so errors can be retried
 * against the volume.  Otherwise leave the request alone and let the
 * IR volume handle it.
 */
2136 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2138 pMpi2SCSIIORequest_t pIO_req;
2139 struct mps_softc *sc = sassc->sc;
2141 uint32_t physLBA, stripe_offset, stripe_unit;
2142 uint32_t io_size, column;
2143 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2146 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2147 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2148 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2149 * bit different than the 10/16 CDBs, handle them separately.
2151 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2152 CDB = pIO_req->CDB.CDB32;
2155 * Handle 6 byte CDBs.
2157 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2158 (CDB[0] == WRITE_6))) {
2160 * Get the transfer size in blocks.
2162 io_size = (cm->cm_length >> sc->DD_block_exponent);
2165 * Get virtual LBA given in the CDB.
2167 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2168 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2171 * Check that LBA range for I/O does not exceed volume's
2174 if ((virtLBA + (uint64_t)io_size - 1) <=
2177 * Check if the I/O crosses a stripe boundary. If not,
2178 * translate the virtual LBA to a physical LBA and set
2179 * the DevHandle for the PhysDisk to be used. If it
2180 * does cross a boundry, do normal I/O. To get the
2181 * right DevHandle to use, get the map number for the
2182 * column, then use that map number to look up the
2183 * DevHandle of the PhysDisk.
2185 stripe_offset = (uint32_t)virtLBA &
2186 (sc->DD_stripe_size - 1);
2187 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2188 physLBA = (uint32_t)virtLBA >>
2189 sc->DD_stripe_exponent;
2190 stripe_unit = physLBA / sc->DD_num_phys_disks;
2191 column = physLBA % sc->DD_num_phys_disks;
2192 pIO_req->DevHandle =
2193 sc->DD_column_map[column].dev_handle;
2194 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 21-bit LBA field of the 6-byte CDB with the physical LBA. */
2197 physLBA = (stripe_unit <<
2198 sc->DD_stripe_exponent) + stripe_offset;
2199 ptrLBA = &pIO_req->CDB.CDB32[1];
2200 physLBA_byte = (uint8_t)(physLBA >> 16);
2201 *ptrLBA = physLBA_byte;
2202 ptrLBA = &pIO_req->CDB.CDB32[2];
2203 physLBA_byte = (uint8_t)(physLBA >> 8);
2204 *ptrLBA = physLBA_byte;
2205 ptrLBA = &pIO_req->CDB.CDB32[3];
2206 physLBA_byte = (uint8_t)physLBA;
2207 *ptrLBA = physLBA_byte;
2210 * Set flag that Direct Drive I/O is
2213 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2220 * Handle 10 or 16 byte CDBs.
2222 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2223 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2224 (CDB[0] == WRITE_16))) {
2226 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2227 * are 0. If not, this is accessing beyond 2TB so handle it in
2228 * the else section. 10-byte CDB's are OK.
2230 if ((CDB[0] < READ_16) ||
2231 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2233 * Get the transfer size in blocks.
2235 io_size = (cm->cm_length >> sc->DD_block_exponent);
2238 * Get virtual LBA. Point to correct lower 4 bytes of
2239 * LBA in the CDB depending on command.
2241 lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2242 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2243 ((uint64_t)CDB[lba_idx + 1] << 16) |
2244 ((uint64_t)CDB[lba_idx + 2] << 8) |
2245 (uint64_t)CDB[lba_idx + 3];
2248 * Check that LBA range for I/O does not exceed volume's
2251 if ((virtLBA + (uint64_t)io_size - 1) <=
2254 * Check if the I/O crosses a stripe boundary.
2255 * If not, translate the virtual LBA to a
2256 * physical LBA and set the DevHandle for the
2257 * PhysDisk to be used. If it does cross a
2258 * boundry, do normal I/O. To get the right
2259 * DevHandle to use, get the map number for the
2260 * column, then use that map number to look up
2261 * the DevHandle of the PhysDisk.
2263 stripe_offset = (uint32_t)virtLBA &
2264 (sc->DD_stripe_size - 1);
2265 if ((stripe_offset + io_size) <=
2266 sc->DD_stripe_size) {
2267 physLBA = (uint32_t)virtLBA >>
2268 sc->DD_stripe_exponent;
2269 stripe_unit = physLBA /
2270 sc->DD_num_phys_disks;
2272 sc->DD_num_phys_disks;
2273 pIO_req->DevHandle =
2274 sc->DD_column_map[column].
2276 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 32-bit LBA field in the CDB with the physical LBA. */
2279 physLBA = (stripe_unit <<
2280 sc->DD_stripe_exponent) +
2283 &pIO_req->CDB.CDB32[lba_idx];
2284 physLBA_byte = (uint8_t)(physLBA >> 24);
2285 *ptrLBA = physLBA_byte;
2287 &pIO_req->CDB.CDB32[lba_idx + 1];
2288 physLBA_byte = (uint8_t)(physLBA >> 16);
2289 *ptrLBA = physLBA_byte;
2291 &pIO_req->CDB.CDB32[lba_idx + 2];
2292 physLBA_byte = (uint8_t)(physLBA >> 8);
2293 *ptrLBA = physLBA_byte;
2295 &pIO_req->CDB.CDB32[lba_idx + 3];
2296 physLBA_byte = (uint8_t)physLBA;
2297 *ptrLBA = physLBA_byte;
2300 * Set flag that Direct Drive I/O is
2303 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2308 * 16-byte CDB and the upper 4 bytes of the CDB are not
2309 * 0. Get the transfer size in blocks.
2311 io_size = (cm->cm_length >> sc->DD_block_exponent);
/*
 * NOTE(review): BUG — assembling a 64-bit LBA from CDB[2..9] of a
 * 16-byte CDB should shift the most-significant byte by 56, not 54
 * (the bytes below use 48, 40, 32, ...).  This same defect existed in
 * the upstream FreeBSD mps driver and was later corrected to << 56.
 * Not fixed here because this extracted chunk is missing lines and a
 * safe full rewrite is not possible; fix against the complete file.
 */
2316 virtLBA = ((uint64_t)CDB[2] << 54) |
2317 ((uint64_t)CDB[3] << 48) |
2318 ((uint64_t)CDB[4] << 40) |
2319 ((uint64_t)CDB[5] << 32) |
2320 ((uint64_t)CDB[6] << 24) |
2321 ((uint64_t)CDB[7] << 16) |
2322 ((uint64_t)CDB[8] << 8) |
2326 * Check that LBA range for I/O does not exceed volume's
2329 if ((virtLBA + (uint64_t)io_size - 1) <=
2332 * Check if the I/O crosses a stripe boundary.
2333 * If not, translate the virtual LBA to a
2334 * physical LBA and set the DevHandle for the
2335 * PhysDisk to be used. If it does cross a
2336 * boundry, do normal I/O. To get the right
2337 * DevHandle to use, get the map number for the
2338 * column, then use that map number to look up
2339 * the DevHandle of the PhysDisk.
2341 stripe_offset = (uint32_t)virtLBA &
2342 (sc->DD_stripe_size - 1);
2343 if ((stripe_offset + io_size) <=
2344 sc->DD_stripe_size) {
2345 physLBA = (uint32_t)(virtLBA >>
2346 sc->DD_stripe_exponent);
2347 stripe_unit = physLBA /
2348 sc->DD_num_phys_disks;
2350 sc->DD_num_phys_disks;
2351 pIO_req->DevHandle =
2352 sc->DD_column_map[column].
2354 cm->cm_desc.SCSIIO.DevHandle =
2357 physLBA = (stripe_unit <<
2358 sc->DD_stripe_exponent) +
2362 * Set upper 4 bytes of LBA to 0. We
2363 * assume that the phys disks are less
2364 * than 2 TB's in size. Then, set the
2367 pIO_req->CDB.CDB32[2] = 0;
2368 pIO_req->CDB.CDB32[3] = 0;
2369 pIO_req->CDB.CDB32[4] = 0;
2370 pIO_req->CDB.CDB32[5] = 0;
2371 ptrLBA = &pIO_req->CDB.CDB32[6];
2372 physLBA_byte = (uint8_t)(physLBA >> 24);
2373 *ptrLBA = physLBA_byte;
2374 ptrLBA = &pIO_req->CDB.CDB32[7];
2375 physLBA_byte = (uint8_t)(physLBA >> 16);
2376 *ptrLBA = physLBA_byte;
2377 ptrLBA = &pIO_req->CDB.CDB32[8];
2378 physLBA_byte = (uint8_t)(physLBA >> 8);
2379 *ptrLBA = physLBA_byte;
2380 ptrLBA = &pIO_req->CDB.CDB32[9];
2381 physLBA_byte = (uint8_t)physLBA;
2382 *ptrLBA = physLBA_byte;
2385 * Set flag that Direct Drive I/O is
2388 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2395 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command issued by
 * mpssas_send_smpcmd().  Translates the MPI2 reply status into a CAM
 * CCB status, syncs/unloads the DMA map, and frees the command.
 * NOTE(review): this extract is missing some original lines (braces,
 * early-return statements) — verify control flow against the full file.
 */
2397 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2399 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2400 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2404 ccb = cm->cm_complete_data;
2407 * Currently there should be no way we can hit this case. It only
2408 * happens when we have a failure to allocate chain frames, and SMP
2409 * commands require two S/G elements only. That should be handled
2410 * in the standard request size.
/* Chain-frame allocation failure: fail the CCB with a generic error. */
2412 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2413 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2414 __func__, cm->cm_flags);
2415 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* A NULL reply frame also means the request cannot be completed. */
2419 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2421 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2422 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/*
 * Reassemble the 64-bit SAS address from the little-endian request
 * frame (used only for the debug message below).
 */
2426 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2427 sasaddr = le32toh(req->SASAddress.Low);
2428 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
/* Either the IOC or the SAS layer reporting failure fails the CCB. */
2430 if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2431 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2432 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2433 __func__, rpl->IOCStatus, rpl->SASStatus);
2434 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2438 mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2439 "%#jx completed successfully\n", __func__,
2440 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2442 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2443 ccb->ccb_h.status = CAM_REQ_CMP;
2445 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2449 * We sync in both directions because we had DMAs in the S/G list
2450 * in both directions.
2452 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2453 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2454 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2455 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for an XPT_SMP_IO CCB
 * to the given SAS address.  The request and response buffers are
 * described with a two-entry uio/iovec so a single mps_map_command()
 * call maps both.  Completion is handled by mpssas_smpio_complete().
 * NOTE(review): extract is missing some lines (braces, else lines,
 * xpt_done()/return paths) — confirm error paths against the full file.
 */
2460 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2462 struct mps_command *cm;
2463 uint8_t *request, *response;
2464 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2465 struct mps_softc *sc;
2472 * XXX We don't yet support physical addresses here.
2474 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2475 mps_printf(sc, "%s: physical addresses not supported\n",
2477 ccb->ccb_h.status = CAM_REQ_INVALID;
2483 * If the user wants to send an S/G list, check to make sure they
2484 * have single buffers.
2486 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2488 * The chip does not support more than one buffer for the
2489 * request or response.
2491 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2492 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2493 mps_printf(sc, "%s: multiple request or response "
2494 "buffer segments not supported for SMP\n",
2496 ccb->ccb_h.status = CAM_REQ_INVALID;
2502 * The CAM_SCATTER_VALID flag was originally implemented
2503 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2504 * We have two. So, just take that flag to mean that we
2505 * might have S/G lists, and look at the S/G segment count
2506 * to figure out whether that is the case for each individual
/* Single-entry S/G list: use the first segment's address directly. */
2509 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2510 bus_dma_segment_t *req_sg;
2512 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2513 request = (uint8_t *)req_sg[0].ds_addr;
2515 request = ccb->smpio.smp_request;
2517 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2518 bus_dma_segment_t *rsp_sg;
2520 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2521 response = (uint8_t *)rsp_sg[0].ds_addr;
2523 response = ccb->smpio.smp_response;
/* No S/G list at all: plain virtual buffers from the CCB. */
2525 request = ccb->smpio.smp_request;
2526 response = ccb->smpio.smp_response;
2529 cm = mps_alloc_command(sc);
2531 mps_printf(sc, "%s: cannot allocate command\n", __func__);
2532 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Fill in the MPI2 SMP passthrough request frame. */
2537 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2538 bzero(req, sizeof(*req));
2539 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2541 /* Allow the chip to use any route to this SAS address. */
2542 req->PhysicalPort = 0xff;
2544 req->RequestDataLength = ccb->smpio.smp_request_len;
2546 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2548 mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
2549 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2551 mpi_init_sge(cm, req, &req->SGL);
2554 * Set up a uio to pass into mps_map_command(). This allows us to
2555 * do one map command, and one busdma call in there.
2557 cm->cm_uio.uio_iov = cm->cm_iovec;
2558 cm->cm_uio.uio_iovcnt = 2;
2559 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2562 * The read/write flag isn't used by busdma, but set it just in
2563 * case. This isn't exactly accurate, either, since we're going in
2566 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] carries the SMP request, iovec[1] the response buffer. */
2568 cm->cm_iovec[0].iov_base = request;
2569 cm->cm_iovec[0].iov_len = req->RequestDataLength;
2570 cm->cm_iovec[1].iov_base = response;
2571 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2573 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2574 cm->cm_iovec[1].iov_len;
2577 * Trigger a warning message in mps_data_cb() for the user if we
2578 * wind up exceeding two S/G segments. The chip expects one
2579 * segment for the request and another for the response.
2581 cm->cm_max_segs = 2;
2583 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2584 cm->cm_complete = mpssas_smpio_complete;
2585 cm->cm_complete_data = ccb;
2588 * Tell the mapping code that we're using a uio, and that this is
2589 * an SMP passthrough request. There is a little special-case
2590 * logic there (in mps_data_cb()) to handle the bidirectional
2593 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2594 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2596 /* The chip data format is little endian. */
2597 req->SASAddress.High = htole32(sasaddr >> 32);
2598 req->SASAddress.Low = htole32(sasaddr);
2601 * XXX Note that we don't have a timeout/abort mechanism here.
2602 * From the manual, it looks like task management requests only
2603 * work for SCSI IO and SATA passthrough requests. We may need to
2604 * have a mechanism to retry requests in the event of a chip reset
2605 * at least. Hopefully the chip will insure that any errors short
2606 * of that are relayed back to the driver.
2608 error = mps_map_command(sc, cm);
/* EINPROGRESS means the map/submit is deferred, not a failure. */
2609 if ((error != 0) && (error != EINPROGRESS)) {
2610 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
2618 mps_free_command(sc, cm);
2619 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP
 * target to talk to, then hand off to mpssas_send_smpcmd().  The
 * address is taken from the target itself if it embeds an SMP target,
 * otherwise from its parent (presumably the attached expander).
 * NOTE(review): braces/else/xpt_done lines are missing from this
 * extract — confirm the exact fallback ordering in the full file.
 */
2626 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2628 struct mps_softc *sc;
2629 struct mpssas_target *targ;
2630 uint64_t sasaddr = 0;
2635 * Make sure the target exists.
2637 targ = &sassc->targets[ccb->ccb_h.target_id];
2638 if (targ->handle == 0x0) {
2639 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
2640 ccb->ccb_h.target_id);
2641 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2647 * If this device has an embedded SMP target, we'll talk to it
2649 * figure out what the expander's address is.
2651 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2652 sasaddr = targ->sasaddr;
2655 * If we don't have a SAS address for the expander yet, try
2656 * grabbing it from the page 0x83 information cached in the
2657 * transport layer for this target. LSI expanders report the
2658 * expander SAS address as the port-associated SAS address in
2659 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2662 * XXX KDM disable this for now, but leave it commented out so that
2663 * it is obvious that this is another possible way to get the SAS
2666 * The parent handle method below is a little more reliable, and
2667 * the other benefit is that it works for devices other than SES
2668 * devices. So you can send a SMP request to a da(4) device and it
2669 * will get routed to the expander that device is attached to.
2670 * (Assuming the da(4) device doesn't contain an SMP target...)
2674 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2678 * If we still don't have a SAS address for the expander, look for
2679 * the parent device of this device, which is probably the expander.
2682 #ifdef OLD_MPS_PROBE
2683 struct mpssas_target *parent_target;
/* Without a parent handle we have nowhere to route the SMP request. */
2686 if (targ->parent_handle == 0x0) {
2687 mps_printf(sc, "%s: handle %d does not have a valid "
2688 "parent handle!\n", __func__, targ->handle);
2689 ccb->ccb_h.status = CAM_REQ_INVALID;
2692 #ifdef OLD_MPS_PROBE
/* Legacy path: resolve the parent handle to a target structure. */
2693 parent_target = mpssas_find_target_by_handle(sassc, 0,
2694 targ->parent_handle);
2696 if (parent_target == NULL) {
2697 mps_printf(sc, "%s: handle %d does not have a valid "
2698 "parent target!\n", __func__, targ->handle);
2699 ccb->ccb_h.status = CAM_REQ_INVALID;
2703 if ((parent_target->devinfo &
2704 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2705 mps_printf(sc, "%s: handle %d parent %d does not "
2706 "have an SMP target!\n", __func__,
2707 targ->handle, parent_target->handle);
2708 ccb->ccb_h.status = CAM_REQ_INVALID;
2713 sasaddr = parent_target->sasaddr;
2714 #else /* OLD_MPS_PROBE */
/* Current path: use the parent info cached on the target itself. */
2715 if ((targ->parent_devinfo &
2716 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2717 mps_printf(sc, "%s: handle %d parent %d does not "
2718 "have an SMP target!\n", __func__,
2719 targ->handle, targ->parent_handle);
2720 ccb->ccb_h.status = CAM_REQ_INVALID;
2724 if (targ->parent_sasaddr == 0x0) {
2725 mps_printf(sc, "%s: handle %d parent handle %d does "
2726 "not have a valid SAS address!\n",
2727 __func__, targ->handle, targ->parent_handle);
2728 ccb->ccb_h.status = CAM_REQ_INVALID;
2732 sasaddr = targ->parent_sasaddr;
2733 #endif /* OLD_MPS_PROBE */
/* Still no address after all fallbacks: reject the request. */
2738 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
2739 __func__, targ->handle);
2740 ccb->ccb_h.status = CAM_REQ_INVALID;
2743 mpssas_send_smpcmd(sassc, ccb, sasaddr);
2751 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB by issuing an MPI2 Target Reset task
 * management request for the addressed target.  Completion is handled
 * by mpssas_resetdev_complete().  Must be called with the mps lock
 * held (asserted below).
 */
2754 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2756 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2757 struct mps_softc *sc;
2758 struct mps_command *tm;
2759 struct mpssas_target *targ;
2761 mps_dprint(sassc->sc, MPS_TRACE, __func__);
2762 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2765 tm = mps_alloc_command(sc)
2767 mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
2768 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Build the task management frame for a target reset. */
2773 targ = &sassc->targets[ccb->ccb_h.target_id];
2774 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2775 req->DevHandle = targ->handle;
2776 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2777 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2779 /* SAS Hard Link Reset / SATA Link Reset */
2780 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go through the high-priority queue. */
2783 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2784 tm->cm_complete = mpssas_resetdev_complete;
2785 tm->cm_complete_data = ccb;
2787 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task management request
 * issued by mpssas_action_resetdev().  Maps the TM response code to a
 * CAM status, announces the reset to CAM on success, and frees the
 * task management command.  Runs with the mps lock held (asserted).
 */
2791 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2793 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2796 mps_dprint(sc, MPS_TRACE, __func__);
2797 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2799 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2800 ccb = tm->cm_complete_data;
2803 * Currently there should be no way we can hit this case. It only
2804 * happens when we have a failure to allocate chain frames, and
2805 * task management commands don't have S/G lists.
2807 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2808 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2810 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2812 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2813 "This should not happen!\n", __func__, tm->cm_flags,
2815 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2819 kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2820 resp->IOCStatus, resp->ResponseCode);
/* TM_COMPLETE means the reset went through; tell CAM a BDR was sent. */
2822 if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2823 ccb->ccb_h.status = CAM_REQ_CMP;
2824 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2828 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2832 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point, used when interrupts are unavailable
 * (e.g. during a kernel panic/dump).  Disables MPS_TRACE debugging
 * first so frequent messages don't drown the polled path, then runs
 * the interrupt handler directly.
 */
2837 mpssas_poll(struct cam_sim *sim)
2839 struct mpssas_softc *sassc;
2841 sassc = cam_sim_softc(sim);
2843 if (sassc->sc->mps_debug & MPS_TRACE) {
2844 /* frequent debug messages during a panic just slow
2845 * everything down too much.
2847 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2848 sassc->sc->mps_debug &= ~MPS_TRACE;
2851 mps_intr_locked(sassc->sc);
/*
 * Completion callback for a queued bus rescan CCB.  Logs the path,
 * releases the path and CCB, and — on pre-1000006 FreeBSD versions —
 * gathers EEDP information for existing LUNs before the scan is
 * considered complete.  Runs with the mps lock held (asserted).
 */
2855 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2857 struct mpssas_softc *sassc;
2860 if (done_ccb == NULL)
2863 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2865 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2867 xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2868 mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2870 xpt_free_path(done_ccb->ccb_h.path);
2871 xpt_free_ccb(done_ccb);
2873 #if __FreeBSD_version < 1000006
2875 * Before completing scan, get EEDP stuff for all of the existing
2878 mpssas_check_eedp(sassc);
2883 /* thread to handle bus rescans */
/*
 * Kernel thread that services the bus-rescan queue (sassc->ccb_scanq).
 * Sleeps up to one second at a time, drains queued rescan CCBs, and
 * exits when MPSSAS_SHUTDOWN is set, clearing MPSSAS_SCANTHREAD and
 * waking anyone waiting on the flags word.
 * NOTE(review): loop braces and the xpt_action/dispatch lines are
 * missing from this extract — see the full file for the work loop.
 */
2885 mpssas_scanner_thread(void *arg)
2887 struct mpssas_softc *sassc;
2888 struct mps_softc *sc;
2891 sassc = (struct mpssas_softc *)arg;
2894 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2898 /* Sleep for 1 second and check the queue status*/
2899 lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 1 * hz);
2900 if (sassc->flags & MPSSAS_SHUTDOWN) {
2901 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2905 /* Get first work */
2906 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
2909 /* Got first work */
2910 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2912 if (sassc->flags & MPSSAS_SHUTDOWN) {
2913 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
/* Thread teardown: clear the flag and wake waiters before exiting. */
2919 sassc->flags &= ~MPSSAS_SCANTHREAD;
2920 wakeup(&sassc->flags);
2922 mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
/*
 * Queue a rescan CCB for the scanner thread.  Sets up the CCB's
 * private pointer and completion callback, appends it to ccb_scanq,
 * and wakes the scanner thread.  Must be called with the mps lock
 * held (asserted).
 */
2927 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2931 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2933 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2938 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2939 mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2941 /* Prepare request */
2942 ccb->ccb_h.ppriv_ptr1 = sassc;
2943 ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2944 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2945 TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2946 wakeup(&sassc->ccb_scanq);
2949 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback (FreeBSD >= 1000006 path).  On
 * AC_ADVINFO_CHANGED with buftype CDAI_TYPE_RCAPLONG, re-reads the
 * long read-capacity data for the affected LUN via XPT_DEV_ADVINFO
 * and updates the LUN's EEDP (protection information) state.
 * NOTE(review): early-return/break lines and some assignments are
 * missing from this extract — verify the filtering logic in full.
 */
2951 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2954 struct mps_softc *sc;
2956 sc = (struct mps_softc *)callback_arg;
2959 case AC_ADVINFO_CHANGED: {
2960 struct mpssas_target *target;
2961 struct mpssas_softc *sassc;
2962 struct scsi_read_capacity_data_long rcap_buf;
2963 struct ccb_dev_advinfo cdai;
2964 struct mpssas_lun *lun;
2969 buftype = (uintptr_t)arg;
2975 * We're only interested in read capacity data changes.
2977 if (buftype != CDAI_TYPE_RCAPLONG)
2981 * We're only interested in devices that are attached to
2984 if (xpt_path_path_id(path) != sassc->sim->path_id)
2988 * We should have a handle for this, but check to make sure.
2990 target = &sassc->targets[xpt_path_target_id(path)];
2991 if (target->handle == 0)
/* Look up the LUN for this path in the target's LUN list. */
2994 lunid = xpt_path_lun_id(path);
2996 SLIST_FOREACH(lun, &target->luns, lun_link) {
2997 if (lun->lun_id == lunid) {
/* LUN not tracked yet: allocate and insert a new entry. */
3003 if (found_lun == 0) {
3004 lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
3005 M_INTWAIT | M_ZERO);
3007 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
3008 "LUN for EEDP support.\n");
3011 lun->lun_id = lunid;
3012 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data from the transport. */
3015 bzero(&rcap_buf, sizeof(rcap_buf));
3016 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3017 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3018 cdai.ccb_h.flags = CAM_DIR_IN;
3019 cdai.buftype = CDAI_TYPE_RCAPLONG;
3021 cdai.bufsiz = sizeof(rcap_buf);
3022 cdai.buf = (uint8_t *)&rcap_buf;
3023 xpt_action((union ccb *)&cdai);
3024 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3025 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN set means the LUN is formatted with protection. */
3028 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3029 && (rcap_buf.prot & SRC16_PROT_EN)) {
3030 lun->eedp_formatted = TRUE;
3031 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3033 lun->eedp_formatted = FALSE;
3034 lun->eedp_block_size = 0;
3042 #else /* __FreeBSD_version >= 1000006 */
/*
 * Pre-1000006 EEDP discovery path: walk every target/LUN behind the
 * SIM and issue a READ CAPACITY (16) to each existing LUN so
 * mpssas_read_cap_done() can record whether the LUN is formatted
 * with protection information (EEDP).
 * NOTE(review): loop braces, the lunid loop header, and the xpt
 * submission line are missing from this extract — confirm iteration
 * and CCB dispatch details against the full file.
 */
3045 mpssas_check_eedp(struct mpssas_softc *sassc)
3047 struct mps_softc *sc = sassc->sc;
3048 struct ccb_scsiio *csio;
3049 struct scsi_read_capacity_16 *scsi_cmd;
3050 struct scsi_read_capacity_eedp *rcap_buf;
3052 path_id_t pathid = cam_sim_path(sassc->sim);
3053 target_id_t targetid;
3055 struct cam_periph *found_periph;
3056 struct mpssas_target *target;
3057 struct mpssas_lun *lun;
3061 * Issue a READ CAPACITY 16 command to each LUN of each target. This
3062 * info is used to determine if the LUN is formatted for EEDP support.
3064 for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
3065 target = &sassc->targets[targetid];
3066 if (target->handle == 0x0) {
/* One response buffer per LUN probe; freed by the completion path. */
3073 kmalloc(sizeof(struct scsi_read_capacity_eedp),
3074 M_MPT2, M_INTWAIT | M_ZERO);
3075 if (rcap_buf == NULL) {
3076 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
3077 "capacity buffer for EEDP support.\n");
3081 ccb = kmalloc(sizeof(union ccb), M_TEMP,
3084 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
3085 pathid, targetid, lunid) != CAM_REQ_CMP) {
3086 mps_dprint(sc, MPS_FAULT, "Unable to create "
3087 "path for EEDP support\n");
3088 kfree(rcap_buf, M_MPT2);
3094 * If a periph is returned, the LUN exists. Create an
3095 * entry in the target's LUN list.
3097 if ((found_periph = cam_periph_find(ccb->ccb_h.path,
3100 * If LUN is already in list, don't create a new
3104 SLIST_FOREACH(lun, &target->luns, lun_link) {
3105 if (lun->lun_id == lunid) {
3111 lun = kmalloc(sizeof(struct mpssas_lun),
3112 M_MPT2, M_INTWAIT | M_ZERO);
3113 lun->lun_id = lunid;
3114 SLIST_INSERT_HEAD(&target->luns, lun,
3120 * Issue a READ CAPACITY 16 command for the LUN.
3121 * The mpssas_read_cap_done function will load
3122 * the read cap info into the LUN struct.
3125 csio->ccb_h.func_code = XPT_SCSI_IO;
3126 csio->ccb_h.flags = CAM_DIR_IN;
3127 csio->ccb_h.retry_count = 4;
3128 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3129 csio->ccb_h.timeout = 60000;
3130 csio->data_ptr = (uint8_t *)rcap_buf;
3131 csio->dxfer_len = sizeof(struct
3132 scsi_read_capacity_eedp);
3133 csio->sense_len = MPS_SENSE_LEN;
3134 csio->cdb_len = sizeof(*scsi_cmd);
3135 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the READ CAPACITY (16) CDB (opcode 0x9E / SAI 0x10). */
3137 scsi_cmd = (struct scsi_read_capacity_16 *)
3138 &csio->cdb_io.cdb_bytes;
3139 bzero(scsi_cmd, sizeof(*scsi_cmd));
3140 scsi_cmd->opcode = 0x9E;
3141 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3142 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
3143 scsi_read_capacity_eedp);
3146 * Set the path, target and lun IDs for the READ
3149 ccb->ccb_h.path_id =
3150 xpt_path_path_id(ccb->ccb_h.path);
3151 ccb->ccb_h.target_id =
3152 xpt_path_target_id(ccb->ccb_h.path);
3153 ccb->ccb_h.target_lun =
3154 xpt_path_lun_id(ccb->ccb_h.path);
3156 ccb->ccb_h.ppriv_ptr1 = sassc;
/* No periph found: LUN doesn't exist; release this probe's resources. */
3159 kfree(rcap_buf, M_MPT2);
3160 xpt_free_path(ccb->ccb_h.path);
3163 } while (found_periph);
/*
 * Completion callback for the internally-generated READ CAPACITY (16)
 * issued by mpssas_check_eedp().  Releases the device queue (frozen
 * because this command did not come from CAM), then records the EEDP
 * formatting state and block size on the matching LUN entry, and
 * frees the response buffer, path, and CCB.
 */
3169 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3171 struct mpssas_softc *sassc;
3172 struct mpssas_target *target;
3173 struct mpssas_lun *lun;
3174 struct scsi_read_capacity_eedp *rcap_buf;
3176 if (done_ccb == NULL)
3180 * Driver need to release devq, it Scsi command is
3181 * generated by driver internally.
3182 * Currently there is a single place where driver
3183 * calls scsi command internally. In future if driver
3184 * calls more scsi command internally, it needs to release
3185 * devq internally, since those command will not go back to
3188 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3189 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3190 xpt_release_devq(done_ccb->ccb_h.path,
3191 /*count*/ 1, /*run_queue*/TRUE);
3194 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3197 * Get the LUN ID for the path and look it up in the LUN list for the
3200 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3201 target = &sassc->targets[done_ccb->ccb_h.target_id];
3202 SLIST_FOREACH(lun, &target->luns, lun_link) {
3203 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3207 * Got the LUN in the target's LUN list. Fill it in
3208 * with EEDP info. If the READ CAP 16 command had some
3209 * SCSI error (common if command is not supported), mark
3210 * the lun as not supporting EEDP and set the block size
3213 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3214 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3215 lun->eedp_formatted = FALSE;
3216 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte (PROT_EN) indicates EEDP formatting. */
3220 if (rcap_buf->protect & 0x01) {
3221 lun->eedp_formatted = TRUE;
3222 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3227 // Finished with this CCB and path.
3228 kfree(rcap_buf, M_MPT2);
3229 xpt_free_path(done_ccb->ccb_h.path);
3230 xpt_free_ccb(done_ccb);
3232 #endif /* __FreeBSD_version >= 1000006 */
/*
 * Kick off SAS discovery: bump the startup refcount (keeps the simq
 * frozen until discovery finishes), set wait_for_port_enable, and
 * send the MPI2 Port Enable request.
 */
3235 mpssas_startup(struct mps_softc *sc)
3237 struct mpssas_softc *sassc;
3240 * Send the port enable message and set the wait_for_port_enable flag.
3241 * This flag helps to keep the simq frozen until all discovery events
3245 mpssas_startup_increment(sassc);
3246 sc->wait_for_port_enable = 1;
3247 mpssas_send_portenable(sc);
/*
 * Allocate a command and submit an MPI2 PORT_ENABLE request to the
 * IOC.  Completion is handled by mpssas_portenable_complete().
 * NOTE(review): the error-return line for a failed allocation is not
 * visible in this extract — confirm the return value in the full file.
 */
3252 mpssas_send_portenable(struct mps_softc *sc)
3254 MPI2_PORT_ENABLE_REQUEST *request;
3255 struct mps_command *cm;
3257 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3259 if ((cm = mps_alloc_command(sc)) == NULL)
3261 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3262 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3263 request->MsgFlags = 0;
3265 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3266 cm->cm_complete = mpssas_portenable_complete;
3270 mps_map_command(sc, cm);
3271 mps_dprint(sc, MPS_TRACE,
3272 "mps_send_portenable finished cm %p req %p complete %p\n",
3273 cm, cm->cm_req, cm->cm_complete);
3278 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3280 MPI2_PORT_ENABLE_REPLY *reply;
3281 struct mpssas_softc *sassc;
3282 struct mpssas_target *target;
3285 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3289 * Currently there should be no way we can hit this case. It only
3290 * happens when we have a failure to allocate chain frames, and
3291 * port enable commands don't have S/G lists.
3293 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3294 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3295 "This should not happen!\n", __func__, cm->cm_flags);
3298 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3300 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3301 else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3302 MPI2_IOCSTATUS_SUCCESS)
3303 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3305 mps_free_command(sc, cm);
3306 if (sc->mps_ich.ich_arg != NULL) {
3307 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3308 config_intrhook_disestablish(&sc->mps_ich);
3309 sc->mps_ich.ich_arg = NULL;
3313 * Get WarpDrive info after discovery is complete but before the scan
3314 * starts. At this point, all devices are ready to be exposed to the
3315 * OS. If devices should be hidden instead, take them out of the
3316 * 'targets' array before the scan. The devinfo for a disk will have
3317 * some info and a volume's will be 0. Use that to remove disks.
3319 mps_wd_config_pages(sc);
3320 if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
3321 && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
3322 || (sc->WD_valid_config && (sc->WD_hide_expose ==
3323 MPS_WD_HIDE_IF_VOLUME))) {
3324 for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
3325 target = &sassc->targets[i];
3326 if (target->devinfo) {
3327 target->devinfo = 0x0;
3328 target->encl_handle = 0x0;
3329 target->encl_slot = 0x0;
3330 target->handle = 0x0;
3332 target->linkrate = 0x0;
3333 target->flags = 0x0;
3339 * Done waiting for port enable to complete. Decrement the refcount.
3340 * If refcount is 0, discovery is complete and a rescan of the bus can
3341 * take place. Since the simq was explicitly frozen before port
3342 * enable, it must be explicitly released here to keep the
3343 * freeze/release count in sync.
3345 sc->wait_for_port_enable = 0;
3346 sc->port_enable_complete = 1;
3347 mpssas_startup_decrement(sassc);
3348 xpt_release_simq(sassc->sim, 1);