2 * Copyright (c) 2009 Yahoo! Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2011 LSI Corp.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
39 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * LSI MPT-Fusion Host Adapter FreeBSD
53 * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
56 /* Communications core for LSI MPT2 */
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
69 #include <sys/malloc.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
80 #include <machine/stdarg.h>
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
108 #define MPSSAS_DISCOVERY_TIMEOUT 20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
112 * static array to check SCSI OpCode for EEDP protection bits
114 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
115 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
116 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table mapping a SCSI opcode (0x00-0xFF) to the EEDP protection
 * flags the firmware needs for that command: PRO_R for reads (check/remove
 * protection info), PRO_W for writes (insert), PRO_V for verify ops.
 * Entries of 0 mean the opcode takes no EEDP handling.
 * NOTE(review): the closing "};" of this initializer is not visible in this
 * extracted chunk -- lines appear elided; confirm against the full source.
 */
117 static uint8_t op_code_prot[256] = {
118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10), 0x2F VERIFY(10) */
120 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x41 WRITE SAME(10) -- presumably; verify opcode mapping */
122 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
123 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
124 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
125 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE AND VERIFY(16), 0x8F VERIFY(16) */
126 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
/* 0x93 WRITE SAME(16) */
127 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12), 0xAF VERIFY(12) */
128 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
136 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
138 static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
139 static void mpssas_log_command(struct mps_command *, const char *, ...)
141 #if 0 /* XXX unused */
142 static void mpssas_discovery_timeout(void *data);
144 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
145 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
146 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
147 static void mpssas_poll(struct cam_sim *sim);
148 static void mpssas_scsiio_timeout(void *data);
149 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
150 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
151 struct mps_command *cm, union ccb *ccb);
152 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
153 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
154 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
155 #if __FreeBSD_version >= 900026
156 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
157 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
159 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
160 #endif //FreeBSD_version >= 900026
161 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
162 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
163 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
164 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
165 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
166 static void mpssas_scanner_thread(void *arg);
167 #if __FreeBSD_version >= 1000006
168 static void mpssas_async(void *callback_arg, uint32_t code,
169 struct cam_path *path, void *arg);
171 static void mpssas_check_eedp(struct mpssas_softc *sassc);
172 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
174 static int mpssas_send_portenable(struct mps_softc *sc);
175 static void mpssas_portenable_complete(struct mps_softc *sc,
176 struct mps_command *cm);
/*
 * Linear search of the softc's target array for the device with the given
 * firmware device handle, starting from index 'start'.
 * NOTE(review): the loop-index declaration, the match return, and the
 * no-match return path are elided in this extracted chunk -- presumably it
 * returns the matching target or NULL; confirm against the full source.
 */
178 static struct mpssas_target *
179 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
181 	struct mpssas_target *target;
184 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
185 		target = &sassc->targets[i];
186 		if (target->handle == handle)
193 /* we need to freeze the simq during attach and diag reset, to avoid failing
194 * commands before device handles have been found by discovery. Since
195 * discovery involves reading config pages and possibly sending commands,
196 * discovery actions may continue even after we receive the end of discovery
197 * event, so refcount discovery actions instead of assuming we can unfreeze
198 * the simq when we get the event.
/*
 * Take a reference on discovery-time activity.  While the driver is in
 * startup (MPSSAS_IN_STARTUP), the first reference freezes the CAM simq so
 * no commands are issued before device handles exist; see the block comment
 * above for why a refcount is used instead of a single event.
 */
201 mpssas_startup_increment(struct mpssas_softc *sassc)
203 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
204 		if (sassc->startup_refcount++ == 0) {
205 			/* just starting, freeze the simq */
206 			mps_dprint(sassc->sc, MPS_INFO,
207 			    "%s freezing simq\n", __func__);
208 			xpt_freeze_simq(sassc->sim, 1);
210 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
211 		    sassc->startup_refcount);
/*
 * Drop a discovery reference taken by mpssas_startup_increment().  When the
 * last reference is released during startup, clear MPSSAS_IN_STARTUP,
 * release the simq, and rescan so CAM sees the discovered topology.
 */
216 mpssas_startup_decrement(struct mpssas_softc *sassc)
218 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
219 		if (--sassc->startup_refcount == 0) {
220 			/* finished all discovery-related actions, release
221 			 * the simq and rescan for the latest topology.
223 			mps_dprint(sassc->sc, MPS_INFO,
224 			    "%s releasing simq\n", __func__);
225 			sassc->flags &= ~MPSSAS_IN_STARTUP;
226 			xpt_release_simq(sassc->sim, 1);
227 			mpssas_rescan_target(sassc->sc, NULL);
229 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
230 		    sassc->startup_refcount);
234 /* LSI's firmware requires us to stop sending commands when we're doing task
235 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management.  The first
 * outstanding TM freezes the simq (tm_count refcount) because the firmware
 * requires no normal I/O while task management is in flight; see the block
 * comment above.  NOTE(review): the NULL-check on the allocation and the
 * return statement are elided in this extracted chunk.
 */
239 mpssas_alloc_tm(struct mps_softc *sc)
241 	struct mps_command *tm;
243 	tm = mps_alloc_high_priority_command(sc);
245 	if (sc->sassc->tm_count++ == 0) {
246 		mps_printf(sc, "%s freezing simq\n", __func__);
247 		xpt_freeze_simq(sc->sassc->sim, 1);
249 	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
250 	    sc->sassc->tm_count);
/*
 * Release a task-management command obtained from mpssas_alloc_tm().
 * Dropping the last TM reference releases the simq; the private tm_count
 * refcount makes diag-reset cleanup simpler than relying on CAM's own
 * freeze count.
 */
256 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
261 	/* if there are no TMs in use, we can release the simq. We use our
262 	 * own refcount so that it's easier for a diag reset to cleanup and
265 	if (--sc->sassc->tm_count == 0) {
266 		mps_printf(sc, "%s releasing simq\n", __func__);
267 		xpt_release_simq(sc->sassc->sim, 1);
269 	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
270 	    sc->sassc->tm_count);
272 	mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of a single target, or of the whole bus when
 * 'targ' is NULL (target id becomes CAM_TARGET_WILDCARD).  Allocates a CCB,
 * builds a path for it, and hands it to mpssas_rescan().
 * NOTE(review): lines elided in this chunk -- presumably the error path
 * frees the CCB and returns; confirm against the full source.
 */
277 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
279 	struct mpssas_softc *sassc = sc->sassc;
281 	target_id_t targetid;
284 	pathid = cam_sim_path(sassc->sim);
286 		targetid = CAM_TARGET_WILDCARD;
/* pointer arithmetic: target index within the softc's target array */
288 		targetid = targ - sassc->targets;
291 	 * Allocate a CCB and schedule a rescan.
293 	ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
295 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
296 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 		mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
302 	/* XXX Hardwired to scan the bus for now */
303 	ccb->ccb_h.func_code = XPT_SCAN_BUS;
304 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
305 	mpssas_rescan(sassc, ccb);
/*
 * printf-style logging helper that prefixes the message with context for a
 * command: the CAM path and SCSI CDB when a CCB is attached, otherwise a
 * synthesized "(noperiph:...)" identifier, plus the firmware SMID.
 * Formats into a stack sbuf and emits via kprintf().
 */
309 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
319 	sbuf_new(&sb, str, sizeof(str), 0);
323 	if (cm->cm_ccb != NULL) {
324 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
326 		sbuf_cat(&sb, path_str);
327 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
328 			scsi_command_string(&cm->cm_ccb->csio, &sb);
329 			sbuf_printf(&sb, "length %d ",
330 			    cm->cm_ccb->csio.dxfer_len);
/* no CCB: fall back to sim name/unit/bus and the target id (0xFFFFFFFF if none) */
334 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
335 		    cam_sim_name(cm->cm_sc->sassc->sim),
336 		    cam_sim_unit(cm->cm_sc->sassc->sim),
337 		    cam_sim_bus(cm->cm_sc->sassc->sim),
338 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
342 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
343 	sbuf_vprintf(&sb, fmt, ap);
345 	kprintf("%s", sbuf_data(&sb));
/*
 * Notify CAM that a target has disappeared: build a path to the target
 * (LUN 0) and post an AC_LOST_DEVICE async event so peripherals detach.
 * NOTE(review): the early-return on path-creation failure and the
 * xpt_free_path() call appear elided in this extracted chunk.
 */
351 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
353 	struct mpssas_softc *sassc = sc->sassc;
354 	path_id_t pathid = cam_sim_path(sassc->sim);
355 	struct cam_path *path;
357 	mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
358 	if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
359 		mps_printf(sc, "unable to create path for lost target %d\n",
364 	xpt_async(AC_LOST_DEVICE, path, NULL);
369 * The MPT2 firmware performs debounce on the link to avoid transient link
370 * errors and false removals. When it does decide that link has been lost
371 * and a device need to go away, it expects that the host will perform a
372 * target reset and then an op remove. The reset has the side-effect of
373 * aborting any outstanding requests for the device, which is required for
374 * the op-remove to succeed. It's not clear if the host should check for
375 * the device coming back alive after the reset.
/*
 * Begin removal of the device with the given firmware handle.  Per the
 * firmware contract described above: issue a target reset (with link reset)
 * whose completion (mpssas_remove_device) will follow up with the actual
 * OP_REMOVE.  WD (WarpDrive) disks configured as always-exposed are left
 * alone.  The handle is smuggled to the completion via cm_complete_data.
 */
378 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
380 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
381 	struct mps_softc *sc;
382 	struct mps_command *cm;
383 	struct mpssas_target *targ = NULL;
385 	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
388 	 * If this is a WD controller, determine if the disk should be exposed
389 	 * to the OS or not. If disk should be exposed, return from this
390 	 * function without doing anything.
393 	if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
394 	    MPS_WD_EXPOSE_ALWAYS)) {
398 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
400 		/* FIXME: what is the action? */
401 		/* We don't know about this device? */
402 		kprintf("%s: invalid handle 0x%x \n", __func__, handle);
406 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
408 	cm = mpssas_alloc_tm(sc);
410 		mps_printf(sc, "%s: command alloc failure\n", __func__);
/* tell CAM the device is gone before the reset is even issued */
414 	mpssas_lost_target(sc, targ);
416 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
417 	memset(req, 0, sizeof(*req));
418 	req->DevHandle = targ->handle;
419 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
420 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
422 	/* SAS Hard Link Reset / SATA Link Reset */
423 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
427 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
428 	cm->cm_complete = mpssas_remove_device;
429 	cm->cm_complete_data = (void *)(uintptr_t)handle;
430 	mps_map_command(sc, cm);
/*
 * Completion handler for the target reset sent by mpssas_prepare_remove().
 * On success, reuses the same command to send the SAS_IO_UNIT_CONTROL
 * OP_REMOVE_DEVICE request (completion: mpssas_remove_complete), then fails
 * any commands still queued on the target with CAM_DEV_NOT_THERE.
 * On error paths (chain-frame failure, NULL reply from a diag reset, bad
 * IOCStatus) the TM is simply freed.
 */
434 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
436 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
437 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
438 	struct mpssas_target *targ;
439 	struct mps_command *next_cm;
442 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
444 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
445 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
449 	 * Currently there should be no way we can hit this case. It only
450 	 * happens when we have a failure to allocate chain frames, and
451 	 * task management commands don't have S/G lists.
453 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
454 		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
455 		    "This should not happen!\n", __func__, tm->cm_flags,
457 		mpssas_free_tm(sc, tm);
462 		/* XXX retry the remove after the diag reset completes? */
463 		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
465 		mpssas_free_tm(sc, tm);
469 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
470 		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
471 		    reply->IOCStatus, handle);
472 		mpssas_free_tm(sc, tm);
476 	mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
477 	    reply->TerminationCount);
478 	mps_free_reply(sc, tm->cm_reply_data);
479 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
481 	/* Reuse the existing command */
482 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
483 	memset(req, 0, sizeof(*req));
484 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
485 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
486 	req->DevHandle = handle;
488 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
489 	tm->cm_complete = mpssas_remove_complete;
490 	tm->cm_complete_data = (void *)(uintptr_t)handle;
492 	mps_map_command(sc, tm);
494 	mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
/* fail any commands the reset missed; 'tm' is reused as the loop cursor here */
496 	TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
499 		mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
500 		ccb = tm->cm_complete_data;
501 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
502 		mpssas_scsiio_complete(sc, tm);
/*
 * Completion handler for the OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success, zeroes the target's handle and
 * enclosure/expander state so the slot can be reused; on failure the
 * target is deliberately left intact (see comment below) so the same
 * target id can be re-assigned if the device returns.
 */
507 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
509 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
511 	struct mpssas_target *targ;
513 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
515 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
516 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
519 	 * Currently there should be no way we can hit this case. It only
520 	 * happens when we have a failure to allocate chain frames, and
521 	 * task management commands don't have S/G lists.
523 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
524 		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
525 		    "This should not happen!\n", __func__, tm->cm_flags,
527 		mpssas_free_tm(sc, tm);
532 		/* most likely a chip reset */
533 		mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
535 		mpssas_free_tm(sc, tm);
539 	mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
540 	    handle, reply->IOCStatus);
543 	 * Don't clear target if remove fails because things will get confusing.
544 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
545 	 * this target id if possible, and so we can assign the same target id
546 	 * to this device if it comes back in the future.
548 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
551 		targ->encl_handle = 0x0;
552 		targ->encl_slot = 0x0;
553 		targ->exp_dev_handle = 0x0;
555 		targ->linkrate = 0x0;
559 	mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (device status,
 * discovery, topology, enclosure, IR/RAID, log entries) and register
 * mpssas_evt_handler for them, storing the handle in sassc->mpssas_eh.
 */
563 mpssas_register_events(struct mps_softc *sc)
568 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
569 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
570 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
571 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
572 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
573 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
574 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
575 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
576 	setbit(events, MPI2_EVENT_IR_VOLUME);
577 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
578 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
579 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
581 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
582 	    &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the sassc and target array, create the
 * CAM SIM and devq, start the event-handling taskqueue and the rescan
 * kthread, register the SCSI bus, freeze the simq until discovery
 * completes, and register for firmware events.
 * NOTE(review): error-unwind paths and the final return are elided in this
 * extracted chunk; confirm cleanup ordering against the full source.
 */
588 mps_attach_sas(struct mps_softc *sc)
590 	struct mpssas_softc *sassc;
591 #if __FreeBSD_version >= 1000006
596 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
598 	sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
599 	sassc->targets = kmalloc(sizeof(struct mpssas_target) *
600 	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
604 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
605 		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
610 	unit = device_get_unit(sc->mps_dev);
611 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
612 	    unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
/* cam_sim_alloc holds its own devq reference; drop ours */
613 	cam_simq_release(sassc->devq);
614 	if (sassc->sim == NULL) {
615 		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
620 	TAILQ_INIT(&sassc->ev_queue);
622 	/* Initialize taskqueue for Event Handling */
623 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
624 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
625 	    taskqueue_thread_enqueue, &sassc->ev_tq);
627 	/* Run the task queue with lowest priority */
628 	taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
629 	    device_get_nameunit(sc->mps_dev));
631 	TAILQ_INIT(&sassc->ccb_scanq);
632 	error = mps_kproc_create(mpssas_scanner_thread, sassc,
633 	    &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
635 		mps_printf(sc, "Error %d starting rescan thread\n", error);
640 	sassc->flags |= MPSSAS_SCANTHREAD;
643 	 * XXX There should be a bus for every port on the adapter, but since
644 	 * we're just going to fake the topology for now, we'll pretend that
645 	 * everything is just a target on a single bus.
647 	if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
648 		mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
655 	 * Assume that discovery events will start right away. Freezing
656 	 * the simq will prevent the CAM boottime scanner from running
657 	 * before discovery is complete.
659 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
660 	xpt_freeze_simq(sassc->sim, 1);
661 	sc->sassc->startup_refcount = 0;
663 	callout_init_mp(&sassc->discovery_callout);
664 	sassc->discovery_timeouts = 0;
668 #if __FreeBSD_version >= 1000006
669 	status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
670 	if (status != CAM_REQ_CMP) {
671 		mps_printf(sc, "Error %#x registering async handler for "
672 		    "AC_ADVINFO_CHANGED events\n", status);
678 	mpssas_register_events(sc);
/*
 * Tear down the SAS/CAM layer in roughly reverse attach order: deregister
 * firmware events, drain/free the event taskqueue (with the lock unheld --
 * see comment below), deregister the async handler, release any startup
 * simq freeze, deregister and free the SIM, signal the scanner thread to
 * shut down (waiting up to 30s), then free the target array and sassc.
 */
686 mps_detach_sas(struct mps_softc *sc)
688 	struct mpssas_softc *sassc;
690 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
692 	if (sc->sassc == NULL)
696 	mps_deregister_events(sc, sassc->mpssas_eh);
699 	 * Drain and free the event handling taskqueue with the lock
700 	 * unheld so that any parallel processing tasks drain properly
701 	 * without deadlocking.
703 	if (sassc->ev_tq != NULL)
704 		taskqueue_free(sassc->ev_tq);
706 	/* Make sure CAM doesn't wedge if we had to bail out early. */
709 	/* Deregister our async handler */
710 #if __FreeBSD_version >= 1000006
/* code 0 == deregister this callback */
711 	xpt_register_async(0, mpssas_async, sc, NULL);
714 	if (sassc->flags & MPSSAS_IN_STARTUP)
715 		xpt_release_simq(sassc->sim, 1);
717 	if (sassc->sim != NULL) {
718 		xpt_bus_deregister(cam_sim_path(sassc->sim));
719 		cam_sim_free(sassc->sim);
722 	if (sassc->flags & MPSSAS_SCANTHREAD) {
723 		sassc->flags |= MPSSAS_SHUTDOWN;
724 		wakeup(&sassc->ccb_scanq);
/* thread clears MPSSAS_SCANTHREAD on exit; sleep until then or timeout */
726 		if (sassc->flags & MPSSAS_SCANTHREAD) {
727 			lksleep(&sassc->flags, &sc->mps_lock, 0,
728 			    "mps_shutdown", 30 * hz);
733 	kfree(sassc->targets, M_MPT2);
734 	kfree(sassc, M_MPT2);
/*
 * Called when firmware discovery finishes: cancel any pending discovery
 * timeout callout.  NOTE(review): additional end-of-discovery work may be
 * elided from this extracted chunk.
 */
741 mpssas_discovery_end(struct mpssas_softc *sassc)
743 	struct mps_softc *sc = sassc->sc;
745 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
747 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
748 		callout_stop(&sassc->discovery_callout);
752 #if 0 /* XXX unused */
/*
 * (Compiled out.)  Callout fired when discovery has not completed within
 * MPSSAS_DISCOVERY_TIMEOUT seconds.  Polls the hardware in case interrupts
 * are broken, then either ends discovery, re-arms the timeout (up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts), or gives up and forces
 * discovery to end.
 */
754 mpssas_discovery_timeout(void *data)
756 	struct mpssas_softc *sassc = data;
757 	struct mps_softc *sc;
760 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
764 	    "Timeout waiting for discovery, interrupts may not be working!\n");
765 	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
767 	/* Poll the hardware for events in case interrupts aren't working */
770 	mps_printf(sassc->sc,
771 	    "Finished polling after discovery timeout at %d\n", ticks);
773 	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
774 		mpssas_discovery_end(sassc);
776 		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
777 			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
778 			callout_reset(&sassc->discovery_callout,
779 			    MPSSAS_DISCOVERY_TIMEOUT * hz,
780 			    mpssas_discovery_timeout, sassc);
781 			sassc->discovery_timeouts++;
783 			mps_dprint(sassc->sc, MPS_FAULT,
784 			    "Discovery timed out, continuing.\n");
785 			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
786 			mpssas_discovery_end(sassc);
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function code:
 * answers path inquiries (XPT_PATH_INQ) and transfer-settings queries
 * locally, and hands SCSI I/O, device resets, and (on newer FreeBSD) SMP
 * I/O to their dedicated handlers.  Must be called with the mps lock held
 * (asserted below).  NOTE(review): several case labels and break/xpt_done
 * lines are elided in this extracted chunk.
 */
795 mpssas_action(struct cam_sim *sim, union ccb *ccb)
797 	struct mpssas_softc *sassc;
799 	sassc = cam_sim_softc(sim);
801 	mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
802 	    ccb->ccb_h.func_code);
803 	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
805 	switch (ccb->ccb_h.func_code) {
808 		struct ccb_pathinq *cpi = &ccb->cpi;
810 		cpi->version_num = 1;
811 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
812 		cpi->target_sprt = 0;
813 		cpi->hba_misc = PIM_NOBUSRESET;
814 		cpi->hba_eng_cnt = 0;
815 		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
/* initiator_id 255 keeps the HBA itself off the target scan list */
817 		cpi->initiator_id = 255;
818 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
819 		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
820 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
821 		cpi->unit_number = cam_sim_unit(sim);
822 		cpi->bus_id = cam_sim_bus(sim);
823 		cpi->base_transfer_speed = 150000;
824 		cpi->transport = XPORT_SAS;
825 		cpi->transport_version = 0;
826 		cpi->protocol = PROTO_SCSI;
827 		cpi->protocol_version = SCSI_REV_SPC;
828 #if __FreeBSD_version >= 800001
830 		 * XXX KDM where does this number come from?
832 		cpi->maxio = 256 * 1024;
834 		cpi->ccb_h.status = CAM_REQ_CMP;
837 	case XPT_GET_TRAN_SETTINGS:
839 		struct ccb_trans_settings *cts;
840 		struct ccb_trans_settings_sas *sas;
841 		struct ccb_trans_settings_scsi *scsi;
842 		struct mpssas_target *targ;
845 		sas = &cts->xport_specific.sas;
846 		scsi = &cts->proto_specific.scsi;
848 		targ = &sassc->targets[cts->ccb_h.target_id];
849 		if (targ->handle == 0x0) {
850 			cts->ccb_h.status = CAM_TID_INVALID;
854 		cts->protocol_version = SCSI_REV_SPC2;
855 		cts->transport = XPORT_SAS;
856 		cts->transport_version = 0;
858 		sas->valid = CTS_SAS_VALID_SPEED;
/* report negotiated link rate in kb/s: 1.5/3/6 Gbps */
859 		switch (targ->linkrate) {
861 			sas->bitrate = 150000;
864 			sas->bitrate = 300000;
867 			sas->bitrate = 600000;
873 		cts->protocol = PROTO_SCSI;
874 		scsi->valid = CTS_SCSI_VALID_TQ;
875 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
877 		cts->ccb_h.status = CAM_REQ_CMP;
880 	case XPT_CALC_GEOMETRY:
881 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
882 		ccb->ccb_h.status = CAM_REQ_CMP;
885 		mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
886 		mpssas_action_resetdev(sassc, ccb);
/* bus/host resets: nothing to do on this hardware, report success */
891 		mps_printf(sassc->sc, "mpssas_action faking success for "
893 		ccb->ccb_h.status = CAM_REQ_CMP;
896 		mpssas_action_scsiio(sassc, ccb);
898 #if __FreeBSD_version >= 900026
900 		mpssas_action_smpio(sassc, ccb);
904 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Post a CAM async event (e.g. AC_BUS_RESET, AC_SENT_BDR) for the given
 * target/LUN on this SIM's path so peripheral drivers can react to the
 * reset.  NOTE(review): the xpt_free_path() after xpt_async() appears
 * elided in this extracted chunk.
 */
912 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
913     target_id_t target_id, lun_id_t lun_id)
915 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
916 	struct cam_path *path;
918 	mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
919 	    ac_code, target_id, lun_id);
921 	if (xpt_create_path(&path, NULL,
922 	    path_id, target_id, lun_id) != CAM_REQ_CMP) {
923 		mps_printf(sc, "unable to create path for reset "
928 	xpt_async(ac_code, path, NULL);
/*
 * Diag-reset helper: walk every command slot (index 0 is reserved, hence
 * starting at 1) and force completion with a NULL reply -- either by
 * running its completion callback or waking a synchronous waiter.  Any
 * in-flight command with neither is logged as an anomaly.  Must be called
 * with the mps lock held.
 */
933 mpssas_complete_all_commands(struct mps_softc *sc)
935 	struct mps_command *cm;
939 	mps_printf(sc, "%s\n", __func__);
940 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
942 	/* complete all commands with a NULL reply */
943 	for (i = 1; i < sc->num_reqs; i++) {
944 		cm = &sc->commands[i];
/* polled commands are marked complete so the poll loop can exit */
948 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
949 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
951 		if (cm->cm_complete != NULL) {
952 			mpssas_log_command(cm,
953 			    "completing cm %p state %x ccb %p for diag reset\n",
954 			    cm, cm->cm_state, cm->cm_ccb);
956 			cm->cm_complete(sc, cm);
960 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
961 			mpssas_log_command(cm,
962 			    "waking up cm %p state %x ccb %p for diag reset\n",
963 			    cm, cm->cm_state, cm->cm_ccb);
968 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
969 			/* this should never happen, but if it does, log */
970 			mpssas_log_command(cm,
971 			    "cm %p state %x flags 0x%x ccb %p during diag "
972 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: re-enter startup mode and freeze the
 * simq (port enable will re-trigger discovery and eventually unfreeze),
 * announce a bus reset to CAM, force-complete all outstanding commands,
 * reset the startup refcount, and invalidate every target's firmware
 * handles since they may change after the reset.
 */
979 mpssas_handle_reinit(struct mps_softc *sc)
983 	/* Go back into startup mode and freeze the simq, so that CAM
984 	 * doesn't send any commands until after we've rediscovered all
985 	 * targets and found the proper device handles for them.
987 	 * After the reset, portenable will trigger discovery, and after all
988 	 * discovery-related activities have finished, the simq will be
991 	mps_printf(sc, "%s startup\n", __func__);
992 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
993 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
994 	xpt_freeze_simq(sc->sassc->sim, 1);
996 	/* notify CAM of a bus reset */
997 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1000 	/* complete and cleanup after all outstanding commands */
1001 	mpssas_complete_all_commands(sc);
1003 	mps_printf(sc, "%s startup %u tm %u after command completion\n",
1004 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1007 	 * The simq was explicitly frozen above, so set the refcount to 0.
1008 	 * The simq will be explicitly released after port enable completes.
1010 	sc->sassc->startup_refcount = 0;
1012 	/* zero all the target handles, since they may change after the
1013 	 * reset, and we have to rediscover all the targets and use the new
1016 	for (i = 0; i < sc->facts->MaxTargets; i++) {
1017 		if (sc->sassc->targets[i].outstanding != 0)
1018 			mps_printf(sc, "target %u outstanding %u\n",
1019 			    i, sc->sassc->targets[i].outstanding);
1020 		sc->sassc->targets[i].handle = 0x0;
1021 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1022 		sc->sassc->targets[i].outstanding = 0;
1023 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command itself times out.
 * NOTE(review): the recovery action (presumably a diag reset via
 * mps_reinit or similar) is elided in this extracted chunk -- confirm
 * against the full source.
 */
1027 mpssas_tm_timeout(void *data)
1029 	struct mps_command *tm = data;
1030 	struct mps_softc *sc = tm->cm_sc;
1033 	mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL_UNIT_RESET task-management command.
 * Stops the TM timeout callout, handles the chain-frame-error and
 * NULL-reply (diag reset) cases, then counts commands still outstanding on
 * the same LUN: zero means recovery succeeded (announce AC_SENT_BDR, move
 * on to the next timed-out command or free the TM); nonzero means the LUN
 * reset effectively failed, so escalate to a target reset reusing this TM.
 */
1039 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1041 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1042 	unsigned int cm_count = 0;
1043 	struct mps_command *cm;
1044 	struct mpssas_target *targ;
1046 	callout_stop(&tm->cm_callout);
1048 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1052 	 * Currently there should be no way we can hit this case. It only
1053 	 * happens when we have a failure to allocate chain frames, and
1054 	 * task management commands don't have S/G lists.
1056 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1057 		mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1058 		    "This should not happen!\n", __func__, tm->cm_flags);
1059 		mpssas_free_tm(sc, tm);
1063 	if (reply == NULL) {
1064 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1065 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1066 			/* this completion was due to a reset, just cleanup */
1067 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1069 			mpssas_free_tm(sc, tm);
1072 			/* we should have gotten a reply. */
1078 	mpssas_log_command(tm,
1079 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1080 	    reply->IOCStatus, reply->ResponseCode,
1081 	    reply->TerminationCount);
1083 	/* See if there are any outstanding commands for this LUN.
1084 	 * This could be made more efficient by using a per-LU data
1085 	 * structure of some sort.
1087 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1088 		if (cm->cm_lun == tm->cm_lun)
1092 	if (cm_count == 0) {
1093 		mpssas_log_command(tm,
1094 		    "logical unit %u finished recovery after reset\n",
1097 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1100 		/* we've finished recovery for this logical unit. check and
1101 		 * see if some other logical unit has a timedout command
1102 		 * that needs to be processed.
1104 		cm = TAILQ_FIRST(&targ->timedout_commands);
1106 			mpssas_send_abort(sc, tm, cm);
1110 			mpssas_free_tm(sc, tm);
1114 		/* if we still have commands for this LUN, the reset
1115 		 * effectively failed, regardless of the status reported.
1116 		 * Escalate to a target reset.
1118 		mpssas_log_command(tm,
1119 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1121 		mpssas_send_reset(sc, tm,
1122 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET_RESET task-management command -- the
 * escalation step after a failed LUN reset.  Mirrors the LUN-reset handler:
 * stop the callout, handle chain-frame errors and the NULL-reply diag-reset
 * case, clear MPSSAS_TARGET_INRESET, then check targ->outstanding.  Zero
 * means recovery is done (announce AC_SENT_BDR, free the TM); nonzero means
 * even the target reset failed, so escalate further.
 * NOTE(review): the final escalation action is elided in this extracted
 * chunk -- presumably a controller diag reset; confirm against full source.
 */
1127 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1129 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1130 	struct mpssas_target *targ;
1132 	callout_stop(&tm->cm_callout);
1134 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1138 	 * Currently there should be no way we can hit this case. It only
1139 	 * happens when we have a failure to allocate chain frames, and
1140 	 * task management commands don't have S/G lists.
1142 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1143 		mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1144 		    "This should not happen!\n", __func__, tm->cm_flags);
1145 		mpssas_free_tm(sc, tm);
1149 	if (reply == NULL) {
1150 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1151 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1152 			/* this completion was due to a reset, just cleanup */
1153 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1155 			mpssas_free_tm(sc, tm);
1158 			/* we should have gotten a reply. */
1164 	mpssas_log_command(tm,
1165 	    "target reset status 0x%x code 0x%x count %u\n",
1166 	    reply->IOCStatus, reply->ResponseCode,
1167 	    reply->TerminationCount);
1169 	targ->flags &= ~MPSSAS_TARGET_INRESET;
1171 	if (targ->outstanding == 0) {
1172 		/* we've finished recovery for this target and all
1173 		 * of its logical units.
1175 		mpssas_log_command(tm,
1176 		    "recovery finished after target reset\n");
1178 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1182 		mpssas_free_tm(sc, tm);
1185 		/* after a target reset, if this target still has
1186 		 * outstanding commands, the reset effectively failed,
1187 		 * regardless of the status reported. escalate.
1189 		mpssas_log_command(tm,
1190 		    "target reset complete for tm %p, but still have %u command(s)\n",
1191 		    tm, targ->outstanding);
1196 #define MPS_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset for tm's target.
 * type selects a logical-unit reset or a target reset and determines
 * which completion handler is installed.  Refuses to send when the
 * target has no firmware device handle.  Arms a MPS_RESET_TIMEOUT
 * second callout before mapping the command to the hardware.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1199 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1201 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1202 struct mpssas_target *target;
1205 target = tm->cm_targ;
1206 if (target->handle == 0) {
1207 mps_printf(sc, "%s null devhandle for target_id %d\n",
1208 __func__, target->tid);
/* Fill in the MPI2 task-management request frame. */
1212 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1213 req->DevHandle = target->handle;
1214 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1215 req->TaskType = type;
1217 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1218 /* XXX Need to handle invalid LUNs */
1219 MPS_SET_LUN(req->LUN, tm->cm_lun);
1220 tm->cm_targ->logical_unit_resets++;
1221 mpssas_log_command(tm, "sending logical unit reset\n");
1222 tm->cm_complete = mpssas_logical_unit_reset_complete;
1224 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1225 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1226 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1227 tm->cm_targ->target_resets++;
1228 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1229 mpssas_log_command(tm, "sending target reset\n");
1230 tm->cm_complete = mpssas_target_reset_complete;
1233 mps_printf(sc, "unexpected reset type 0x%x\n", type);
/* TM requests go through the high-priority request queue. */
1238 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1239 tm->cm_complete_data = (void *)tm;
1241 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1242 mpssas_tm_timeout, tm);
1244 err = mps_map_command(sc, tm);
1246 mpssas_log_command(tm,
1247 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT_TASK task-management command.
 * Stops the timeout callout, validates the reply, then continues
 * recovery: if no timed-out commands remain the target is done; if the
 * head of the timedout list changed, the abort worked and the next one
 * is sent; otherwise the abort failed and we escalate to a LU reset.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1255 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1257 struct mps_command *cm;
1258 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1259 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1260 struct mpssas_target *targ;
1262 callout_stop(&tm->cm_callout);
1264 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1265 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1269 * Currently there should be no way we can hit this case. It only
1270 * happens when we have a failure to allocate chain frames, and
1271 * task management commands don't have S/G lists.
1273 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1274 mpssas_log_command(tm,
1275 "cm_flags = %#x for abort %p TaskMID %u!\n",
1276 tm->cm_flags, tm, req->TaskMID);
1277 mpssas_free_tm(sc, tm);
/* A NULL reply during a diagnostic reset is expected; just clean up. */
1281 if (reply == NULL) {
1282 mpssas_log_command(tm,
1283 "NULL abort reply for tm %p TaskMID %u\n",
1285 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1286 /* this completion was due to a reset, just cleanup */
1288 mpssas_free_tm(sc, tm);
1291 /* we should have gotten a reply. */
1297 mpssas_log_command(tm,
1298 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1300 reply->IOCStatus, reply->ResponseCode,
1301 reply->TerminationCount);
/* Inspect the head of the target's timed-out-command list to decide
 * whether the aborted command actually came off the list. */
1303 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1305 /* if there are no more timedout commands, we're done with
1306 * error recovery for this target.
1308 mpssas_log_command(tm,
1309 "finished recovery after aborting TaskMID %u\n",
1313 mpssas_free_tm(sc, tm);
1315 else if (req->TaskMID != cm->cm_desc.Default.SMID) {
1316 /* abort success, but we have more timedout commands to abort */
1317 mpssas_log_command(tm,
1318 "continuing recovery after aborting TaskMID %u\n",
1321 mpssas_send_abort(sc, tm, cm);
1324 /* we didn't get a command completion, so the abort
1325 * failed as far as we're concerned. escalate.
1327 mpssas_log_command(tm,
1328 "abort failed for TaskMID %u tm %p\n",
1331 mpssas_send_reset(sc, tm,
1332 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1336 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT_TASK task-management command (tm) for the
 * timed-out SCSI command cm.  TaskMID identifies the command to abort
 * by its SMID.  Arms a MPS_ABORT_TIMEOUT second callout before mapping
 * the request.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1339 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1341 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1342 struct mpssas_target *targ;
1346 if (targ->handle == 0) {
1347 mps_printf(sc, "%s null devhandle for target_id %d\n",
1348 __func__, cm->cm_ccb->ccb_h.target_id);
1352 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1353 req->DevHandle = targ->handle;
1354 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1355 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1357 /* XXX Need to handle invalid LUNs */
1358 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* The SMID of the command being aborted identifies it to the firmware. */
1360 req->TaskMID = cm->cm_desc.Default.SMID;
1363 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1364 tm->cm_complete = mpssas_abort_complete;
1365 tm->cm_complete_data = (void *)tm;
/* Carry the victim's target/LUN on the TM so escalation can reuse them. */
1366 tm->cm_targ = cm->cm_targ;
1367 tm->cm_lun = cm->cm_lun;
1369 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1370 mpssas_tm_timeout, tm);
1374 err = mps_map_command(sc, tm);
1376 mpssas_log_command(tm,
1377 "error %d sending abort for cm %p SMID %u\n",
1378 err, cm, req->TaskMID);
/*
 * Callout handler fired when a SCSI I/O command exceeds its CAM timeout.
 * Runs with the softc lock held (asserted below).  Re-runs the interrupt
 * handler first in case the completion is merely pending; otherwise marks
 * the command timed out, queues it on the target's recovery list, and
 * kicks off recovery by allocating a TM and sending an abort (unless
 * recovery for this target is already in progress).
 * NOTE(review): interior lines are elided in this excerpt.
 */
1384 mpssas_scsiio_timeout(void *data)
1386 struct mps_softc *sc;
1387 struct mps_command *cm;
1388 struct mpssas_target *targ;
1390 cm = (struct mps_command *)data;
1393 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1395 mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1398 * Run the interrupt handler to make sure it's not pending. This
1399 * isn't perfect because the command could have already completed
1400 * and been re-used, though this is unlikely.
1402 mps_intr_locked(sc);
1403 if (cm->cm_state == MPS_CM_STATE_FREE) {
1404 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1408 if (cm->cm_ccb == NULL) {
1409 mps_printf(sc, "command timeout with NULL ccb\n");
1413 mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1419 /* XXX first, check the firmware state, to see if it's still
1420 * operational. if not, do a diag reset.
/* Mark the CCB/command timed out and queue for per-target recovery. */
1423 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1424 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1425 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1427 if (targ->tm != NULL) {
1428 /* target already in recovery, just queue up another
1429 * timedout command to be processed later.
1431 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1434 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1435 mps_printf(sc, "timedout cm %p allocated tm %p\n",
1438 /* start recovery by aborting the first timedout command */
1439 mpssas_send_abort(sc, targ->tm, cm);
1442 /* XXX queue this target up for recovery once a TM becomes
1443 * available. The firmware only has a limited number of
1444 * HighPriority credits for the high priority requests used
1445 * for task management, and we ran out.
1447 * Isilon: don't worry about this for now, since we have
1448 * more credits than disks in an enclosure, and limit
1449 * ourselves to one TM per target for recovery.
1451 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
/*
 * CAM XPT_SCSI_IO action: translate a CAM SCSI I/O CCB into an MPI2
 * SCSI_IO request and submit it to the controller.  Validates the
 * target, allocates a command (freezing the SIM queue and requeueing
 * the CCB on exhaustion), fills in the request frame (direction, tags,
 * LUN, CDB, optional EEDP/DIF setup), optionally attempts a WarpDrive
 * direct-drive translation, arms the per-command timeout, and maps the
 * command for DMA.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1458 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1460 MPI2_SCSI_IO_REQUEST *req;
1461 struct ccb_scsiio *csio;
1462 struct mps_softc *sc;
1463 struct mpssas_target *targ;
1464 struct mpssas_lun *lun;
1465 struct mps_command *cm;
1466 uint8_t i, lba_byte, *ref_tag_addr;
1467 uint16_t eedp_flags;
1470 mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1471 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
/* A zero device handle means the target is not (or no longer) present. */
1474 targ = &sassc->targets[csio->ccb_h.target_id];
1475 if (targ->handle == 0x0) {
1476 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1477 __func__, csio->ccb_h.target_id);
1478 csio->ccb_h.status = CAM_TID_INVALID;
1483 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1484 * that the volume has timed out. We want volumes to be enumerated
1485 * until they are deleted/removed, not just failed.
1487 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1488 if (targ->devinfo == 0)
1489 csio->ccb_h.status = CAM_REQ_CMP;
1491 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1496 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1497 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1498 csio->ccb_h.status = CAM_TID_INVALID;
/* Out of command frames: freeze the SIM queue and ask CAM to requeue. */
1503 cm = mps_alloc_command(sc);
1505 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1506 xpt_freeze_simq(sassc->sim, 1);
1507 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1509 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1510 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI_IO request frame. */
1515 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1516 bzero(req, sizeof(*req));
1517 req->DevHandle = targ->handle;
1518 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1520 req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1521 req->SenseBufferLength = MPS_SENSE_LEN;
1523 req->ChainOffset = 0;
1524 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1529 req->DataLength = csio->dxfer_len;
1530 req->BidirectionalDataLength = 0;
1531 req->IoFlags = csio->cdb_len;
1534 /* Note: BiDirectional transfers are not supported */
1535 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1537 req->Control = MPI2_SCSIIO_CONTROL_READ;
1538 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1541 req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1542 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1546 req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1551 * It looks like the hardware doesn't require an explicit tag
1552 * number for each transaction. SAM Task Management not supported
1555 switch (csio->tag_action) {
1556 case MSG_HEAD_OF_Q_TAG:
1557 req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1559 case MSG_ORDERED_Q_TAG:
1560 req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1563 req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1565 case CAM_TAG_ACTION_NONE:
1566 case MSG_SIMPLE_Q_TAG:
1568 req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Per-target Transport Layer Retries setting from the mapping table. */
1571 req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1573 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1574 mps_free_command(sc, cm);
1575 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Copy the CDB in, from either a pointer or the inline bytes. */
1580 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1581 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1583 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1584 req->IoFlags = csio->cdb_len;
1587 * Check if EEDP is supported and enabled. If it is then check if the
1588 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1589 * is formatted for EEDP support. If all of this is true, set CDB up
1590 * for EEDP transfer.
1592 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1593 if (sc->eedp_enabled && eedp_flags) {
1594 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1595 if (lun->lun_id == csio->ccb_h.target_lun) {
1600 if ((lun != NULL) && (lun->eedp_formatted)) {
1601 req->EEDPBlockSize = lun->eedp_block_size;
1602 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1603 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1604 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1605 req->EEDPFlags = eedp_flags;
1608 * If CDB less than 32, fill in Primary Ref Tag with
1609 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1610 * already there. Also, set protection bit. FreeBSD
1611 * currently does not support CDBs bigger than 16, but
1612 * the code doesn't hurt, and will be here for the
1615 if (csio->cdb_len != 32) {
1616 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1617 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1618 PrimaryReferenceTag;
1619 for (i = 0; i < 4; i++) {
1621 req->CDB.CDB32[lba_byte + i];
1624 req->CDB.EEDP32.PrimaryApplicationTagMask =
1626 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1630 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1631 req->EEDPFlags = eedp_flags;
1632 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Hook up the data buffer and SGL bookkeeping on the command. */
1638 cm->cm_data = csio->data_ptr;
1639 cm->cm_length = csio->dxfer_len;
1640 cm->cm_sge = &req->SGL;
1641 cm->cm_sglsize = (32 - 24) * 4;
1642 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1643 cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1644 cm->cm_complete = mpssas_scsiio_complete;
1645 cm->cm_complete_data = ccb;
1647 cm->cm_lun = csio->ccb_h.target_lun;
1651 * If HBA is a WD and the command is not for a retry, try to build a
1652 * direct I/O message. If failed, or the command is for a retry, send
1653 * the I/O to the IR volume itself.
1655 if (sc->WD_valid_config) {
1656 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1657 mpssas_direct_drive_io(sassc, cm, ccb);
1659 ccb->ccb_h.status = CAM_REQ_INPROG;
/* Per-command timeout; ccb_h.timeout is in milliseconds. */
1663 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1664 mpssas_scsiio_timeout, cm);
1667 targ->outstanding++;
1668 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1670 if ((sc->mps_debug & MPS_TRACE) != 0)
1671 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1672 __func__, cm, ccb, targ->outstanding);
1674 mps_map_command(sc, cm);
/*
 * Completion handler for SCSI I/O commands: translate the MPI2 reply
 * (or its absence) into CAM CCB status.  Handles DMA sync/unload,
 * per-target accounting, recovery-list bookkeeping, the chain-frame
 * exhaustion error path, the fast no-reply path, WarpDrive direct-I/O
 * retry, the full IOCStatus switch (sense copy-out, TLR handling,
 * residuals), and SIM/dev queue freeze management.
 * NOTE(review): interior lines are elided in this excerpt (embedded line
 * numbers skip), so some braces/statements are not visible here.
 */
1679 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1681 MPI2_SCSI_IO_REPLY *rep;
1683 struct ccb_scsiio *csio;
1684 struct mpssas_softc *sassc;
1685 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1686 u8 *TLR_bits, TLR_on;
1690 mps_dprint(sc, MPS_TRACE,
1691 "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1692 __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1693 cm->cm_targ->outstanding);
1695 callout_stop(&cm->cm_callout);
1696 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1699 ccb = cm->cm_complete_data;
1701 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1703 * XXX KDM if the chain allocation fails, does it matter if we do
1704 * the sync and unload here? It is simpler to do it in every case,
1705 * assuming it doesn't cause problems.
1707 if (cm->cm_data != NULL) {
1708 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1709 dir = BUS_DMASYNC_POSTREAD;
1710 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1711 dir = BUS_DMASYNC_POSTWRITE;
1712 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1713 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Per-target accounting: this command is no longer outstanding. */
1716 cm->cm_targ->completed++;
1717 cm->cm_targ->outstanding--;
1718 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1720 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1721 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1722 if (cm->cm_reply != NULL)
1723 mpssas_log_command(cm,
1724 "completed timedout cm %p ccb %p during recovery "
1725 "ioc %x scsi %x state %x xfer %u\n",
1727 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1728 rep->TransferCount);
1730 mpssas_log_command(cm,
1731 "completed timedout cm %p ccb %p during recovery\n",
1733 } else if (cm->cm_targ->tm != NULL) {
1734 if (cm->cm_reply != NULL)
1735 mpssas_log_command(cm,
1736 "completed cm %p ccb %p during recovery "
1737 "ioc %x scsi %x state %x xfer %u\n",
1739 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1740 rep->TransferCount);
1742 mpssas_log_command(cm,
1743 "completed cm %p ccb %p during recovery\n",
1745 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1746 mpssas_log_command(cm,
1747 "reset completed cm %p ccb %p\n",
1751 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1753 * We ran into an error after we tried to map the command,
1754 * so we're getting a callback without queueing the command
1755 * to the hardware. So we set the status here, and it will
1756 * be retained below. We'll go through the "fast path",
1757 * because there can be no reply when we haven't actually
1758 * gone out to the hardware.
1760 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1763 * Currently the only error included in the mask is
1764 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1765 * chain frames. We need to freeze the queue until we get
1766 * a command that completed without this error, which will
1767 * hopefully have some chain frames attached that we can
1768 * use. If we wanted to get smarter about it, we would
1769 * only unfreeze the queue in this condition when we're
1770 * sure that we're getting some chain frames back. That's
1771 * probably unnecessary.
1773 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1774 xpt_freeze_simq(sassc->sim, 1);
1775 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1776 mps_dprint(sc, MPS_INFO, "Error sending command, "
1777 "freezing SIM queue\n");
1781 /* Take the fast path to completion */
1782 if (cm->cm_reply == NULL) {
1783 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1784 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1785 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1787 ccb->ccb_h.status = CAM_REQ_CMP;
1788 ccb->csio.scsi_status = SCSI_STATUS_OK;
1790 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1791 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1792 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1793 mps_dprint(sc, MPS_INFO,
1794 "Unfreezing SIM queue\n");
1799 * There are two scenarios where the status won't be
1800 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
1801 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1803 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1805 * Freeze the dev queue so that commands are
1806 * executed in the correct order with after error
1809 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1810 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1812 mps_free_command(sc, cm);
1817 if (sc->mps_debug & MPS_TRACE)
1818 mpssas_log_command(cm,
1819 "ioc %x scsi %x state %x xfer %u\n",
1820 rep->IOCStatus, rep->SCSIStatus,
1821 rep->SCSIState, rep->TransferCount);
1824 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1825 * Volume if an error occurred (normal I/O retry). Use the original
1826 * CCB, but set a flag that this will be a retry so that it's sent to
1827 * the original volume. Free the command but reuse the CCB.
1829 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1830 mps_free_command(sc, cm);
1831 ccb->ccb_h.status = MPS_WD_RETRY;
1832 mpssas_action_scsiio(sassc, ccb);
/* Map the firmware's IOCStatus onto CAM status codes. */
1836 switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1837 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1838 csio->resid = cm->cm_length - rep->TransferCount;
1840 case MPI2_IOCSTATUS_SUCCESS:
1841 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1843 if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1844 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1845 mpssas_log_command(cm, "recovered error\n");
1847 /* Completion failed at the transport level. */
1848 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1849 MPI2_SCSI_STATE_TERMINATED)) {
1850 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1854 /* In a modern packetized environment, an autosense failure
1855 * implies that there's not much else that can be done to
1856 * recover the command.
1858 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1859 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1864 * CAM doesn't care about SAS Response Info data, but if this is
1865 * the state check if TLR should be done. If not, clear the
1866 * TLR_bits for the target.
1868 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1869 ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1870 MPS_SCSI_RI_INVALID_FRAME)) {
1871 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1872 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1876 * Intentionally override the normal SCSI status reporting
1877 * for these two cases. These are likely to happen in a
1878 * multi-initiator environment, and we want to make sure that
1879 * CAM retries these commands rather than fail them.
1881 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1882 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1883 ccb->ccb_h.status = CAM_REQ_ABORTED;
1887 /* Handle normal status and sense */
1888 csio->scsi_status = rep->SCSIStatus;
1889 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1890 ccb->ccb_h.status = CAM_REQ_CMP;
1892 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
/* Copy autosense data into the CCB, clamped to both buffers' sizes. */
1894 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1895 int sense_len, returned_sense_len;
1897 returned_sense_len = min(rep->SenseCount,
1898 sizeof(struct scsi_sense_data));
1899 if (returned_sense_len < ccb->csio.sense_len)
1900 ccb->csio.sense_resid = ccb->csio.sense_len -
1903 ccb->csio.sense_resid = 0;
1905 sense_len = min(returned_sense_len,
1906 ccb->csio.sense_len - ccb->csio.sense_resid);
1907 bzero(&ccb->csio.sense_data,
1908 sizeof(ccb->csio.sense_data));
1909 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1910 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1914 * Check if this is an INQUIRY command. If it's a VPD inquiry,
1915 * and it's page code 0 (Supported Page List), and there is
1916 * inquiry data, and this is for a sequential access device, and
1917 * the device is an SSP target, and TLR is supported by the
1918 * controller, turn the TLR_bits value ON if page 0x90 is
1921 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1922 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1923 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1924 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1925 T_SEQUENTIAL) && (sc->control_TLR) &&
1926 (sc->mapping_table[csio->ccb_h.target_id].device_info &
1927 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1928 vpd_list = (struct scsi_vpd_supported_page_list *)
1930 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1932 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1933 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1934 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1935 csio->cdb_io.cdb_bytes[4];
1936 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1937 if (vpd_list->list[i] == 0x90) {
1944 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1945 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1947 * If devinfo is 0 this will be a volume. In that case don't
1948 * tell CAM that the volume is not there. We want volumes to
1949 * be enumerated until they are deleted/removed, not just
1952 if (cm->cm_targ->devinfo == 0)
1953 ccb->ccb_h.status = CAM_REQ_CMP;
1955 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1957 case MPI2_IOCSTATUS_INVALID_SGL:
1958 mps_print_scsiio_cmd(sc, cm);
1959 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1961 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1963 * This is one of the responses that comes back when an I/O
1964 * has been aborted. If it is because of a timeout that we
1965 * initiated, just set the status to CAM_CMD_TIMEOUT.
1966 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
1967 * command is the same (it gets retried, subject to the
1968 * retry counter), the only difference is what gets printed
1971 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1972 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1974 ccb->ccb_h.status = CAM_REQ_ABORTED;
1976 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1977 /* resid is ignored for this condition */
1979 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1981 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1982 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1984 * Since these are generally external (i.e. hopefully
1985 * transient transport-related) errors, retry these without
1986 * decrementing the retry count.
1988 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1989 mpssas_log_command(cm,
1990 "terminated ioc %x scsi %x state %x xfer %u\n",
1991 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1992 rep->TransferCount);
1994 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1995 case MPI2_IOCSTATUS_INTERNAL_ERROR:
1996 case MPI2_IOCSTATUS_INVALID_VPID:
1997 case MPI2_IOCSTATUS_INVALID_FIELD:
1998 case MPI2_IOCSTATUS_INVALID_STATE:
1999 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2000 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2001 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2002 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2003 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2005 mpssas_log_command(cm,
2006 "completed ioc %x scsi %x state %x xfer %u\n",
2007 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2008 rep->TransferCount);
2009 csio->resid = cm->cm_length;
2010 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* A successful completion may unfreeze a queue frozen by allocation
 * failure earlier; error completions freeze the device queue so CAM
 * keeps ordering intact. */
2014 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2015 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2016 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2017 mps_dprint(sc, MPS_INFO, "Command completed, "
2018 "unfreezing SIM queue\n");
2021 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2022 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2023 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2026 mps_free_command(sc, cm);
/*
 * WarpDrive direct-drive translation: for READ/WRITE 6/10/16 commands
 * addressed to the IR volume, translate the virtual LBA into a physical
 * LBA on the member disk and redirect the request (DevHandle and CDB LBA
 * bytes rewritten, MPS_CM_FLAGS_DD_IO set).  I/Os that cross a stripe
 * boundary, exceed the volume, or use other opcodes fall through to the
 * IR volume unchanged.
 *
 * BUG FIX: the 16-byte-CDB >2TB path assembled the 64-bit LBA with
 * ((uint64_t)CDB[2] << 54); CDB byte 2 is the most-significant of the
 * eight LBA bytes (shifts must be 56,48,40,32,24,16,8,0 per the SBC
 * READ(16)/WRITE(16) layout), so the shift is corrected to 56.
 * NOTE(review): interior lines are elided in this excerpt (embedded line
 * numbers skip), so some braces/statements are not visible here.
 */
2031 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2033 pMpi2SCSIIORequest_t pIO_req;
2034 struct mps_softc *sc = sassc->sc;
2036 uint32_t physLBA, stripe_offset, stripe_unit;
2037 uint32_t io_size, column;
2038 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2041 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2042 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2043 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2044 * bit different than the 10/16 CDBs, handle them separately.
2046 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2047 CDB = pIO_req->CDB.CDB32;
2050 * Handle 6 byte CDBs.
2052 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2053 (CDB[0] == WRITE_6))) {
2055 * Get the transfer size in blocks.
2057 io_size = (cm->cm_length >> sc->DD_block_exponent);
2060 * Get virtual LBA given in the CDB.
2062 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2063 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2066 * Check that LBA range for I/O does not exceed volume's
2069 if ((virtLBA + (uint64_t)io_size - 1) <=
2072 * Check if the I/O crosses a stripe boundary. If not,
2073 * translate the virtual LBA to a physical LBA and set
2074 * the DevHandle for the PhysDisk to be used. If it
2075 * does cross a boundry, do normal I/O. To get the
2076 * right DevHandle to use, get the map number for the
2077 * column, then use that map number to look up the
2078 * DevHandle of the PhysDisk.
2080 stripe_offset = (uint32_t)virtLBA &
2081 (sc->DD_stripe_size - 1);
2082 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2083 physLBA = (uint32_t)virtLBA >>
2084 sc->DD_stripe_exponent;
2085 stripe_unit = physLBA / sc->DD_num_phys_disks;
2086 column = physLBA % sc->DD_num_phys_disks;
2087 pIO_req->DevHandle =
2088 sc->DD_column_map[column].dev_handle;
2089 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 21-bit LBA bytes of the 6-byte CDB with the physical LBA. */
2092 physLBA = (stripe_unit <<
2093 sc->DD_stripe_exponent) + stripe_offset;
2094 ptrLBA = &pIO_req->CDB.CDB32[1];
2095 physLBA_byte = (uint8_t)(physLBA >> 16);
2096 *ptrLBA = physLBA_byte;
2097 ptrLBA = &pIO_req->CDB.CDB32[2];
2098 physLBA_byte = (uint8_t)(physLBA >> 8);
2099 *ptrLBA = physLBA_byte;
2100 ptrLBA = &pIO_req->CDB.CDB32[3];
2101 physLBA_byte = (uint8_t)physLBA;
2102 *ptrLBA = physLBA_byte;
2105 * Set flag that Direct Drive I/O is
2108 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2115 * Handle 10 or 16 byte CDBs.
2117 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2118 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2119 (CDB[0] == WRITE_16))) {
2121 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2122 * are 0. If not, this is accessing beyond 2TB so handle it in
2123 * the else section. 10-byte CDB's are OK.
2125 if ((CDB[0] < READ_16) ||
2126 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2128 * Get the transfer size in blocks.
2130 io_size = (cm->cm_length >> sc->DD_block_exponent);
2133 * Get virtual LBA. Point to correct lower 4 bytes of
2134 * LBA in the CDB depending on command.
2136 lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2137 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2138 ((uint64_t)CDB[lba_idx + 1] << 16) |
2139 ((uint64_t)CDB[lba_idx + 2] << 8) |
2140 (uint64_t)CDB[lba_idx + 3];
2143 * Check that LBA range for I/O does not exceed volume's
2146 if ((virtLBA + (uint64_t)io_size - 1) <=
2149 * Check if the I/O crosses a stripe boundary.
2150 * If not, translate the virtual LBA to a
2151 * physical LBA and set the DevHandle for the
2152 * PhysDisk to be used. If it does cross a
2153 * boundry, do normal I/O. To get the right
2154 * DevHandle to use, get the map number for the
2155 * column, then use that map number to look up
2156 * the DevHandle of the PhysDisk.
2158 stripe_offset = (uint32_t)virtLBA &
2159 (sc->DD_stripe_size - 1);
2160 if ((stripe_offset + io_size) <=
2161 sc->DD_stripe_size) {
2162 physLBA = (uint32_t)virtLBA >>
2163 sc->DD_stripe_exponent;
2164 stripe_unit = physLBA /
2165 sc->DD_num_phys_disks;
2167 sc->DD_num_phys_disks;
2168 pIO_req->DevHandle =
2169 sc->DD_column_map[column].
2171 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 4 LBA bytes in place (big-endian order) with the physical LBA. */
2174 physLBA = (stripe_unit <<
2175 sc->DD_stripe_exponent) +
2178 &pIO_req->CDB.CDB32[lba_idx];
2179 physLBA_byte = (uint8_t)(physLBA >> 24);
2180 *ptrLBA = physLBA_byte;
2182 &pIO_req->CDB.CDB32[lba_idx + 1];
2183 physLBA_byte = (uint8_t)(physLBA >> 16);
2184 *ptrLBA = physLBA_byte;
2186 &pIO_req->CDB.CDB32[lba_idx + 2];
2187 physLBA_byte = (uint8_t)(physLBA >> 8);
2188 *ptrLBA = physLBA_byte;
2190 &pIO_req->CDB.CDB32[lba_idx + 3];
2191 physLBA_byte = (uint8_t)physLBA;
2192 *ptrLBA = physLBA_byte;
2195 * Set flag that Direct Drive I/O is
2198 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2203 * 16-byte CDB and the upper 4 bytes of the CDB are not
2204 * 0. Get the transfer size in blocks.
2206 io_size = (cm->cm_length >> sc->DD_block_exponent);
/* Assemble the full 64-bit LBA from CDB bytes 2..9 (MSB first).
 * FIX: byte 2 must be shifted by 56, not 54. */
2211 virtLBA = ((uint64_t)CDB[2] << 56) |
2212 ((uint64_t)CDB[3] << 48) |
2213 ((uint64_t)CDB[4] << 40) |
2214 ((uint64_t)CDB[5] << 32) |
2215 ((uint64_t)CDB[6] << 24) |
2216 ((uint64_t)CDB[7] << 16) |
2217 ((uint64_t)CDB[8] << 8) |
2221 * Check that LBA range for I/O does not exceed volume's
2224 if ((virtLBA + (uint64_t)io_size - 1) <=
2227 * Check if the I/O crosses a stripe boundary.
2228 * If not, translate the virtual LBA to a
2229 * physical LBA and set the DevHandle for the
2230 * PhysDisk to be used. If it does cross a
2231 * boundry, do normal I/O. To get the right
2232 * DevHandle to use, get the map number for the
2233 * column, then use that map number to look up
2234 * the DevHandle of the PhysDisk.
2236 stripe_offset = (uint32_t)virtLBA &
2237 (sc->DD_stripe_size - 1);
2238 if ((stripe_offset + io_size) <=
2239 sc->DD_stripe_size) {
2240 physLBA = (uint32_t)(virtLBA >>
2241 sc->DD_stripe_exponent);
2242 stripe_unit = physLBA /
2243 sc->DD_num_phys_disks;
2245 sc->DD_num_phys_disks;
2246 pIO_req->DevHandle =
2247 sc->DD_column_map[column].
2249 cm->cm_desc.SCSIIO.DevHandle =
2252 physLBA = (stripe_unit <<
2253 sc->DD_stripe_exponent) +
2257 * Set upper 4 bytes of LBA to 0. We
2258 * assume that the phys disks are less
2259 * than 2 TB's in size. Then, set the
2262 pIO_req->CDB.CDB32[2] = 0;
2263 pIO_req->CDB.CDB32[3] = 0;
2264 pIO_req->CDB.CDB32[4] = 0;
2265 pIO_req->CDB.CDB32[5] = 0;
2266 ptrLBA = &pIO_req->CDB.CDB32[6];
2267 physLBA_byte = (uint8_t)(physLBA >> 24);
2268 *ptrLBA = physLBA_byte;
2269 ptrLBA = &pIO_req->CDB.CDB32[7];
2270 physLBA_byte = (uint8_t)(physLBA >> 16);
2271 *ptrLBA = physLBA_byte;
2272 ptrLBA = &pIO_req->CDB.CDB32[8];
2273 physLBA_byte = (uint8_t)(physLBA >> 8);
2274 *ptrLBA = physLBA_byte;
2275 ptrLBA = &pIO_req->CDB.CDB32[9];
2276 physLBA_byte = (uint8_t)physLBA;
2277 *ptrLBA = physLBA_byte;
2280 * Set flag that Direct Drive I/O is
2283 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2290 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request.  Validates the
 * reply (chain-frame error, NULL reply, IOCStatus/SASStatus), logs the
 * reassembled 64-bit SAS address, maps the SMP function result onto CAM
 * status, then syncs/unloads the DMA map and frees the command.
 * NOTE(review): interior lines are elided in this excerpt.
 */
2292 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2294 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2295 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2299 ccb = cm->cm_complete_data;
2302 * Currently there should be no way we can hit this case. It only
2303 * happens when we have a failure to allocate chain frames, and SMP
2304 * commands require two S/G elements only. That should be handled
2305 * in the standard request size.
2307 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2308 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2309 __func__, cm->cm_flags);
2310 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2314 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2316 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2317 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Reassemble the 64-bit SAS address from the request's two LE32 halves. */
2321 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2322 sasaddr = le32toh(req->SASAddress.Low);
2323 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2325 if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2326 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2327 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2328 __func__, rpl->IOCStatus, rpl->SASStatus);
2329 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2333 mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2334 "%#jx completed successfully\n", __func__,
2335 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame carries the function result. */
2337 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2338 ccb->ccb_h.status = CAM_REQ_CMP;
2340 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2344 * We sync in both directions because we had DMAs in the S/G list
2345 * in both directions.
2347 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2348 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2349 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2350 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * device at 'sasaddr'.  Validates the CCB's buffer layout (one request
 * segment, one response segment, virtual addresses only), fills in an
 * MPI2_SMP_PASSTHROUGH_REQUEST, and maps the bidirectional data via a
 * two-element uio handed to mps_map_command().  Completion is handled by
 * mpssas_smpio_complete().
 *
 * NOTE(review): lines are elided from this excerpt (e.g. the assignment of
 * 'sc', some error-path returns and closing braces are not visible);
 * comments describe only the code shown here.
 */
2355 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2357 struct mps_command *cm;
2358 uint8_t *request, *response;
2359 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2360 struct mps_softc *sc;
2367 * XXX We don't yet support physical addresses here.
2369 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2370 mps_printf(sc, "%s: physical addresses not supported\n",
2372 ccb->ccb_h.status = CAM_REQ_INVALID;
2378 * If the user wants to send an S/G list, check to make sure they
2379 * have single buffers.
2381 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2383 * The chip does not support more than one buffer for the
2384 * request or response.
2386 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2387 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2388 mps_printf(sc, "%s: multiple request or response "
2389 "buffer segments not supported for SMP\n",
2391 ccb->ccb_h.status = CAM_REQ_INVALID;
2397 * The CAM_SCATTER_VALID flag was originally implemented
2398 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2399 * We have two. So, just take that flag to mean that we
2400 * might have S/G lists, and look at the S/G segment count
2401 * to figure out whether that is the case for each individual
2404 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2405 bus_dma_segment_t *req_sg;
2407 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2408 request = (uint8_t *)req_sg[0].ds_addr;
2410 request = ccb->smpio.smp_request;
2412 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2413 bus_dma_segment_t *rsp_sg;
2415 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2416 response = (uint8_t *)rsp_sg[0].ds_addr;
2418 response = ccb->smpio.smp_response;
/* Plain (non-S/G) case: the CCB pointers are the buffers themselves. */
2420 request = ccb->smpio.smp_request;
2421 response = ccb->smpio.smp_response;
2424 cm = mps_alloc_command(sc);
2426 mps_printf(sc, "%s: cannot allocate command\n", __func__);
2427 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Build the MPI2 SMP passthrough request frame from scratch. */
2432 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2433 bzero(req, sizeof(*req));
2434 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2436 /* Allow the chip to use any route to this SAS address. */
2437 req->PhysicalPort = 0xff;
2439 req->RequestDataLength = ccb->smpio.smp_request_len;
2441 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2443 mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
2444 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2446 mpi_init_sge(cm, req, &req->SGL);
2449 * Set up a uio to pass into mps_map_command(). This allows us to
2450 * do one map command, and one busdma call in there.
2452 cm->cm_uio.uio_iov = cm->cm_iovec;
2453 cm->cm_uio.uio_iovcnt = 2;
2454 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2457 * The read/write flag isn't used by busdma, but set it just in
2458 * case. This isn't exactly accurate, either, since we're going in
2461 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] carries the SMP request, iovec[1] receives the response. */
2463 cm->cm_iovec[0].iov_base = request;
2464 cm->cm_iovec[0].iov_len = req->RequestDataLength;
2465 cm->cm_iovec[1].iov_base = response;
2466 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2468 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2469 cm->cm_iovec[1].iov_len;
2472 * Trigger a warning message in mps_data_cb() for the user if we
2473 * wind up exceeding two S/G segments. The chip expects one
2474 * segment for the request and another for the response.
2476 cm->cm_max_segs = 2;
2478 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2479 cm->cm_complete = mpssas_smpio_complete;
2480 cm->cm_complete_data = ccb;
2483 * Tell the mapping code that we're using a uio, and that this is
2484 * an SMP passthrough request. There is a little special-case
2485 * logic there (in mps_data_cb()) to handle the bidirectional
2488 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2489 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2491 /* The chip data format is little endian. */
2492 req->SASAddress.High = htole32(sasaddr >> 32);
2493 req->SASAddress.Low = htole32(sasaddr);
2496 * XXX Note that we don't have a timeout/abort mechanism here.
2497 * From the manual, it looks like task management requests only
2498 * work for SCSI IO and SATA passthrough requests. We may need to
2499 * have a mechanism to retry requests in the event of a chip reset
2500 * at least. Hopefully the chip will insure that any errors short
2501 * of that are relayed back to the driver.
2503 error = mps_map_command(sc, cm);
2504 if ((error != 0) && (error != EINPROGRESS)) {
2505 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
/* Mapping failed outright: give the command back and fail the CCB. */
2513 mps_free_command(sc, cm);
2514 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address should receive the
 * SMP request — either the target itself (if it contains an SMP target) or
 * its parent expander — then hand off to mpssas_send_smpcmd().
 *
 * NOTE(review): lines are elided from this excerpt (the assignment of 'sc',
 * the xpt_done() exits and closing braces are not visible); comments
 * describe only the code shown here.
 */
2521 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2523 struct mps_softc *sc;
2524 struct mpssas_target *targ;
2525 uint64_t sasaddr = 0;
2530 * Make sure the target exists.
2532 targ = &sassc->targets[ccb->ccb_h.target_id];
2533 if (targ->handle == 0x0) {
2534 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
2535 ccb->ccb_h.target_id);
2536 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2542 * If this device has an embedded SMP target, we'll talk to it
2544 * figure out what the expander's address is.
2546 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2547 sasaddr = targ->sasaddr;
2550 * If we don't have a SAS address for the expander yet, try
2551 * grabbing it from the page 0x83 information cached in the
2552 * transport layer for this target. LSI expanders report the
2553 * expander SAS address as the port-associated SAS address in
2554 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2557 * XXX KDM disable this for now, but leave it commented out so that
2558 * it is obvious that this is another possible way to get the SAS
2561 * The parent handle method below is a little more reliable, and
2562 * the other benefit is that it works for devices other than SES
2563 * devices. So you can send a SMP request to a da(4) device and it
2564 * will get routed to the expander that device is attached to.
2565 * (Assuming the da(4) device doesn't contain an SMP target...)
2569 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2573 * If we still don't have a SAS address for the expander, look for
2574 * the parent device of this device, which is probably the expander.
2577 #ifdef OLD_MPS_PROBE
2578 struct mpssas_target *parent_target;
/* Without a parent handle there is no expander to route to. */
2581 if (targ->parent_handle == 0x0) {
2582 mps_printf(sc, "%s: handle %d does not have a valid "
2583 "parent handle!\n", __func__, targ->handle);
2584 ccb->ccb_h.status = CAM_REQ_INVALID;
2587 #ifdef OLD_MPS_PROBE
/* Old probe path: look the parent up as a full target structure. */
2588 parent_target = mpssas_find_target_by_handle(sassc, 0,
2589 targ->parent_handle);
2591 if (parent_target == NULL) {
2592 mps_printf(sc, "%s: handle %d does not have a valid "
2593 "parent target!\n", __func__, targ->handle);
2594 ccb->ccb_h.status = CAM_REQ_INVALID;
2598 if ((parent_target->devinfo &
2599 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2600 mps_printf(sc, "%s: handle %d parent %d does not "
2601 "have an SMP target!\n", __func__,
2602 targ->handle, parent_target->handle);
2603 ccb->ccb_h.status = CAM_REQ_INVALID;
2608 sasaddr = parent_target->sasaddr;
2609 #else /* OLD_MPS_PROBE */
/* New probe path: parent devinfo/address are cached on the target. */
2610 if ((targ->parent_devinfo &
2611 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2612 mps_printf(sc, "%s: handle %d parent %d does not "
2613 "have an SMP target!\n", __func__,
2614 targ->handle, targ->parent_handle);
2615 ccb->ccb_h.status = CAM_REQ_INVALID;
2619 if (targ->parent_sasaddr == 0x0) {
2620 mps_printf(sc, "%s: handle %d parent handle %d does "
2621 "not have a valid SAS address!\n",
2622 __func__, targ->handle, targ->parent_handle);
2623 ccb->ccb_h.status = CAM_REQ_INVALID;
2627 sasaddr = targ->parent_sasaddr;
2628 #endif /* OLD_MPS_PROBE */
/* Every avenue exhausted: fail the CCB rather than send to address 0. */
2633 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
2634 __func__, targ->handle);
2635 ccb->ccb_h.status = CAM_REQ_INVALID;
2638 mpssas_send_smpcmd(sassc, ccb, sasaddr);
2646 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: build a SCSI task-management TARGET RESET
 * request for the addressed device and submit it.  Completion is handled
 * by mpssas_resetdev_complete().
 *
 * NOTE(review): lines are elided from this excerpt (e.g. the assignment of
 * 'sc' and the NULL-check branch for the allocation are not fully visible).
 */
2649 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2651 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2652 struct mps_softc *sc;
2653 struct mps_command *tm;
2654 struct mpssas_target *targ;
2656 mps_dprint(sassc->sc, MPS_TRACE, __func__);
/* Caller must hold the softc lock. */
2657 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2660 tm = mps_alloc_command(sc);
2662 mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
2663 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Fill in the task management request for a target reset. */
2668 targ = &sassc->targets[ccb->ccb_h.target_id];
2669 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2670 req->DevHandle = targ->handle;
2671 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2672 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2674 /* SAS Hard Link Reset / SATA Link Reset */
2675 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go through the high-priority queue. */
2678 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2679 tm->cm_complete = mpssas_resetdev_complete;
2680 tm->cm_complete_data = ccb;
2681 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task management command issued
 * by mpssas_action_resetdev(): translate the TM response code into a CAM
 * status, announce the reset to CAM on success, and free the TM command.
 *
 * NOTE(review): lines are elided from this excerpt (declarations, braces
 * and the xpt_done() path are not fully visible).
 */
2685 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2687 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2690 mps_dprint(sc, MPS_TRACE, __func__)
2691 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2693 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2694 ccb = tm->cm_complete_data;
2697 * Currently there should be no way we can hit this case. It only
2698 * happens when we have a failure to allocate chain frames, and
2699 * task management commands don't have S/G lists.
2701 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2702 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2704 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2706 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2707 "This should not happen!\n", __func__, tm->cm_flags,
2709 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2713 kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2714 resp->IOCStatus, resp->ResponseCode);
/* TM_COMPLETE means the reset went through; tell CAM a BDR was sent. */
2716 if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2717 ccb->ccb_h.status = CAM_REQ_CMP;
2718 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2722 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* TM commands are freed via the dedicated TM free routine. */
2726 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll routine: service the controller interrupt handler directly
 * (used when interrupts are unavailable, e.g. while dumping/panicking).
 */
2731 mpssas_poll(struct cam_sim *sim)
2733 struct mpssas_softc *sassc;
2735 sassc = cam_sim_softc(sim);
2737 if (sassc->sc->mps_debug & MPS_TRACE) {
2738 /* frequent debug messages during a panic just slow
2739 * everything down too much.
2741 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2742 sassc->sc->mps_debug &= ~MPS_TRACE;
/* Run the interrupt handler with the lock already held. */
2745 mps_intr_locked(sassc->sc);
/*
 * Completion callback for a queued bus/target rescan CCB: log the finished
 * path, then free the path and the CCB allocated for the rescan.
 *
 * NOTE(review): lines are elided from this excerpt (e.g. the 'path_str'
 * declaration and the early return are not fully visible).
 */
2749 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2751 struct mpssas_softc *sassc;
2754 if (done_ccb == NULL)
/* The softc was stashed in ppriv_ptr1 by mpssas_rescan(). */
2757 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2759 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2761 xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2762 mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2764 xpt_free_path(done_ccb->ccb_h.path);
2765 xpt_free_ccb(done_ccb);
2767 #if __FreeBSD_version < 1000006
2769 * Before completing scan, get EEDP stuff for all of the existing
/* Older CAM lacks AC_ADVINFO_CHANGED, so probe EEDP manually here. */
2772 mpssas_check_eedp(sassc);
/* thread to handle bus rescans */
/*
 * Kernel thread body: sleep on the scan queue, drain queued rescan CCBs,
 * and exit cleanly when MPSSAS_SHUTDOWN is set.
 *
 * NOTE(review): lines are elided from this excerpt (the loop construct,
 * the xpt_action dispatch of each dequeued CCB, and the thread-exit call
 * are not fully visible).
 */
2779 mpssas_scanner_thread(void *arg)
2781 struct mpssas_softc *sassc;
2782 struct mps_softc *sc;
2785 sassc = (struct mpssas_softc *)arg;
2788 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
/* Block until mpssas_rescan() queues work and wakes us. */
2792 lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 0);
2793 if (sassc->flags & MPSSAS_SHUTDOWN) {
2794 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
/* Drain the queue of pending rescan CCBs. */
2797 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
2800 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
/* Signal anyone waiting in shutdown that the thread is gone. */
2804 sassc->flags &= ~MPSSAS_SCANTHREAD;
2805 wakeup(&sassc->flags);
2807 mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
/*
 * Queue a rescan CCB for the scanner thread: set up the CCB's completion
 * callback and private pointer, append it to the scan queue, and wake the
 * scanner thread.  Must be called with the softc lock held.
 *
 * NOTE(review): lines are elided from this excerpt (e.g. the 'path_str'
 * declaration is not visible).
 */
2812 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2816 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2818 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2823 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2824 mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2826 /* Prepare request */
2827 ccb->ccb_h.ppriv_ptr1 = sassc;
2828 ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2829 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
/* Hand the CCB to mpssas_scanner_thread() and wake it up. */
2830 TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2831 wakeup(&sassc->ccb_scanq);
2834 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback (newer-CAM path): on AC_ADVINFO_CHANGED with
 * long read-capacity data, refresh the per-LUN EEDP (protection info)
 * state for the affected target/LUN.
 *
 * NOTE(review): lines are elided from this excerpt (declarations such as
 * 'lunid'/'found_lun', the assignment of 'sassc', several break/goto exits
 * and closing braces are not visible); comments describe only the code
 * shown here.
 */
2836 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2839 struct mps_softc *sc;
2841 sc = (struct mps_softc *)callback_arg;
2844 case AC_ADVINFO_CHANGED: {
2845 struct mpssas_target *target;
2846 struct mpssas_softc *sassc;
2847 struct scsi_read_capacity_data_long rcap_buf;
2848 struct ccb_dev_advinfo cdai;
2849 struct mpssas_lun *lun;
/* The advinfo buffer type arrives encoded in the callback argument. */
2854 buftype = (uintptr_t)arg;
2860 * We're only interested in read capacity data changes.
2862 if (buftype != CDAI_TYPE_RCAPLONG)
2866 * We're only interested in devices that are attached to
2869 if (xpt_path_path_id(path) != sassc->sim->path_id)
2873 * We should have a handle for this, but check to make sure.
2875 target = &sassc->targets[xpt_path_target_id(path)];
2876 if (target->handle == 0)
/* Find the LUN entry for this path, if one already exists. */
2879 lunid = xpt_path_lun_id(path);
2881 SLIST_FOREACH(lun, &target->luns, lun_link) {
2882 if (lun->lun_id == lunid) {
/* Not on the list yet: allocate and link a fresh LUN entry. */
2888 if (found_lun == 0) {
2889 lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
2892 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
2893 "LUN for EEDP support.\n");
2896 lun->lun_id = lunid;
2897 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/*
 * Fetch the cached long read-capacity data via an XPT_DEV_ADVINFO
 * CCB rather than issuing a new SCSI command to the device.
 */
2900 bzero(&rcap_buf, sizeof(rcap_buf));
2901 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
2902 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
2903 cdai.ccb_h.flags = CAM_DIR_IN;
2904 cdai.buftype = CDAI_TYPE_RCAPLONG;
2906 cdai.bufsiz = sizeof(rcap_buf);
2907 cdai.buf = (uint8_t *)&rcap_buf;
2908 xpt_action((union ccb *)&cdai);
2909 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
2910 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN set in the returned data means the LUN has EEDP. */
2913 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
2914 && (rcap_buf.prot & SRC16_PROT_EN)) {
2915 lun->eedp_formatted = TRUE;
2916 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
2918 lun->eedp_formatted = FALSE;
2919 lun->eedp_block_size = 0;
2927 #else /* __FreeBSD_version >= 1000006 */
/*
 * EEDP probe (older-CAM path): walk every target/LUN behind this SIM and
 * issue a READ CAPACITY (16) to each existing LUN; mpssas_read_cap_done()
 * records whether the LUN is EEDP-formatted.
 *
 * NOTE(review): many lines are elided from this excerpt (the 'ccb'/'lunid'
 * declarations, the per-LUN do/while setup, the allocation on line 2958's
 * left-hand side, the xpt_action() submission and several continue paths
 * are not visible); comments describe only the code shown here.
 */
2930 mpssas_check_eedp(struct mpssas_softc *sassc)
2932 struct mps_softc *sc = sassc->sc;
2933 struct ccb_scsiio *csio;
2934 struct scsi_read_capacity_16 *scsi_cmd;
2935 struct scsi_read_capacity_eedp *rcap_buf;
2937 path_id_t pathid = cam_sim_path(sassc->sim);
2938 target_id_t targetid;
2940 struct cam_periph *found_periph;
2941 struct mpssas_target *target;
2942 struct mpssas_lun *lun;
2946 * Issue a READ CAPACITY 16 command to each LUN of each target. This
2947 * info is used to determine if the LUN is formatted for EEDP support.
2949 for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
2950 target = &sassc->targets[targetid];
/* Skip empty target slots. */
2951 if (target->handle == 0x0) {
/* Per-LUN buffer for the returned read-capacity data. */
2958 kmalloc(sizeof(struct scsi_read_capacity_eedp),
2959 M_MPT2, M_NOWAIT | M_ZERO);
2960 if (rcap_buf == NULL) {
2961 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
2962 "capacity buffer for EEDP support.\n");
2966 ccb = kmalloc(sizeof(union ccb), M_TEMP,
2969 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2970 pathid, targetid, lunid) != CAM_REQ_CMP) {
2971 mps_dprint(sc, MPS_FAULT, "Unable to create "
2972 "path for EEDP support\n");
2973 kfree(rcap_buf, M_MPT2);
2979 * If a periph is returned, the LUN exists. Create an
2980 * entry in the target's LUN list.
2982 if ((found_periph = cam_periph_find(ccb->ccb_h.path,
2985 * If LUN is already in list, don't create a new
2989 SLIST_FOREACH(lun, &target->luns, lun_link) {
2990 if (lun->lun_id == lunid) {
2996 lun = kmalloc(sizeof(struct mpssas_lun),
2997 M_MPT2, M_WAITOK | M_ZERO);
2998 lun->lun_id = lunid;
2999 SLIST_INSERT_HEAD(&target->luns, lun,
3005 * Issue a READ CAPACITY 16 command for the LUN.
3006 * The mpssas_read_cap_done function will load
3007 * the read cap info into the LUN struct.
3010 csio->ccb_h.func_code = XPT_SCSI_IO;
3011 csio->ccb_h.flags = CAM_DIR_IN;
3012 csio->ccb_h.retry_count = 4;
3013 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3014 csio->ccb_h.timeout = 60000;
3015 csio->data_ptr = (uint8_t *)rcap_buf;
3016 csio->dxfer_len = sizeof(struct
3017 scsi_read_capacity_eedp);
3018 csio->sense_len = MPS_SENSE_LEN;
3019 csio->cdb_len = sizeof(*scsi_cmd);
3020 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the READ CAPACITY(16) CDB (opcode 0x9E / SAI 0x10). */
3022 scsi_cmd = (struct scsi_read_capacity_16 *)
3023 &csio->cdb_io.cdb_bytes;
3024 bzero(scsi_cmd, sizeof(*scsi_cmd));
3025 scsi_cmd->opcode = 0x9E;
3026 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* CDB byte 13 is the allocation length for this fixed-size read. */
3027 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
3028 scsi_read_capacity_eedp);
3031 * Set the path, target and lun IDs for the READ
3034 ccb->ccb_h.path_id =
3035 xpt_path_path_id(ccb->ccb_h.path);
3036 ccb->ccb_h.target_id =
3037 xpt_path_target_id(ccb->ccb_h.path);
3038 ccb->ccb_h.target_lun =
3039 xpt_path_lun_id(ccb->ccb_h.path);
/* Completion callback needs the softc to look up the LUN again. */
3041 ccb->ccb_h.ppriv_ptr1 = sassc;
/* No periph for this LUN: release resources for this iteration. */
3044 kfree(rcap_buf, M_MPT2);
3045 xpt_free_path(ccb->ccb_h.path);
3048 } while (found_periph);
/*
 * Completion callback for the READ CAPACITY (16) issued by
 * mpssas_check_eedp(): locate the matching LUN entry and record its EEDP
 * formatting state and block size, then free the buffer, path and CCB.
 *
 * NOTE(review): lines are elided from this excerpt (the early return and
 * loop-closing braces are not fully visible).
 */
3054 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3056 struct mpssas_softc *sassc;
3057 struct mpssas_target *target;
3058 struct mpssas_lun *lun;
3059 struct scsi_read_capacity_eedp *rcap_buf;
3061 if (done_ccb == NULL)
3064 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3067 * Get the LUN ID for the path and look it up in the LUN list for the
3070 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3071 target = &sassc->targets[done_ccb->ccb_h.target_id];
3072 SLIST_FOREACH(lun, &target->luns, lun_link) {
3073 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3077 * Got the LUN in the target's LUN list. Fill it in
3078 * with EEDP info. If the READ CAP 16 command had some
3079 * SCSI error (common if command is not supported), mark
3080 * the lun as not supporting EEDP and set the block size
3083 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3084 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3085 lun->eedp_formatted = FALSE;
3086 lun->eedp_block_size = 0;
/* Protection-enable bit (byte 12, bit 0) indicates EEDP formatting. */
3090 if (rcap_buf->protect & 0x01) {
3091 lun->eedp_formatted = TRUE;
3092 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3097 // Finished with this CCB and path.
3098 kfree(rcap_buf, M_MPT2);
3099 xpt_free_path(done_ccb->ccb_h.path);
3100 xpt_free_ccb(done_ccb);
3102 #endif /* __FreeBSD_version >= 1000006 */
/*
 * Kick off SAS discovery: bump the startup refcount (keeps the simq
 * frozen), flag that we are waiting for port enable, then send the
 * PORT ENABLE request to the IOC.
 */
3105 mpssas_startup(struct mps_softc *sc)
3107 struct mpssas_softc *sassc;
3110 * Send the port enable message and set the wait_for_port_enable flag.
3111 * This flag helps to keep the simq frozen until all discovery events
3115 mpssas_startup_increment(sassc);
3116 sc->wait_for_port_enable = 1;
3117 mpssas_send_portenable(sc);
/*
 * Build and submit an MPI2 PORT ENABLE request to the IOC.  Completion is
 * handled by mpssas_portenable_complete().
 *
 * NOTE(review): lines are elided from this excerpt (e.g. the return value
 * for the allocation-failure path is not visible).
 */
3122 mpssas_send_portenable(struct mps_softc *sc)
3124 MPI2_PORT_ENABLE_REQUEST *request;
3125 struct mps_command *cm;
3127 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3129 if ((cm = mps_alloc_command(sc)) == NULL)
3131 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3132 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3133 request->MsgFlags = 0;
3135 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3136 cm->cm_complete = mpssas_portenable_complete;
3140 mps_map_command(sc, cm);
3141 mps_dprint(sc, MPS_TRACE,
3142 "mps_send_portenable finished cm %p req %p complete %p\n",
3143 cm, cm->cm_req, cm->cm_complete);
3148 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3150 MPI2_PORT_ENABLE_REPLY *reply;
3151 struct mpssas_softc *sassc;
3152 struct mpssas_target *target;
3155 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3159 * Currently there should be no way we can hit this case. It only
3160 * happens when we have a failure to allocate chain frames, and
3161 * port enable commands don't have S/G lists.
3163 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3164 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3165 "This should not happen!\n", __func__, cm->cm_flags);
3168 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3170 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3171 else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3172 MPI2_IOCSTATUS_SUCCESS)
3173 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3175 mps_free_command(sc, cm);
3176 if (sc->mps_ich.ich_arg != NULL) {
3177 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3178 config_intrhook_disestablish(&sc->mps_ich);
3179 sc->mps_ich.ich_arg = NULL;
3183 * Get WarpDrive info after discovery is complete but before the scan
3184 * starts. At this point, all devices are ready to be exposed to the
3185 * OS. If devices should be hidden instead, take them out of the
3186 * 'targets' array before the scan. The devinfo for a disk will have
3187 * some info and a volume's will be 0. Use that to remove disks.
3189 mps_wd_config_pages(sc);
3190 if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
3191 && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
3192 || (sc->WD_valid_config && (sc->WD_hide_expose ==
3193 MPS_WD_HIDE_IF_VOLUME))) {
3194 for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
3195 target = &sassc->targets[i];
3196 if (target->devinfo) {
3197 target->devinfo = 0x0;
3198 target->encl_handle = 0x0;
3199 target->encl_slot = 0x0;
3200 target->handle = 0x0;
3202 target->linkrate = 0x0;
3203 target->flags = 0x0;
3209 * Done waiting for port enable to complete. Decrement the refcount.
3210 * If refcount is 0, discovery is complete and a rescan of the bus can
3211 * take place. Since the simq was explicitly frozen before port
3212 * enable, it must be explicitly released here to keep the
3213 * freeze/release count in sync.
3215 sc->wait_for_port_enable = 0;
3216 sc->port_enable_complete = 1;
3217 mpssas_startup_decrement(sassc);
3218 xpt_release_simq(sassc->sim, 1);