2 * Copyright (c) 2009 Yahoo! Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2011 LSI Corp.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
39 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * LSI MPT-Fusion Host Adapter FreeBSD
53 * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
56 /* Communications core for LSI MPT2 */
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
69 #include <sys/malloc.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
80 #include <machine/stdarg.h>
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
108 #define MPSSAS_DISCOVERY_TIMEOUT 20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
112 * static array to check SCSI OpCode for EEDP protection bits
114 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
115 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
116 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
117 static uint8_t op_code_prot[256] = {
118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
120 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
122 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
123 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
124 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
125 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
126 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
127 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
128 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
136 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
138 static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
139 static void mpssas_log_command(struct mps_command *, const char *, ...)
141 #if 0 /* XXX unused */
142 static void mpssas_discovery_timeout(void *data);
144 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
145 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
146 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
147 static void mpssas_poll(struct cam_sim *sim);
148 static void mpssas_scsiio_timeout(void *data);
149 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
150 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
151 struct mps_command *cm, union ccb *ccb);
152 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
153 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
154 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
155 #if __FreeBSD_version >= 900026
156 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
157 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
159 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
160 #endif //FreeBSD_version >= 900026
161 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
162 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
163 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
164 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
165 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
166 static void mpssas_scanner_thread(void *arg);
167 #if __FreeBSD_version >= 1000006
168 static void mpssas_async(void *callback_arg, uint32_t code,
169 struct cam_path *path, void *arg);
171 static void mpssas_check_eedp(struct mpssas_softc *sassc);
172 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
174 static int mpssas_send_portenable(struct mps_softc *sc);
175 static void mpssas_portenable_complete(struct mps_softc *sc,
176 struct mps_command *cm);
178 static struct mpssas_target *
179 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
181 struct mpssas_target *target;
184 for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
185 target = &sassc->targets[i];
186 if (target->handle == handle)
193 /* we need to freeze the simq during attach and diag reset, to avoid failing
194 * commands before device handles have been found by discovery. Since
195 * discovery involves reading config pages and possibly sending commands,
196 * discovery actions may continue even after we receive the end of discovery
197 * event, so refcount discovery actions instead of assuming we can unfreeze
198 * the simq when we get the event.
201 mpssas_startup_increment(struct mpssas_softc *sassc)
203 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
204 if (sassc->startup_refcount++ == 0) {
205 /* just starting, freeze the simq */
206 mps_dprint(sassc->sc, MPS_INFO,
207 "%s freezing simq\n", __func__);
208 xpt_freeze_simq(sassc->sim, 1);
210 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
211 sassc->startup_refcount);
216 mpssas_startup_decrement(struct mpssas_softc *sassc)
218 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
219 if (--sassc->startup_refcount == 0) {
220 /* finished all discovery-related actions, release
221 * the simq and rescan for the latest topology.
223 mps_dprint(sassc->sc, MPS_INFO,
224 "%s releasing simq\n", __func__);
225 sassc->flags &= ~MPSSAS_IN_STARTUP;
226 xpt_release_simq(sassc->sim, 1);
227 mpssas_rescan_target(sassc->sc, NULL);
229 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
230 sassc->startup_refcount);
234 /* LSI's firmware requires us to stop sending commands when we're doing task
235 * management, so refcount the TMs and keep the simq frozen when any are in
239 mpssas_alloc_tm(struct mps_softc *sc)
241 struct mps_command *tm;
243 tm = mps_alloc_high_priority_command(sc);
245 if (sc->sassc->tm_count++ == 0) {
246 mps_printf(sc, "%s freezing simq\n", __func__);
247 xpt_freeze_simq(sc->sassc->sim, 1);
249 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
250 sc->sassc->tm_count);
256 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
261 /* if there are no TMs in use, we can release the simq. We use our
262 * own refcount so that it's easier for a diag reset to cleanup and
265 if (--sc->sassc->tm_count == 0) {
266 mps_printf(sc, "%s releasing simq\n", __func__);
267 xpt_release_simq(sc->sassc->sim, 1);
269 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
270 sc->sassc->tm_count);
272 mps_free_high_priority_command(sc, tm);
277 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
279 struct mpssas_softc *sassc = sc->sassc;
281 target_id_t targetid;
284 pathid = cam_sim_path(sassc->sim);
286 targetid = CAM_TARGET_WILDCARD;
288 targetid = targ - sassc->targets;
291 * Allocate a CCB and schedule a rescan.
293 ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
295 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
296 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
302 /* XXX Hardwired to scan the bus for now */
303 ccb->ccb_h.func_code = XPT_SCAN_BUS;
304 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
305 mpssas_rescan(sassc, ccb);
309 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
319 sbuf_new(&sb, str, sizeof(str), 0);
323 if (cm->cm_ccb != NULL) {
324 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
326 sbuf_cat(&sb, path_str);
327 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
328 scsi_command_string(&cm->cm_ccb->csio, &sb);
329 sbuf_printf(&sb, "length %d ",
330 cm->cm_ccb->csio.dxfer_len);
334 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
335 cam_sim_name(cm->cm_sc->sassc->sim),
336 cam_sim_unit(cm->cm_sc->sassc->sim),
337 cam_sim_bus(cm->cm_sc->sassc->sim),
338 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
342 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
343 sbuf_vprintf(&sb, fmt, ap);
345 kprintf("%s", sbuf_data(&sb));
351 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
353 struct mpssas_softc *sassc = sc->sassc;
354 path_id_t pathid = cam_sim_path(sassc->sim);
355 struct cam_path *path;
357 mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
358 if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
359 mps_printf(sc, "unable to create path for lost target %d\n",
364 xpt_async(AC_LOST_DEVICE, path, NULL);
369 * The MPT2 firmware performs debounce on the link to avoid transient link
370 * errors and false removals. When it does decide that link has been lost
371 * and a device needs to go away, it expects that the host will perform a
372 * target reset and then an op remove. The reset has the side-effect of
373 * aborting any outstanding requests for the device, which is required for
374 * the op-remove to succeed. It's not clear if the host should check for
375 * the device coming back alive after the reset.
378 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
380 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
381 struct mps_softc *sc;
382 struct mps_command *cm;
383 struct mpssas_target *targ = NULL;
385 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
388 * If this is a WD controller, determine if the disk should be exposed
389 * to the OS or not. If disk should be exposed, return from this
390 * function without doing anything.
393 if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
394 MPS_WD_EXPOSE_ALWAYS)) {
398 targ = mpssas_find_target_by_handle(sassc, 0, handle);
400 /* FIXME: what is the action? */
401 /* We don't know about this device? */
402 kprintf("%s: invalid handle 0x%x \n", __func__, handle);
406 targ->flags |= MPSSAS_TARGET_INREMOVAL;
408 cm = mpssas_alloc_tm(sc);
410 mps_printf(sc, "%s: command alloc failure\n", __func__);
414 mpssas_lost_target(sc, targ);
416 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
417 memset(req, 0, sizeof(*req));
418 req->DevHandle = targ->handle;
419 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
420 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
422 /* SAS Hard Link Reset / SATA Link Reset */
423 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
427 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
428 cm->cm_complete = mpssas_remove_device;
429 cm->cm_complete_data = (void *)(uintptr_t)handle;
430 mps_map_command(sc, cm);
434 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
436 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
437 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
438 struct mpssas_target *targ;
439 struct mps_command *next_cm;
442 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
444 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
445 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
449 * Currently there should be no way we can hit this case. It only
450 * happens when we have a failure to allocate chain frames, and
451 * task management commands don't have S/G lists.
453 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
454 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
455 "This should not happen!\n", __func__, tm->cm_flags,
457 mpssas_free_tm(sc, tm);
462 /* XXX retry the remove after the diag reset completes? */
463 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
465 mpssas_free_tm(sc, tm);
469 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
470 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
471 reply->IOCStatus, handle);
472 mpssas_free_tm(sc, tm);
476 mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
477 reply->TerminationCount);
478 mps_free_reply(sc, tm->cm_reply_data);
479 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
481 /* Reuse the existing command */
482 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
483 memset(req, 0, sizeof(*req));
484 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
485 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
486 req->DevHandle = handle;
488 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
489 tm->cm_complete = mpssas_remove_complete;
490 tm->cm_complete_data = (void *)(uintptr_t)handle;
492 mps_map_command(sc, tm);
494 mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
496 TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
499 mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
500 ccb = tm->cm_complete_data;
501 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
502 mpssas_scsiio_complete(sc, tm);
507 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
509 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
511 struct mpssas_target *targ;
513 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
515 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
516 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
519 * Currently there should be no way we can hit this case. It only
520 * happens when we have a failure to allocate chain frames, and
521 * task management commands don't have S/G lists.
523 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
524 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
525 "This should not happen!\n", __func__, tm->cm_flags,
527 mpssas_free_tm(sc, tm);
532 /* most likely a chip reset */
533 mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
535 mpssas_free_tm(sc, tm);
539 mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
540 handle, reply->IOCStatus);
543 * Don't clear target if remove fails because things will get confusing.
544 * Leave the devname and sasaddr intact so that we know to avoid reusing
545 * this target id if possible, and so we can assign the same target id
546 * to this device if it comes back in the future.
548 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
551 targ->encl_handle = 0x0;
552 targ->encl_slot = 0x0;
553 targ->exp_dev_handle = 0x0;
555 targ->linkrate = 0x0;
559 mpssas_free_tm(sc, tm);
563 mpssas_register_events(struct mps_softc *sc)
568 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
569 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
570 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
571 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
572 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
573 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
574 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
575 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
576 setbit(events, MPI2_EVENT_IR_VOLUME);
577 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
578 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
579 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
581 mps_register_events(sc, events, mpssas_evt_handler, NULL,
582 &sc->sassc->mpssas_eh);
588 mps_attach_sas(struct mps_softc *sc)
590 struct mpssas_softc *sassc;
591 #if __FreeBSD_version >= 1000006
596 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
598 sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
599 sassc->targets = kmalloc(sizeof(struct mpssas_target) *
600 sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
604 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
605 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
610 unit = device_get_unit(sc->mps_dev);
611 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
612 unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
613 if (sassc->sim == NULL) {
614 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
619 TAILQ_INIT(&sassc->ev_queue);
621 /* Initialize taskqueue for Event Handling */
622 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
623 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
624 taskqueue_thread_enqueue, &sassc->ev_tq);
626 /* Run the task queue with lowest priority */
627 taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
628 device_get_nameunit(sc->mps_dev));
630 TAILQ_INIT(&sassc->ccb_scanq);
631 error = mps_kproc_create(mpssas_scanner_thread, sassc,
632 &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
634 mps_printf(sc, "Error %d starting rescan thread\n", error);
639 sassc->flags |= MPSSAS_SCANTHREAD;
642 * XXX There should be a bus for every port on the adapter, but since
643 * we're just going to fake the topology for now, we'll pretend that
644 * everything is just a target on a single bus.
646 if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
647 mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
654 * Assume that discovery events will start right away. Freezing
655 * the simq will prevent the CAM boottime scanner from running
656 * before discovery is complete.
658 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
659 xpt_freeze_simq(sassc->sim, 1);
660 sc->sassc->startup_refcount = 0;
662 callout_init_mp(&sassc->discovery_callout);
663 sassc->discovery_timeouts = 0;
667 #if __FreeBSD_version >= 1000006
668 status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
669 if (status != CAM_REQ_CMP) {
670 mps_printf(sc, "Error %#x registering async handler for "
671 "AC_ADVINFO_CHANGED events\n", status);
677 mpssas_register_events(sc);
685 mps_detach_sas(struct mps_softc *sc)
687 struct mpssas_softc *sassc;
689 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
691 if (sc->sassc == NULL)
695 mps_deregister_events(sc, sassc->mpssas_eh);
698 * Drain and free the event handling taskqueue with the lock
699 * unheld so that any parallel processing tasks drain properly
700 * without deadlocking.
702 if (sassc->ev_tq != NULL)
703 taskqueue_free(sassc->ev_tq);
705 /* Make sure CAM doesn't wedge if we had to bail out early. */
708 /* Deregister our async handler */
709 #if __FreeBSD_version >= 1000006
710 xpt_register_async(0, mpssas_async, sc, NULL);
713 if (sassc->flags & MPSSAS_IN_STARTUP)
714 xpt_release_simq(sassc->sim, 1);
716 if (sassc->sim != NULL) {
717 xpt_bus_deregister(cam_sim_path(sassc->sim));
718 cam_sim_free(sassc->sim);
721 if (sassc->flags & MPSSAS_SCANTHREAD) {
722 sassc->flags |= MPSSAS_SHUTDOWN;
723 wakeup(&sassc->ccb_scanq);
725 if (sassc->flags & MPSSAS_SCANTHREAD) {
726 lksleep(&sassc->flags, &sc->mps_lock, 0,
727 "mps_shutdown", 30 * hz);
732 if (sassc->devq != NULL)
733 cam_simq_release(sassc->devq);
735 kfree(sassc->targets, M_MPT2);
736 kfree(sassc, M_MPT2);
743 mpssas_discovery_end(struct mpssas_softc *sassc)
745 struct mps_softc *sc = sassc->sc;
747 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
749 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
750 callout_stop(&sassc->discovery_callout);
754 #if 0 /* XXX unused */
756 mpssas_discovery_timeout(void *data)
758 struct mpssas_softc *sassc = data;
759 struct mps_softc *sc;
762 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
766 "Timeout waiting for discovery, interrupts may not be working!\n");
767 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
769 /* Poll the hardware for events in case interrupts aren't working */
772 mps_printf(sassc->sc,
773 "Finished polling after discovery timeout at %d\n", ticks);
775 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
776 mpssas_discovery_end(sassc);
778 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
779 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
780 callout_reset(&sassc->discovery_callout,
781 MPSSAS_DISCOVERY_TIMEOUT * hz,
782 mpssas_discovery_timeout, sassc);
783 sassc->discovery_timeouts++;
785 mps_dprint(sassc->sc, MPS_FAULT,
786 "Discovery timed out, continuing.\n");
787 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
788 mpssas_discovery_end(sassc);
797 mpssas_action(struct cam_sim *sim, union ccb *ccb)
799 struct mpssas_softc *sassc;
801 sassc = cam_sim_softc(sim);
803 mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
804 ccb->ccb_h.func_code);
805 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
807 switch (ccb->ccb_h.func_code) {
810 struct ccb_pathinq *cpi = &ccb->cpi;
812 cpi->version_num = 1;
813 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
814 cpi->target_sprt = 0;
815 cpi->hba_misc = PIM_NOBUSRESET;
816 cpi->hba_eng_cnt = 0;
817 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
819 cpi->initiator_id = 255;
820 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
821 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
822 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
823 cpi->unit_number = cam_sim_unit(sim);
824 cpi->bus_id = cam_sim_bus(sim);
825 cpi->base_transfer_speed = 150000;
826 cpi->transport = XPORT_SAS;
827 cpi->transport_version = 0;
828 cpi->protocol = PROTO_SCSI;
829 cpi->protocol_version = SCSI_REV_SPC;
830 #if __FreeBSD_version >= 800001
832 * XXX KDM where does this number come from?
834 cpi->maxio = 256 * 1024;
836 cpi->ccb_h.status = CAM_REQ_CMP;
839 case XPT_GET_TRAN_SETTINGS:
841 struct ccb_trans_settings *cts;
842 struct ccb_trans_settings_sas *sas;
843 struct ccb_trans_settings_scsi *scsi;
844 struct mpssas_target *targ;
847 sas = &cts->xport_specific.sas;
848 scsi = &cts->proto_specific.scsi;
850 targ = &sassc->targets[cts->ccb_h.target_id];
851 if (targ->handle == 0x0) {
852 cts->ccb_h.status = CAM_TID_INVALID;
856 cts->protocol_version = SCSI_REV_SPC2;
857 cts->transport = XPORT_SAS;
858 cts->transport_version = 0;
860 sas->valid = CTS_SAS_VALID_SPEED;
861 switch (targ->linkrate) {
863 sas->bitrate = 150000;
866 sas->bitrate = 300000;
869 sas->bitrate = 600000;
875 cts->protocol = PROTO_SCSI;
876 scsi->valid = CTS_SCSI_VALID_TQ;
877 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
879 cts->ccb_h.status = CAM_REQ_CMP;
882 case XPT_CALC_GEOMETRY:
883 cam_calc_geometry(&ccb->ccg, /*extended*/1);
884 ccb->ccb_h.status = CAM_REQ_CMP;
887 mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
888 mpssas_action_resetdev(sassc, ccb);
893 mps_printf(sassc->sc, "mpssas_action faking success for "
895 ccb->ccb_h.status = CAM_REQ_CMP;
898 mpssas_action_scsiio(sassc, ccb);
900 #if __FreeBSD_version >= 900026
902 mpssas_action_smpio(sassc, ccb);
906 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
914 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
915 target_id_t target_id, lun_id_t lun_id)
917 path_id_t path_id = cam_sim_path(sc->sassc->sim);
918 struct cam_path *path;
920 mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
921 ac_code, target_id, lun_id);
923 if (xpt_create_path(&path, NULL,
924 path_id, target_id, lun_id) != CAM_REQ_CMP) {
925 mps_printf(sc, "unable to create path for reset "
930 xpt_async(ac_code, path, NULL);
935 mpssas_complete_all_commands(struct mps_softc *sc)
937 struct mps_command *cm;
941 mps_printf(sc, "%s\n", __func__);
942 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
944 /* complete all commands with a NULL reply */
945 for (i = 1; i < sc->num_reqs; i++) {
946 cm = &sc->commands[i];
950 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
951 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
953 if (cm->cm_complete != NULL) {
954 mpssas_log_command(cm,
955 "completing cm %p state %x ccb %p for diag reset\n",
956 cm, cm->cm_state, cm->cm_ccb);
958 cm->cm_complete(sc, cm);
962 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
963 mpssas_log_command(cm,
964 "waking up cm %p state %x ccb %p for diag reset\n",
965 cm, cm->cm_state, cm->cm_ccb);
970 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
971 /* this should never happen, but if it does, log */
972 mpssas_log_command(cm,
973 "cm %p state %x flags 0x%x ccb %p during diag "
974 "reset\n", cm, cm->cm_state, cm->cm_flags,
981 mpssas_handle_reinit(struct mps_softc *sc)
985 /* Go back into startup mode and freeze the simq, so that CAM
986 * doesn't send any commands until after we've rediscovered all
987 * targets and found the proper device handles for them.
989 * After the reset, portenable will trigger discovery, and after all
990 * discovery-related activities have finished, the simq will be
993 mps_printf(sc, "%s startup\n", __func__);
994 sc->sassc->flags |= MPSSAS_IN_STARTUP;
995 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
996 xpt_freeze_simq(sc->sassc->sim, 1);
998 /* notify CAM of a bus reset */
999 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1002 /* complete and cleanup after all outstanding commands */
1003 mpssas_complete_all_commands(sc);
1005 mps_printf(sc, "%s startup %u tm %u after command completion\n",
1006 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1009 * The simq was explicitly frozen above, so set the refcount to 0.
1010 * The simq will be explicitly released after port enable completes.
1012 sc->sassc->startup_refcount = 0;
1014 /* zero all the target handles, since they may change after the
1015 * reset, and we have to rediscover all the targets and use the new
1018 for (i = 0; i < sc->facts->MaxTargets; i++) {
1019 if (sc->sassc->targets[i].outstanding != 0)
1020 mps_printf(sc, "target %u outstanding %u\n",
1021 i, sc->sassc->targets[i].outstanding);
1022 sc->sassc->targets[i].handle = 0x0;
1023 sc->sassc->targets[i].exp_dev_handle = 0x0;
1024 sc->sassc->targets[i].outstanding = 0;
1025 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1029 mpssas_tm_timeout(void *data)
1031 struct mps_command *tm = data;
1032 struct mps_softc *sc = tm->cm_sc;
1035 mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1041 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1043 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1044 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1045 unsigned int cm_count = 0;
1046 struct mps_command *cm;
1047 struct mpssas_target *targ;
1049 callout_stop(&tm->cm_callout);
1051 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1052 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1056 * Currently there should be no way we can hit this case. It only
1057 * happens when we have a failure to allocate chain frames, and
1058 * task management commands don't have S/G lists.
1060 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1061 mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1062 "This should not happen!\n", __func__, tm->cm_flags);
1063 mpssas_free_tm(sc, tm);
1067 if (reply == NULL) {
1068 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1069 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1070 /* this completion was due to a reset, just cleanup */
1071 targ->flags &= ~MPSSAS_TARGET_INRESET;
1073 mpssas_free_tm(sc, tm);
1076 /* we should have gotten a reply. */
1082 mpssas_log_command(tm,
1083 "logical unit reset status 0x%x code 0x%x count %u\n",
1084 reply->IOCStatus, reply->ResponseCode,
1085 reply->TerminationCount);
1087 /* See if there are any outstanding commands for this LUN.
1088 * This could be made more efficient by using a per-LU data
1089 * structure of some sort.
1091 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1092 if (cm->cm_lun == tm->cm_lun)
1096 if (cm_count == 0) {
1097 mpssas_log_command(tm,
1098 "logical unit %u finished recovery after reset\n",
1101 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1104 /* we've finished recovery for this logical unit. check and
1105 * see if some other logical unit has a timedout command
1106 * that needs to be processed.
1108 cm = TAILQ_FIRST(&targ->timedout_commands);
1110 mpssas_send_abort(sc, tm, cm);
1114 mpssas_free_tm(sc, tm);
1118 /* if we still have commands for this LUN, the reset
1119 * effectively failed, regardless of the status reported.
1120 * Escalate to a target reset.
1122 mpssas_log_command(tm,
1123 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1125 mpssas_send_reset(sc, tm,
1126 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a target-reset task-management (TM) command.
 * Stops the TM timeout, validates the reply, clears the target's
 * INRESET flag, and either finishes recovery (announcing AC_SENT_BDR to
 * CAM and freeing the TM) or — if the target still has outstanding
 * commands — treats the reset as failed.
 *
 * NOTE(review): this chunk is a sampled copy with lines elided; braces,
 * returns, and some assignments are missing from view.
 */
1131 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1133 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1134 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1135 struct mpssas_target *targ;
/* Cancel the reset-timeout callout armed by mpssas_send_reset(). */
1137 callout_stop(&tm->cm_callout);
1139 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1140 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1144 * Currently there should be no way we can hit this case.  It only
1145 * happens when we have a failure to allocate chain frames, and
1146 * task management commands don't have S/G lists.
1148 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1149 mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1150 "This should not happen!\n", __func__, tm->cm_flags);
1151 mpssas_free_tm(sc, tm);
1155 if (reply == NULL) {
1156 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1157 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1158 /* this completion was due to a reset, just cleanup */
/* NOTE(review): `targ` is read here but its assignment (presumably
 * targ = tm->cm_targ) is on an elided line — confirm against the
 * full source. */
1159 targ->flags &= ~MPSSAS_TARGET_INRESET;
1161 mpssas_free_tm(sc, tm);
1164 /* we should have gotten a reply. */
1170 mpssas_log_command(tm,
1171 "target reset status 0x%x code 0x%x count %u\n",
1172 reply->IOCStatus, reply->ResponseCode,
1173 reply->TerminationCount);
/* Reset completed one way or the other; the target is no longer
 * considered "in reset". */
1175 targ->flags &= ~MPSSAS_TARGET_INRESET;
1177 if (targ->outstanding == 0) {
1178 /* we've finished recovery for this target and all
1179 * of its logical units.
1181 mpssas_log_command(tm,
1182 "recovery finished after target reset\n");
/* Tell CAM a bus device reset was delivered to this target. */
1184 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1188 mpssas_free_tm(sc, tm);
1191 /* after a target reset, if this target still has
1192 * outstanding commands, the reset effectively failed,
1193 * regardless of the status reported.  escalate.
1195 mpssas_log_command(tm,
1196 "target reset complete for tm %p, but still have %u command(s)\n",
1197 tm, targ->outstanding);
/* Seconds to wait for a reset TM before mpssas_tm_timeout() fires. */
1202 #define MPS_RESET_TIMEOUT 30
/*
 * Build and send a logical-unit or target reset task-management request
 * using the pre-allocated TM command `tm`.  Sets the matching completion
 * handler, arms a MPS_RESET_TIMEOUT callout, and maps the command to the
 * hardware.  `type` is one of the MPI2_SCSITASKMGMT_TASKTYPE_* reset
 * types; any other value is logged as unexpected.
 */
1205 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1207 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1208 struct mpssas_target *target;
1211 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1212 if (target->handle == 0) {
1213 mps_printf(sc, "%s null devhandle for target_id %d\n",
1214 __func__, target->tid);
1218 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1219 req->DevHandle = target->handle;
1220 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1221 req->TaskType = type;
1223 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1224 /* XXX Need to handle invalid LUNs */
1225 MPS_SET_LUN(req->LUN, tm->cm_lun);
1226 tm->cm_targ->logical_unit_resets++;
1227 mpssas_log_command(tm, "sending logical unit reset\n");
1228 tm->cm_complete = mpssas_logical_unit_reset_complete;
1230 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1231 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1232 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1233 tm->cm_targ->target_resets++;
1234 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1235 mpssas_log_command(tm, "sending target reset\n");
1236 tm->cm_complete = mpssas_target_reset_complete;
1239 mps_printf(sc, "unexpected reset type 0x%x\n", type);
/* TM requests go out on the high-priority descriptor queue. */
1244 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1245 tm->cm_complete_data = (void *)tm;
1247 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1248 mpssas_tm_timeout, tm);
1250 err = mps_map_command(sc, tm);
1252 mpssas_log_command(tm,
1253 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT_TASK task-management command.
 * Checks whether the aborted command actually completed (it should no
 * longer be at the head of the target's timedout list).  If more
 * timed-out commands remain, continues aborting them; if the abort did
 * not take effect, escalates to a logical-unit reset.
 *
 * NOTE(review): sampled source — braces/returns and some statements are
 * elided from this view.
 */
1261 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1263 struct mps_command *cm;
1264 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1265 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1266 struct mpssas_target *targ;
/* Cancel the abort-timeout callout armed by mpssas_send_abort(). */
1268 callout_stop(&tm->cm_callout);
1270 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1271 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1275 * Currently there should be no way we can hit this case.  It only
1276 * happens when we have a failure to allocate chain frames, and
1277 * task management commands don't have S/G lists.
1279 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1280 mpssas_log_command(tm,
1281 "cm_flags = %#x for abort %p TaskMID %u!\n",
1282 tm->cm_flags, tm, req->TaskMID);
1283 mpssas_free_tm(sc, tm);
1287 if (reply == NULL) {
1288 mpssas_log_command(tm,
1289 "NULL abort reply for tm %p TaskMID %u\n",
1291 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1292 /* this completion was due to a reset, just cleanup */
1294 mpssas_free_tm(sc, tm);
1297 /* we should have gotten a reply. */
1303 mpssas_log_command(tm,
1304 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1306 reply->IOCStatus, reply->ResponseCode,
1307 reply->TerminationCount);
/* Peek at the next timed-out command (if any) for this target. */
1309 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1311 /* if there are no more timedout commands, we're done with
1312 * error recovery for this target.
1314 mpssas_log_command(tm,
1315 "finished recovery after aborting TaskMID %u\n",
1319 mpssas_free_tm(sc, tm);
1321 else if (req->TaskMID != cm->cm_desc.Default.SMID) {
1322 /* abort success, but we have more timedout commands to abort */
1323 mpssas_log_command(tm,
1324 "continuing recovery after aborting TaskMID %u\n",
/* Reuse the same TM command for the next abort. */
1327 mpssas_send_abort(sc, tm, cm);
1330 /* we didn't get a command completion, so the abort
1331 * failed as far as we're concerned.  escalate.
1333 mpssas_log_command(tm,
1334 "abort failed for TaskMID %u tm %p\n",
/* Escalate: abort didn't work, try a logical-unit reset. */
1337 mpssas_send_reset(sc, tm,
1338 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to wait for an ABORT_TASK TM before mpssas_tm_timeout() fires. */
1342 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and send an ABORT_TASK task-management request for the
 * timed-out command `cm`, using the pre-allocated TM command `tm`.
 * Targets the SMID of `cm`, arms a MPS_ABORT_TIMEOUT callout, and maps
 * the TM to the hardware; completion is handled by
 * mpssas_abort_complete().
 */
1345 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1347 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1348 struct mpssas_target *targ;
/* NOTE(review): `targ` is read here but its assignment is on an elided
 * line (presumably targ = cm->cm_targ) — confirm against full source. */
1352 if (targ->handle == 0) {
1353 mps_printf(sc, "%s null devhandle for target_id %d\n",
1354 __func__, cm->cm_ccb->ccb_h.target_id);
1358 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1359 req->DevHandle = targ->handle;
1360 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1361 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1363 /* XXX Need to handle invalid LUNs */
1364 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* Identify the exact request to abort by its SMID. */
1366 req->TaskMID = cm->cm_desc.Default.SMID;
1369 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1370 tm->cm_complete = mpssas_abort_complete;
1371 tm->cm_complete_data = (void *)tm;
1372 tm->cm_targ = cm->cm_targ;
1373 tm->cm_lun = cm->cm_lun;
1375 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1376 mpssas_tm_timeout, tm);
1380 err = mps_map_command(sc, tm);
1382 mpssas_log_command(tm,
1383 "error %d sending abort for cm %p SMID %u\n",
1384 err, cm, req->TaskMID);
/*
 * Callout handler invoked when a SCSI I/O command times out.  Runs the
 * interrupt handler once in case the completion is merely pending, then
 * marks the command TIMEDOUT, queues it on the target's timedout list,
 * and starts recovery by allocating a TM and aborting the command (or
 * defers if recovery for this target is already in progress).
 */
1390 mpssas_scsiio_timeout(void *data)
1392 struct mps_softc *sc;
1393 struct mps_command *cm;
1394 struct mpssas_target *targ;
1396 cm = (struct mps_command *)data;
/* NOTE(review): `sc` and `targ` are read below without visible
 * assignments (presumably sc = cm->cm_sc, targ = cm->cm_targ on
 * elided lines) — confirm against full source. */
/* Caller context must hold the softc lock (DragonFly lockmgr). */
1399 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1401 mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1404 * Run the interrupt handler to make sure it's not pending.  This
1405 * isn't perfect because the command could have already completed
1406 * and been re-used, though this is unlikely.
1408 mps_intr_locked(sc);
1409 if (cm->cm_state == MPS_CM_STATE_FREE) {
1410 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1414 if (cm->cm_ccb == NULL) {
1415 mps_printf(sc, "command timeout with NULL ccb\n");
1419 mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1425 /* XXX first, check the firmware state, to see if it's still
1426 * operational.  if not, do a diag reset.
1429 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1430 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1431 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1433 if (targ->tm != NULL) {
1434 /* target already in recovery, just queue up another
1435 * timedout command to be processed later.
1437 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1440 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1441 mps_printf(sc, "timedout cm %p allocated tm %p\n",
1444 /* start recovery by aborting the first timedout command */
1445 mpssas_send_abort(sc, targ->tm, cm);
1448 /* XXX queue this target up for recovery once a TM becomes
1449 * available.  The firmware only has a limited number of
1450 * HighPriority credits for the high priority requests used
1451 * for task management, and we ran out.
1453 * Isilon: don't worry about this for now, since we have
1454 * more credits than disks in an enclosure, and limit
1455 * ourselves to one TM per target for recovery.
1457 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
/*
 * CAM action handler for XPT_SCSI_IO CCBs: validates the target,
 * allocates a driver command, fills in the MPI2 SCSI IO request (data
 * direction, tag/queue action, LUN, CDB, optional EEDP protection
 * fields), optionally attempts a WarpDrive direct-drive translation,
 * arms the per-command timeout, and maps the command to the hardware.
 * Freezes the SIM queue and requeues the CCB if no command is free.
 */
1464 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1466 MPI2_SCSI_IO_REQUEST *req;
1467 struct ccb_scsiio *csio;
1468 struct mps_softc *sc;
1469 struct mpssas_target *targ;
1470 struct mpssas_lun *lun;
1471 struct mps_command *cm;
1472 uint8_t i, lba_byte, *ref_tag_addr;
1473 uint16_t eedp_flags;
/* NOTE(review): `sc` and `csio` are used below; their assignments
 * (sc = sassc->sc, csio = &ccb->csio) are on elided lines. */
1476 mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1477 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1480 targ = &sassc->targets[csio->ccb_h.target_id];
/* A zero devhandle means no device at this target id. */
1481 if (targ->handle == 0x0) {
1482 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1483 __func__, csio->ccb_h.target_id);
1484 csio->ccb_h.status = CAM_TID_INVALID;
1489 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1490 * that the volume has timed out.  We want volumes to be enumerated
1491 * until they are deleted/removed, not just failed.
1493 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1494 if (targ->devinfo == 0)
1495 csio->ccb_h.status = CAM_REQ_CMP;
1497 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1502 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1503 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1504 csio->ccb_h.status = CAM_TID_INVALID;
1509 cm = mps_alloc_command(sc);
/* Out of driver commands: freeze the SIM queue and ask CAM to
 * requeue this CCB later. */
1511 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1512 xpt_freeze_simq(sassc->sim, 1);
1513 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1515 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1516 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1521 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1522 bzero(req, sizeof(*req));
1523 req->DevHandle = targ->handle;
1524 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1526 req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1527 req->SenseBufferLength = MPS_SENSE_LEN;
1529 req->ChainOffset = 0;
1530 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1535 req->DataLength = csio->dxfer_len;
1536 req->BidirectionalDataLength = 0;
/* NOTE(review): IoFlags is also set again after the CDB copy below
 * (line 1590) — redundant but harmless. */
1537 req->IoFlags = csio->cdb_len;
1540 /* Note: BiDirectional transfers are not supported */
1541 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1543 req->Control = MPI2_SCSIIO_CONTROL_READ;
1544 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1547 req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1548 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1552 req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1557 * It looks like the hardware doesn't require an explicit tag
1558 * number for each transaction.  SAM Task Management not supported
1561 switch (csio->tag_action) {
1562 case MSG_HEAD_OF_Q_TAG:
1563 req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1565 case MSG_ORDERED_Q_TAG:
1566 req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1569 req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1571 case CAM_TAG_ACTION_NONE:
1572 case MSG_SIMPLE_Q_TAG:
1574 req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Merge in any per-target Transport Layer Retries setting. */
1577 req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1579 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1580 mps_free_command(sc, cm);
1581 ccb->ccb_h.status = CAM_LUN_INVALID;
1586 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1587 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1589 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1590 req->IoFlags = csio->cdb_len;
1593 * Check if EEDP is supported and enabled.  If it is then check if the
1594 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1595 * is formatted for EEDP support.  If all of this is true, set CDB up
1596 * for EEDP transfer.
1598 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1599 if (sc->eedp_enabled && eedp_flags) {
1600 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1601 if (lun->lun_id == csio->ccb_h.target_lun) {
1606 if ((lun != NULL) && (lun->eedp_formatted)) {
1607 req->EEDPBlockSize = lun->eedp_block_size;
1608 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1609 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1610 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1611 req->EEDPFlags = eedp_flags;
1614 * If CDB less than 32, fill in Primary Ref Tag with
1615 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1616 * already there.  Also, set protection bit.  FreeBSD
1617 * currently does not support CDBs bigger than 16, but
1618 * the code doesn't hurt, and will be here for the
1621 if (csio->cdb_len != 32) {
1622 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1623 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1624 PrimaryReferenceTag;
1625 for (i = 0; i < 4; i++) {
1627 req->CDB.CDB32[lba_byte + i];
1630 req->CDB.EEDP32.PrimaryApplicationTagMask =
1632 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1636 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1637 req->EEDPFlags = eedp_flags;
1638 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1644 cm->cm_data = csio->data_ptr;
1645 cm->cm_length = csio->dxfer_len;
1646 cm->cm_sge = &req->SGL;
/* SGL space in the request frame: (frame dwords - SGLOffset0) * 4. */
1647 cm->cm_sglsize = (32 - 24) * 4;
1648 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1649 cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1650 cm->cm_complete = mpssas_scsiio_complete;
1651 cm->cm_complete_data = ccb;
1653 cm->cm_lun = csio->ccb_h.target_lun;
1657 * If HBA is a WD and the command is not for a retry, try to build a
1658 * direct I/O message.  If failed, or the command is for a retry, send
1659 * the I/O to the IR volume itself.
1661 if (sc->WD_valid_config) {
1662 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1663 mpssas_direct_drive_io(sassc, cm, ccb);
1665 ccb->ccb_h.status = CAM_REQ_INPROG;
/* Arm per-command timeout; ccb timeout is in milliseconds. */
1669 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1670 mpssas_scsiio_timeout, cm);
1673 targ->outstanding++;
1674 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1676 if ((sc->mps_debug & MPS_TRACE) != 0)
1677 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1678 __func__, cm, ccb, targ->outstanding);
1680 mps_map_command(sc, cm);
/*
 * Completion handler for SCSI I/O commands: syncs/unloads DMA maps,
 * removes the command from the target's lists (including recovery
 * bookkeeping for timed-out commands), translates the MPI2 IOCStatus /
 * SCSIStatus into a CAM status, copies autosense data, handles the
 * WarpDrive direct-I/O retry path, manages SIM-queue freezing, and
 * finally completes the CCB and frees the command.
 */
1685 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1687 MPI2_SCSI_IO_REPLY *rep;
1689 struct ccb_scsiio *csio;
1690 struct mpssas_softc *sassc;
1691 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1692 u8 *TLR_bits, TLR_on;
1696 mps_dprint(sc, MPS_TRACE,
1697 "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1698 __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1699 cm->cm_targ->outstanding);
1701 callout_stop(&cm->cm_callout);
1702 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
/* NOTE(review): `sassc`, `ccb`/`csio` initialization partially elided
 * in this sampled view (sassc = sc->sassc etc.) — confirm upstream. */
1705 ccb = cm->cm_complete_data;
1707 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1709 * XXX KDM if the chain allocation fails, does it matter if we do
1710 * the sync and unload here?  It is simpler to do it in every case,
1711 * assuming it doesn't cause problems.
1713 if (cm->cm_data != NULL) {
1714 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1715 dir = BUS_DMASYNC_POSTREAD;
1716 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
/* NOTE(review): stray double semicolon below; also `dir` appears
 * uninitialized when neither DATAIN nor DATAOUT is set — upstream
 * FreeBSD later restructured this.  TODO confirm. */
1717 dir = BUS_DMASYNC_POSTWRITE;;
1718 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1719 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1722 cm->cm_targ->completed++;
1723 cm->cm_targ->outstanding--;
1724 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
/* If this command had timed out, pull it off the recovery list and
 * log; likewise log completions that arrive mid-recovery. */
1726 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1727 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1728 if (cm->cm_reply != NULL)
1729 mpssas_log_command(cm,
1730 "completed timedout cm %p ccb %p during recovery "
1731 "ioc %x scsi %x state %x xfer %u\n",
1733 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1734 rep->TransferCount);
1736 mpssas_log_command(cm,
1737 "completed timedout cm %p ccb %p during recovery\n",
1739 } else if (cm->cm_targ->tm != NULL) {
1740 if (cm->cm_reply != NULL)
1741 mpssas_log_command(cm,
1742 "completed cm %p ccb %p during recovery "
1743 "ioc %x scsi %x state %x xfer %u\n",
1745 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1746 rep->TransferCount);
1748 mpssas_log_command(cm,
1749 "completed cm %p ccb %p during recovery\n",
1751 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1752 mpssas_log_command(cm,
1753 "reset completed cm %p ccb %p\n",
1757 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1759 * We ran into an error after we tried to map the command,
1760 * so we're getting a callback without queueing the command
1761 * to the hardware.  So we set the status here, and it will
1762 * be retained below.  We'll go through the "fast path",
1763 * because there can be no reply when we haven't actually
1764 * gone out to the hardware.
1766 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1769 * Currently the only error included in the mask is
1770 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1771 * chain frames.  We need to freeze the queue until we get
1772 * a command that completed without this error, which will
1773 * hopefully have some chain frames attached that we can
1774 * use.  If we wanted to get smarter about it, we would
1775 * only unfreeze the queue in this condition when we're
1776 * sure that we're getting some chain frames back.  That's
1777 * probably unnecessary.
1779 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1780 xpt_freeze_simq(sassc->sim, 1);
1781 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1782 mps_dprint(sc, MPS_INFO, "Error sending command, "
1783 "freezing SIM queue\n");
1787 /* Take the fast path to completion */
1788 if (cm->cm_reply == NULL) {
1789 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1790 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1791 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1793 ccb->ccb_h.status = CAM_REQ_CMP;
1794 ccb->csio.scsi_status = SCSI_STATUS_OK;
1796 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1797 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1798 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1799 mps_dprint(sc, MPS_INFO,
1800 "Unfreezing SIM queue\n");
1805 * There are two scenarios where the status won't be
1806 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
1807 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1809 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1811 * Freeze the dev queue so that commands are
1812 * executed in the correct order with after error
1815 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1816 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1818 mps_free_command(sc, cm);
1823 if (sc->mps_debug & MPS_TRACE)
1824 mpssas_log_command(cm,
1825 "ioc %x scsi %x state %x xfer %u\n",
1826 rep->IOCStatus, rep->SCSIStatus,
1827 rep->SCSIState, rep->TransferCount);
1830 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1831 * Volume if an error occurred (normal I/O retry).  Use the original
1832 * CCB, but set a flag that this will be a retry so that it's sent to
1833 * the original volume.  Free the command but reuse the CCB.
1835 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1836 mps_free_command(sc, cm);
1837 ccb->ccb_h.status = MPS_WD_RETRY;
1838 mpssas_action_scsiio(sassc, ccb);
/* Translate MPI2 IOC status into CAM status codes. */
1842 switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1843 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1844 csio->resid = cm->cm_length - rep->TransferCount;
1846 case MPI2_IOCSTATUS_SUCCESS:
1847 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1849 if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1850 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1851 mpssas_log_command(cm, "recovered error\n");
1853 /* Completion failed at the transport level. */
1854 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1855 MPI2_SCSI_STATE_TERMINATED)) {
1856 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1860 /* In a modern packetized environment, an autosense failure
1861 * implies that there's not much else that can be done to
1862 * recover the command.
1864 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1865 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1870 * CAM doesn't care about SAS Response Info data, but if this is
1871 * the state check if TLR should be done.  If not, clear the
1872 * TLR_bits for the target.
1874 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1875 ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1876 MPS_SCSI_RI_INVALID_FRAME)) {
1877 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1878 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1882 * Intentionally override the normal SCSI status reporting
1883 * for these two cases.  These are likely to happen in a
1884 * multi-initiator environment, and we want to make sure that
1885 * CAM retries these commands rather than fail them.
1887 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1888 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1889 ccb->ccb_h.status = CAM_REQ_ABORTED;
1893 /* Handle normal status and sense */
1894 csio->scsi_status = rep->SCSIStatus;
1895 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1896 ccb->ccb_h.status = CAM_REQ_CMP;
1898 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1900 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1901 int sense_len, returned_sense_len;
1903 returned_sense_len = min(rep->SenseCount,
1904 sizeof(struct scsi_sense_data));
1905 if (returned_sense_len < ccb->csio.sense_len)
1906 ccb->csio.sense_resid = ccb->csio.sense_len -
1909 ccb->csio.sense_resid = 0;
1911 sense_len = min(returned_sense_len,
1912 ccb->csio.sense_len - ccb->csio.sense_resid);
/* NOTE(review): sizeof(&...) below takes the size of a POINTER,
 * so only 4/8 bytes are zeroed, not the whole sense buffer.
 * Upstream FreeBSD later fixed this to
 * sizeof(ccb->csio.sense_data) — flagged, not changed here. */
1913 bzero(&ccb->csio.sense_data,
1914 sizeof(&ccb->csio.sense_data));
1915 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1916 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1920 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
1921 * and it's page code 0 (Supported Page List), and there is
1922 * inquiry data, and this is for a sequential access device, and
1923 * the device is an SSP target, and TLR is supported by the
1924 * controller, turn the TLR_bits value ON if page 0x90 is
1927 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1928 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1929 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1930 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1931 T_SEQUENTIAL) && (sc->control_TLR) &&
1932 (sc->mapping_table[csio->ccb_h.target_id].device_info &
1933 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1934 vpd_list = (struct scsi_vpd_supported_page_list *)
1936 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1938 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1939 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1940 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1941 csio->cdb_io.cdb_bytes[4];
1942 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1943 if (vpd_list->list[i] == 0x90) {
1950 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1951 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1953 * If devinfo is 0 this will be a volume.  In that case don't
1954 * tell CAM that the volume is not there.  We want volumes to
1955 * be enumerated until they are deleted/removed, not just
1958 if (cm->cm_targ->devinfo == 0)
1959 ccb->ccb_h.status = CAM_REQ_CMP;
1961 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1963 case MPI2_IOCSTATUS_INVALID_SGL:
1964 mps_print_scsiio_cmd(sc, cm);
1965 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1967 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1969 * This is one of the responses that comes back when an I/O
1970 * has been aborted.  If it is because of a timeout that we
1971 * initiated, just set the status to CAM_CMD_TIMEOUT.
1972 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
1973 * command is the same (it gets retried, subject to the
1974 * retry counter), the only difference is what gets printed
1977 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1978 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1980 ccb->ccb_h.status = CAM_REQ_ABORTED;
1982 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1983 /* resid is ignored for this condition */
1985 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1987 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1988 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1990 * Since these are generally external (i.e. hopefully
1991 * transient transport-related) errors, retry these without
1992 * decrementing the retry count.
1994 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1995 mpssas_log_command(cm,
1996 "terminated ioc %x scsi %x state %x xfer %u\n",
1997 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1998 rep->TransferCount);
2000 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2001 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2002 case MPI2_IOCSTATUS_INVALID_VPID:
2003 case MPI2_IOCSTATUS_INVALID_FIELD:
2004 case MPI2_IOCSTATUS_INVALID_STATE:
2005 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2006 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2007 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2008 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2009 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2011 mpssas_log_command(cm,
2012 "completed ioc %x scsi %x state %x xfer %u\n",
2013 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2014 rep->TransferCount);
2015 csio->resid = cm->cm_length;
2016 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* A successful completion means chain frames came back: it is now
 * safe to unfreeze the SIM queue frozen on a chain-alloc failure. */
2020 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2021 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2022 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2023 mps_dprint(sc, MPS_INFO, "Command completed, "
2024 "unfreezing SIM queue\n");
2027 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2028 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2029 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2032 mps_free_command(sc, cm);
/*
 * WarpDrive direct-drive I/O translation: for READ/WRITE 6/10/16 CDBs
 * addressed to the IR volume, if the I/O lies within the volume and
 * does not cross a stripe boundary, rewrite the CDB's LBA to the
 * physical-disk LBA and retarget the request at the member PhysDisk's
 * DevHandle, marking the command MPS_CM_FLAGS_DD_IO.  Any I/O that
 * cannot be translated is left to go to the IR volume itself.
 */
2037 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2039 pMpi2SCSIIORequest_t pIO_req;
2040 struct mps_softc *sc = sassc->sc;
2042 uint32_t physLBA, stripe_offset, stripe_unit;
2043 uint32_t io_size, column;
2044 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2047 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2048 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2049 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2050 * bit different than the 10/16 CDBs, handle them separately.
2052 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2053 CDB = pIO_req->CDB.CDB32;
2056 * Handle 6 byte CDBs.
2058 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2059 (CDB[0] == WRITE_6))) {
2061 * Get the transfer size in blocks.
2063 io_size = (cm->cm_length >> sc->DD_block_exponent);
2066 * Get virtual LBA given in the CDB.
2068 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2069 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2072 * Check that LBA range for I/O does not exceed volume's
2075 if ((virtLBA + (uint64_t)io_size - 1) <=
2078 * Check if the I/O crosses a stripe boundary.  If not,
2079 * translate the virtual LBA to a physical LBA and set
2080 * the DevHandle for the PhysDisk to be used.  If it
2081 * does cross a boundry, do normal I/O.  To get the
2082 * right DevHandle to use, get the map number for the
2083 * column, then use that map number to look up the
2084 * DevHandle of the PhysDisk.
2086 stripe_offset = (uint32_t)virtLBA &
2087 (sc->DD_stripe_size - 1);
2088 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2089 physLBA = (uint32_t)virtLBA >>
2090 sc->DD_stripe_exponent;
2091 stripe_unit = physLBA / sc->DD_num_phys_disks;
2092 column = physLBA % sc->DD_num_phys_disks;
2093 pIO_req->DevHandle =
2094 sc->DD_column_map[column].dev_handle;
2095 cm->cm_desc.SCSIIO.DevHandle =
2098 physLBA = (stripe_unit <<
2099 sc->DD_stripe_exponent) + stripe_offset;
/* Rewrite the 21-bit LBA bytes of the 6-byte CDB in place. */
2100 ptrLBA = &pIO_req->CDB.CDB32[1];
2101 physLBA_byte = (uint8_t)(physLBA >> 16);
2102 *ptrLBA = physLBA_byte;
2103 ptrLBA = &pIO_req->CDB.CDB32[2];
2104 physLBA_byte = (uint8_t)(physLBA >> 8);
2105 *ptrLBA = physLBA_byte;
2106 ptrLBA = &pIO_req->CDB.CDB32[3];
2107 physLBA_byte = (uint8_t)physLBA;
2108 *ptrLBA = physLBA_byte;
2111 * Set flag that Direct Drive I/O is
2114 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2121 * Handle 10 or 16 byte CDBs.
2123 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2124 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2125 (CDB[0] == WRITE_16))) {
2127 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2128 * are 0.  If not, this is accessing beyond 2TB so handle it in
2129 * the else section.  10-byte CDB's are OK.
2131 if ((CDB[0] < READ_16) ||
2132 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2134 * Get the transfer size in blocks.
2136 io_size = (cm->cm_length >> sc->DD_block_exponent);
2139 * Get virtual LBA.  Point to correct lower 4 bytes of
2140 * LBA in the CDB depending on command.
2142 lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2143 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2144 ((uint64_t)CDB[lba_idx + 1] << 16) |
2145 ((uint64_t)CDB[lba_idx + 2] << 8) |
2146 (uint64_t)CDB[lba_idx + 3];
2149 * Check that LBA range for I/O does not exceed volume's
2152 if ((virtLBA + (uint64_t)io_size - 1) <=
2155 * Check if the I/O crosses a stripe boundary.
2156 * If not, translate the virtual LBA to a
2157 * physical LBA and set the DevHandle for the
2158 * PhysDisk to be used.  If it does cross a
2159 * boundry, do normal I/O.  To get the right
2160 * DevHandle to use, get the map number for the
2161 * column, then use that map number to look up
2162 * the DevHandle of the PhysDisk.
2164 stripe_offset = (uint32_t)virtLBA &
2165 (sc->DD_stripe_size - 1);
2166 if ((stripe_offset + io_size) <=
2167 sc->DD_stripe_size) {
2168 physLBA = (uint32_t)virtLBA >>
2169 sc->DD_stripe_exponent;
2170 stripe_unit = physLBA /
2171 sc->DD_num_phys_disks;
2173 sc->DD_num_phys_disks;
2174 pIO_req->DevHandle =
2175 sc->DD_column_map[column].
2177 cm->cm_desc.SCSIIO.DevHandle =
2180 physLBA = (stripe_unit <<
2181 sc->DD_stripe_exponent) +
/* Rewrite the 4 LBA bytes of the 10/16-byte CDB in place. */
2184 &pIO_req->CDB.CDB32[lba_idx];
2185 physLBA_byte = (uint8_t)(physLBA >> 24);
2186 *ptrLBA = physLBA_byte;
2188 &pIO_req->CDB.CDB32[lba_idx + 1];
2189 physLBA_byte = (uint8_t)(physLBA >> 16);
2190 *ptrLBA = physLBA_byte;
2192 &pIO_req->CDB.CDB32[lba_idx + 2];
2193 physLBA_byte = (uint8_t)(physLBA >> 8);
2194 *ptrLBA = physLBA_byte;
2196 &pIO_req->CDB.CDB32[lba_idx + 3];
2197 physLBA_byte = (uint8_t)physLBA;
2198 *ptrLBA = physLBA_byte;
2201 * Set flag that Direct Drive I/O is
2204 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2209 * 16-byte CDB and the upper 4 bytes of the CDB are not
2210 * 0.  Get the transfer size in blocks.
2212 io_size = (cm->cm_length >> sc->DD_block_exponent);
/* NOTE(review): `<< 54` below should almost certainly be `<< 56`
 * (CDB bytes 2..9 form a big-endian 64-bit LBA: shifts 56,48,...,0).
 * Flagged only — cannot safely patch in this sampled view. */
2217 virtLBA = ((uint64_t)CDB[2] << 54) |
2218 ((uint64_t)CDB[3] << 48) |
2219 ((uint64_t)CDB[4] << 40) |
2220 ((uint64_t)CDB[5] << 32) |
2221 ((uint64_t)CDB[6] << 24) |
2222 ((uint64_t)CDB[7] << 16) |
2223 ((uint64_t)CDB[8] << 8) |
2227 * Check that LBA range for I/O does not exceed volume's
2230 if ((virtLBA + (uint64_t)io_size - 1) <=
2233 * Check if the I/O crosses a stripe boundary.
2234 * If not, translate the virtual LBA to a
2235 * physical LBA and set the DevHandle for the
2236 * PhysDisk to be used.  If it does cross a
2237 * boundry, do normal I/O.  To get the right
2238 * DevHandle to use, get the map number for the
2239 * column, then use that map number to look up
2240 * the DevHandle of the PhysDisk.
2242 stripe_offset = (uint32_t)virtLBA &
2243 (sc->DD_stripe_size - 1);
2244 if ((stripe_offset + io_size) <=
2245 sc->DD_stripe_size) {
2246 physLBA = (uint32_t)(virtLBA >>
2247 sc->DD_stripe_exponent);
2248 stripe_unit = physLBA /
2249 sc->DD_num_phys_disks;
2251 sc->DD_num_phys_disks;
2252 pIO_req->DevHandle =
2253 sc->DD_column_map[column].
2255 cm->cm_desc.SCSIIO.DevHandle =
2258 physLBA = (stripe_unit <<
2259 sc->DD_stripe_exponent) +
2263 * Set upper 4 bytes of LBA to 0.  We
2264 * assume that the phys disks are less
2265 * than 2 TB's in size.  Then, set the
2268 pIO_req->CDB.CDB32[2] = 0;
2269 pIO_req->CDB.CDB32[3] = 0;
2270 pIO_req->CDB.CDB32[4] = 0;
2271 pIO_req->CDB.CDB32[5] = 0;
2272 ptrLBA = &pIO_req->CDB.CDB32[6];
2273 physLBA_byte = (uint8_t)(physLBA >> 24);
2274 *ptrLBA = physLBA_byte;
2275 ptrLBA = &pIO_req->CDB.CDB32[7];
2276 physLBA_byte = (uint8_t)(physLBA >> 16);
2277 *ptrLBA = physLBA_byte;
2278 ptrLBA = &pIO_req->CDB.CDB32[8];
2279 physLBA_byte = (uint8_t)(physLBA >> 8);
2280 *ptrLBA = physLBA_byte;
2281 ptrLBA = &pIO_req->CDB.CDB32[9];
2282 physLBA_byte = (uint8_t)physLBA;
2283 *ptrLBA = physLBA_byte;
2286 * Set flag that Direct Drive I/O is
2289 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
/* SMP passthrough support requires CAM SMP CCBs (FreeBSD 9.0+). */
2296 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests: validates the
 * command flags and reply, logs the target SAS address, translates the
 * IOC/SAS status and SMP function result into a CAM status, then syncs
 * and unloads the DMA map and frees the command.
 */
2298 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2300 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2301 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2305 ccb = cm->cm_complete_data;
2308 * Currently there should be no way we can hit this case.  It only
2309 * happens when we have a failure to allocate chain frames, and SMP
2310 * commands require two S/G elements only.  That should be handled
2311 * in the standard request size.
2313 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2314 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2315 __func__, cm->cm_flags);
2316 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2320 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2322 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2323 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2327 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
/* Reassemble the 64-bit SAS address from the two LE 32-bit halves. */
2328 sasaddr = le32toh(req->SASAddress.Low);
2329 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2331 if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2332 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2333 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2334 __func__, rpl->IOCStatus, rpl->SASStatus);
2335 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2339 mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2340 "%#jx completed successfully\n", __func__,
2341 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame holds the function result. */
2343 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2344 ccb->ccb_h.status = CAM_REQ_CMP;
2346 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2350 * We sync in both directions because we had DMAs in the S/G list
2351 * in both directions.
2353 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2354 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2355 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2356 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for @ccb to the expander at
 * @sasaddr.  The request and response buffers are carried as a two-element
 * uio so a single mps_map_command() call maps both.  Completion is handled
 * by mpssas_smpio_complete().
 * NOTE(review): listing elides interior lines; comments only added.
 */
2361 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2363 struct mps_command *cm;
2364 uint8_t *request, *response;
2365 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2366 struct mps_softc *sc;
2375 * XXX We don't yet support physical addresses here.
2377 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2378 mps_printf(sc, "%s: physical addresses not supported\n",
2380 ccb->ccb_h.status = CAM_REQ_INVALID;
2386 * If the user wants to send an S/G list, check to make sure they
2387 * have single buffers.
2389 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2391 * The chip does not support more than one buffer for the
2392 * request or response.
2394 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2395 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2396 mps_printf(sc, "%s: multiple request or response "
2397 "buffer segments not supported for SMP\n",
2399 ccb->ccb_h.status = CAM_REQ_INVALID;
2405 * The CAM_SCATTER_VALID flag was originally implemented
2406 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2407 * We have two. So, just take that flag to mean that we
2408 * might have S/G lists, and look at the S/G segment count
2409 * to figure out whether that is the case for each individual
2412 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2413 bus_dma_segment_t *req_sg;
2415 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2416 request = (uint8_t *)req_sg[0].ds_addr;
2418 request = ccb->smpio.smp_request;
2420 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2421 bus_dma_segment_t *rsp_sg;
2423 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2424 response = (uint8_t *)rsp_sg[0].ds_addr;
2426 response = ccb->smpio.smp_response;
/* No S/G list: the CCB fields point straight at the flat buffers. */
2428 request = ccb->smpio.smp_request;
2429 response = ccb->smpio.smp_response;
2432 cm = mps_alloc_command(sc);
2434 mps_printf(sc, "%s: cannot allocate command\n", __func__);
2435 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Fill in the MPI2 SMP passthrough request frame. */
2440 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2441 bzero(req, sizeof(*req));
2442 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2444 /* Allow the chip to use any route to this SAS address. */
2445 req->PhysicalPort = 0xff;
2447 req->RequestDataLength = ccb->smpio.smp_request_len;
2449 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2451 mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
2452 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2454 mpi_init_sge(cm, req, &req->SGL);
2457 * Set up a uio to pass into mps_map_command(). This allows us to
2458 * do one map command, and one busdma call in there.
2460 cm->cm_uio.uio_iov = cm->cm_iovec;
2461 cm->cm_uio.uio_iovcnt = 2;
2462 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2465 * The read/write flag isn't used by busdma, but set it just in
2466 * case. This isn't exactly accurate, either, since we're going in
2469 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 is the outgoing SMP request, iovec 1 the incoming response. */
2471 cm->cm_iovec[0].iov_base = request;
2472 cm->cm_iovec[0].iov_len = req->RequestDataLength;
2473 cm->cm_iovec[1].iov_base = response;
2474 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2476 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2477 cm->cm_iovec[1].iov_len;
2480 * Trigger a warning message in mps_data_cb() for the user if we
2481 * wind up exceeding two S/G segments. The chip expects one
2482 * segment for the request and another for the response.
2484 cm->cm_max_segs = 2;
2486 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2487 cm->cm_complete = mpssas_smpio_complete;
2488 cm->cm_complete_data = ccb;
2491 * Tell the mapping code that we're using a uio, and that this is
2492 * an SMP passthrough request. There is a little special-case
2493 * logic there (in mps_data_cb()) to handle the bidirectional
2496 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2497 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2499 /* The chip data format is little endian. */
2500 req->SASAddress.High = htole32(sasaddr >> 32);
2501 req->SASAddress.Low = htole32(sasaddr);
2504 * XXX Note that we don't have a timeout/abort mechanism here.
2505 * From the manual, it looks like task management requests only
2506 * work for SCSI IO and SATA passthrough requests. We may need to
2507 * have a mechanism to retry requests in the event of a chip reset
2508 * at least. Hopefully the chip will ensure that any errors short
2509 * of that are relayed back to the driver.
2511 error = mps_map_command(sc, cm);
2512 if ((error != 0) && (error != EINPROGRESS)) {
2513 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
/* Mapping failed outright: release the command and fail the CCB. */
2521 mps_free_command(sc, cm);
2522 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to — the target's own embedded SMP target if it has
 * one, otherwise the parent device (presumably the expander) — and hand
 * the CCB off to mpssas_send_smpcmd().
 * NOTE(review): listing elides interior lines; comments only added.
 */
2529 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2531 struct mps_softc *sc;
2532 struct mpssas_target *targ;
2533 uint64_t sasaddr = 0;
2538 * Make sure the target exists.
2540 targ = &sassc->targets[ccb->ccb_h.target_id];
2541 if (targ->handle == 0x0) {
2542 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
2543 ccb->ccb_h.target_id);
2544 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2550 * If this device has an embedded SMP target, we'll talk to it
2552 * figure out what the expander's address is.
2554 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2555 sasaddr = targ->sasaddr;
2558 * If we don't have a SAS address for the expander yet, try
2559 * grabbing it from the page 0x83 information cached in the
2560 * transport layer for this target. LSI expanders report the
2561 * expander SAS address as the port-associated SAS address in
2562 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2565 * XXX KDM disable this for now, but leave it commented out so that
2566 * it is obvious that this is another possible way to get the SAS
2569 * The parent handle method below is a little more reliable, and
2570 * the other benefit is that it works for devices other than SES
2571 * devices. So you can send a SMP request to a da(4) device and it
2572 * will get routed to the expander that device is attached to.
2573 * (Assuming the da(4) device doesn't contain an SMP target...)
2577 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2581 * If we still don't have a SAS address for the expander, look for
2582 * the parent device of this device, which is probably the expander.
2585 #ifdef OLD_MPS_PROBE
2586 struct mpssas_target *parent_target;
2589 if (targ->parent_handle == 0x0) {
2590 mps_printf(sc, "%s: handle %d does not have a valid "
2591 "parent handle!\n", __func__, targ->handle);
2592 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Old probe path: chase the parent handle through the target table. */
2595 #ifdef OLD_MPS_PROBE
2596 parent_target = mpssas_find_target_by_handle(sassc, 0,
2597 targ->parent_handle);
2599 if (parent_target == NULL) {
2600 mps_printf(sc, "%s: handle %d does not have a valid "
2601 "parent target!\n", __func__, targ->handle);
2602 ccb->ccb_h.status = CAM_REQ_INVALID;
2606 if ((parent_target->devinfo &
2607 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2608 mps_printf(sc, "%s: handle %d parent %d does not "
2609 "have an SMP target!\n", __func__,
2610 targ->handle, parent_target->handle);
2611 ccb->ccb_h.status = CAM_REQ_INVALID;
2616 sasaddr = parent_target->sasaddr;
2617 #else /* OLD_MPS_PROBE */
/* Current path: parent devinfo/sasaddr are cached on the target itself. */
2618 if ((targ->parent_devinfo &
2619 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2620 mps_printf(sc, "%s: handle %d parent %d does not "
2621 "have an SMP target!\n", __func__,
2622 targ->handle, targ->parent_handle);
2623 ccb->ccb_h.status = CAM_REQ_INVALID;
2627 if (targ->parent_sasaddr == 0x0) {
2628 mps_printf(sc, "%s: handle %d parent handle %d does "
2629 "not have a valid SAS address!\n",
2630 __func__, targ->handle, targ->parent_handle);
2631 ccb->ccb_h.status = CAM_REQ_INVALID;
2635 sasaddr = targ->parent_sasaddr;
2636 #endif /* OLD_MPS_PROBE */
/* No usable address found by any method: fail the CCB. */
2641 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
2642 __func__, targ->handle);
2643 ccb->ccb_h.status = CAM_REQ_INVALID;
2646 mpssas_send_smpcmd(sassc, ccb, sasaddr);
2654 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command and
 * issue a target reset (with link reset) for the CCB's target.  Completion
 * is handled by mpssas_resetdev_complete().  Caller must hold mps_lock.
 * NOTE(review): listing elides interior lines; comments only added.
 */
2657 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2659 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2660 struct mps_softc *sc;
2661 struct mps_command *tm;
2662 struct mpssas_target *targ;
2664 mps_dprint(sassc->sc, MPS_TRACE, __func__);
2665 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2668 tm = mps_alloc_command(sc);
2670 mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
2671 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Build the MPI2 task-management request: target reset on this handle. */
2676 targ = &sassc->targets[ccb->ccb_h.target_id];
2677 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2678 req->DevHandle = targ->handle;
2679 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2680 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2682 /* SAS Hard Link Reset / SATA Link Reset */
2683 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2686 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2687 tm->cm_complete = mpssas_resetdev_complete;
2688 tm->cm_complete_data = ccb;
2689 mps_map_command(sc, tm);
/*
 * Completion callback for the target-reset task-management command issued
 * by mpssas_action_resetdev(): report the TM result, set the CCB status
 * (announcing the reset to CAM on success), and free the TM command.
 * NOTE(review): listing elides interior lines; comments only added.
 */
2693 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2695 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2698 mps_dprint(sc, MPS_TRACE, __func__);
2699 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2701 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2702 ccb = tm->cm_complete_data;
2705 * Currently there should be no way we can hit this case. It only
2706 * happens when we have a failure to allocate chain frames, and
2707 * task management commands don't have S/G lists.
2709 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2710 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2712 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2714 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2715 "This should not happen!\n", __func__, tm->cm_flags,
2717 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2721 kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2722 resp->IOCStatus, resp->ResponseCode);
/* TM_COMPLETE means the reset went through; tell CAM a BDR was sent. */
2724 if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2725 ccb->ccb_h.status = CAM_REQ_CMP;
2726 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2730 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2734 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point: turn off MPS_TRACE debugging (it is too noisy
 * while polling, e.g. during a panic dump) and run the interrupt handler
 * synchronously.
 */
2739 mpssas_poll(struct cam_sim *sim)
2741 struct mpssas_softc *sassc;
2743 sassc = cam_sim_softc(sim);
2745 if (sassc->sc->mps_debug & MPS_TRACE) {
2746 /* frequent debug messages during a panic just slow
2747 * everything down too much.
2749 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2750 sassc->sc->mps_debug &= ~MPS_TRACE;
2753 mps_intr_locked(sassc->sc);
/*
 * Completion callback for a bus-rescan CCB queued by mpssas_rescan():
 * log the completion, free the path and the CCB.  On pre-1000006 FreeBSD
 * (no AC_ADVINFO_CHANGED), also gather EEDP info for all existing LUNs.
 * NOTE(review): listing elides interior lines; comments only added.
 */
2757 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2759 struct mpssas_softc *sassc;
2762 if (done_ccb == NULL)
2765 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2767 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2769 xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2770 mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2772 xpt_free_path(done_ccb->ccb_h.path);
2773 xpt_free_ccb(done_ccb);
2775 #if __FreeBSD_version < 1000006
2777 * Before completing scan, get EEDP stuff for all of the existing
2780 mpssas_check_eedp(sassc);
2785 /* thread to handle bus rescans */
/*
 * Kernel thread that sleeps on ccb_scanq, dequeues and services rescan
 * CCBs, and exits once MPSSAS_SHUTDOWN is set (clearing MPSSAS_SCANTHREAD
 * and waking anyone waiting on the flags word on the way out).
 * NOTE(review): listing elides interior lines; comments only added.
 */
2787 mpssas_scanner_thread(void *arg)
2789 struct mpssas_softc *sassc;
2790 struct mps_softc *sc;
2793 sassc = (struct mpssas_softc *)arg;
2796 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
/* Sleep until mpssas_rescan() queues work or shutdown is requested. */
2800 lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 0);
2801 if (sassc->flags & MPSSAS_SHUTDOWN) {
2802 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2805 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
2808 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2812 sassc->flags &= ~MPSSAS_SCANTHREAD;
2813 wakeup(&sassc->flags);
2815 mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
/*
 * Queue a rescan CCB for the scanner thread: stash the softc in the CCB,
 * point the completion at mpssas_rescan_done(), append it to ccb_scanq and
 * wake the scanner.  Caller must hold mps_lock.
 * NOTE(review): listing elides interior lines; comments only added.
 */
2820 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2824 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2826 KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2831 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2832 mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2834 /* Prepare request */
2835 ccb->ccb_h.ppriv_ptr1 = sassc;
2836 ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2837 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2838 TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2839 wakeup(&sassc->ccb_scanq);
2842 #if __FreeBSD_version >= 1000006
/*
 * CAM async-event callback.  For AC_ADVINFO_CHANGED with buftype
 * CDAI_TYPE_RCAPLONG on a device behind our SIM: look up (or create) the
 * LUN entry, re-fetch the long read-capacity data via XPT_DEV_ADVINFO,
 * and update the LUN's EEDP formatting state and block size.
 * NOTE(review): listing elides interior lines; comments only added.
 */
2844 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2847 struct mps_softc *sc;
2849 sc = (struct mps_softc *)callback_arg;
2852 case AC_ADVINFO_CHANGED: {
2853 struct mpssas_target *target;
2854 struct mpssas_softc *sassc;
2855 struct scsi_read_capacity_data_long rcap_buf;
2856 struct ccb_dev_advinfo cdai;
2857 struct mpssas_lun *lun;
2862 buftype = (uintptr_t)arg;
2868 * We're only interested in read capacity data changes.
2870 if (buftype != CDAI_TYPE_RCAPLONG)
2874 * We're only interested in devices that are attached to
2877 if (xpt_path_path_id(path) != sassc->sim->path_id)
2881 * We should have a handle for this, but check to make sure.
2883 target = &sassc->targets[xpt_path_target_id(path)];
2884 if (target->handle == 0)
/* Find (or, below, create) the LUN entry for this path's LUN id. */
2887 lunid = xpt_path_lun_id(path);
2889 SLIST_FOREACH(lun, &target->luns, lun_link) {
2890 if (lun->lun_id == lunid) {
2896 if (found_lun == 0) {
2897 lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
2900 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
2901 "LUN for EEDP support.\n");
2904 lun->lun_id = lunid;
2905 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Ask the transport layer for the cached long read-capacity data. */
2908 bzero(&rcap_buf, sizeof(rcap_buf));
2909 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
2910 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
2911 cdai.ccb_h.flags = CAM_DIR_IN;
2912 cdai.buftype = CDAI_TYPE_RCAPLONG;
2914 cdai.bufsiz = sizeof(rcap_buf);
2915 cdai.buf = (uint8_t *)&rcap_buf;
2916 xpt_action((union ccb *)&cdai);
2917 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
2918 cam_release_devq(cdai.ccb_h.path,
/* Protection-enable bit set => LUN is EEDP-formatted. */
2921 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
2922 && (rcap_buf.prot & SRC16_PROT_EN)) {
2923 lun->eedp_formatted = TRUE;
2924 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
2926 lun->eedp_formatted = FALSE;
2927 lun->eedp_block_size = 0;
2935 #else /* __FreeBSD_version >= 1000006 */
/*
 * Pre-1000006 EEDP probe: walk every target and every existing LUN
 * (detected via cam_periph_find), create LUN list entries as needed, and
 * issue a READ CAPACITY (16) whose completion (mpssas_read_cap_done)
 * records whether the LUN is EEDP-formatted.
 * NOTE(review): listing elides interior lines; comments only added.
 */
2938 mpssas_check_eedp(struct mpssas_softc *sassc)
2940 struct mps_softc *sc = sassc->sc;
2941 struct ccb_scsiio *csio;
2942 struct scsi_read_capacity_16 *scsi_cmd;
2943 struct scsi_read_capacity_eedp *rcap_buf;
2945 path_id_t pathid = cam_sim_path(sassc->sim);
2946 target_id_t targetid;
2948 struct cam_periph *found_periph;
2949 struct mpssas_target *target;
2950 struct mpssas_lun *lun;
2954 * Issue a READ CAPACITY 16 command to each LUN of each target. This
2955 * info is used to determine if the LUN is formatted for EEDP support.
2957 for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
2958 target = &sassc->targets[targetid];
2959 if (target->handle == 0x0) {
/* Per-LUN data buffer; freed in mpssas_read_cap_done() or on error. */
2966 kmalloc(sizeof(struct scsi_read_capacity_eedp),
2967 M_MPT2, M_NOWAIT | M_ZERO);
2968 if (rcap_buf == NULL) {
2969 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
2970 "capacity buffer for EEDP support.\n");
2974 ccb = kmalloc(sizeof(union ccb), M_TEMP,
2977 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2978 pathid, targetid, lunid) != CAM_REQ_CMP) {
2979 mps_dprint(sc, MPS_FAULT, "Unable to create "
2980 "path for EEDP support\n");
2981 kfree(rcap_buf, M_MPT2);
2987 * If a periph is returned, the LUN exists. Create an
2988 * entry in the target's LUN list.
2990 if ((found_periph = cam_periph_find(ccb->ccb_h.path,
2993 * If LUN is already in list, don't create a new
2997 SLIST_FOREACH(lun, &target->luns, lun_link) {
2998 if (lun->lun_id == lunid) {
3004 lun = kmalloc(sizeof(struct mpssas_lun),
3005 M_MPT2, M_WAITOK | M_ZERO);
3007 mps_dprint(sc, MPS_FAULT,
3008 "Unable to alloc LUN for "
3010 kfree(rcap_buf, M_MPT2);
3011 xpt_free_path(ccb->ccb_h.path);
3015 lun->lun_id = lunid;
3016 SLIST_INSERT_HEAD(&target->luns, lun,
3022 * Issue a READ CAPACITY 16 command for the LUN.
3023 * The mpssas_read_cap_done function will load
3024 * the read cap info into the LUN struct.
3027 csio->ccb_h.func_code = XPT_SCSI_IO;
3028 csio->ccb_h.flags = CAM_DIR_IN;
3029 csio->ccb_h.retry_count = 4;
3030 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3031 csio->ccb_h.timeout = 60000;
3032 csio->data_ptr = (uint8_t *)rcap_buf;
3033 csio->dxfer_len = sizeof(struct
3034 scsi_read_capacity_eedp);
3035 csio->sense_len = MPS_SENSE_LEN;
3036 csio->cdb_len = sizeof(*scsi_cmd);
3037 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the READ CAPACITY (16) CDB (opcode 0x9E / SAI 0x10). */
3039 scsi_cmd = (struct scsi_read_capacity_16 *)
3040 &csio->cdb_io.cdb_bytes;
3041 bzero(scsi_cmd, sizeof(*scsi_cmd));
3042 scsi_cmd->opcode = 0x9E;
3043 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* CDB byte 13 is the allocation length (low byte). */
3044 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
3045 scsi_read_capacity_eedp);
3048 * Set the path, target and lun IDs for the READ
3051 ccb->ccb_h.path_id =
3052 xpt_path_path_id(ccb->ccb_h.path);
3053 ccb->ccb_h.target_id =
3054 xpt_path_target_id(ccb->ccb_h.path);
3055 ccb->ccb_h.target_lun =
3056 xpt_path_lun_id(ccb->ccb_h.path);
3058 ccb->ccb_h.ppriv_ptr1 = sassc;
/* LUN did not exist: release the buffer and path for this lunid. */
3061 kfree(rcap_buf, M_MPT2);
3062 xpt_free_path(ccb->ccb_h.path);
/* Keep probing successive LUN ids until one is not found. */
3065 } while (found_periph);
/*
 * Completion callback for the EEDP READ CAPACITY (16) issued by
 * mpssas_check_eedp(): locate the matching LUN on the target, record its
 * EEDP formatting state and block size, then free the data buffer, the
 * path and the CCB.
 * NOTE(review): listing elides interior lines; comments only added.
 */
3071 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3073 struct mpssas_softc *sassc;
3074 struct mpssas_target *target;
3075 struct mpssas_lun *lun;
3076 struct scsi_read_capacity_eedp *rcap_buf;
3078 if (done_ccb == NULL)
3081 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3084 * Get the LUN ID for the path and look it up in the LUN list for the
3087 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3088 target = &sassc->targets[done_ccb->ccb_h.target_id];
3089 SLIST_FOREACH(lun, &target->luns, lun_link) {
3090 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3094 * Got the LUN in the target's LUN list. Fill it in
3095 * with EEDP info. If the READ CAP 16 command had some
3096 * SCSI error (common if command is not supported), mark
3097 * the lun as not supporting EEDP and set the block size
3100 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3101 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3102 lun->eedp_formatted = FALSE;
3103 lun->eedp_block_size = 0;
/* PROT_EN (bit 0 of the protect byte) => LUN formatted with EEDP. */
3107 if (rcap_buf->protect & 0x01) {
3108 lun->eedp_formatted = TRUE;
3109 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3114 // Finished with this CCB and path.
3115 kfree(rcap_buf, M_MPT2);
3116 xpt_free_path(done_ccb->ccb_h.path);
3117 xpt_free_ccb(done_ccb);
3119 #endif /* __FreeBSD_version >= 1000006 */
/*
 * Begin SAS discovery: bump the startup refcount (keeps the simq frozen),
 * set wait_for_port_enable, and send the port enable request.
 * NOTE(review): listing elides interior lines; comments only added.
 */
3122 mpssas_startup(struct mps_softc *sc)
3124 struct mpssas_softc *sassc;
3127 * Send the port enable message and set the wait_for_port_enable flag.
3128 * This flag helps to keep the simq frozen until all discovery events
3132 mpssas_startup_increment(sassc);
3133 sc->wait_for_port_enable = 1;
3134 mpssas_send_portenable(sc);
/*
 * Allocate a command, build an MPI2 PORT_ENABLE request, and submit it.
 * Completion is handled by mpssas_portenable_complete().
 * NOTE(review): listing elides interior lines; comments only added.
 */
3139 mpssas_send_portenable(struct mps_softc *sc)
3141 MPI2_PORT_ENABLE_REQUEST *request;
3142 struct mps_command *cm;
3144 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3146 if ((cm = mps_alloc_command(sc)) == NULL)
3148 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3149 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3150 request->MsgFlags = 0;
3152 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3153 cm->cm_complete = mpssas_portenable_complete;
3157 mps_map_command(sc, cm);
3158 mps_dprint(sc, MPS_TRACE,
3159 "mps_send_portenable finished cm %p req %p complete %p\n",
3160 cm, cm->cm_req, cm->cm_complete);
3165 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3167 MPI2_PORT_ENABLE_REPLY *reply;
3168 struct mpssas_softc *sassc;
3169 struct mpssas_target *target;
3172 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3176 * Currently there should be no way we can hit this case. It only
3177 * happens when we have a failure to allocate chain frames, and
3178 * port enable commands don't have S/G lists.
3180 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3181 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3182 "This should not happen!\n", __func__, cm->cm_flags);
3185 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3187 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3188 else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3189 MPI2_IOCSTATUS_SUCCESS)
3190 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3192 mps_free_command(sc, cm);
3193 if (sc->mps_ich.ich_arg != NULL) {
3194 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3195 config_intrhook_disestablish(&sc->mps_ich);
3196 sc->mps_ich.ich_arg = NULL;
3200 * Get WarpDrive info after discovery is complete but before the scan
3201 * starts. At this point, all devices are ready to be exposed to the
3202 * OS. If devices should be hidden instead, take them out of the
3203 * 'targets' array before the scan. The devinfo for a disk will have
3204 * some info and a volume's will be 0. Use that to remove disks.
3206 mps_wd_config_pages(sc);
3207 if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
3208 && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
3209 || (sc->WD_valid_config && (sc->WD_hide_expose ==
3210 MPS_WD_HIDE_IF_VOLUME))) {
3211 for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
3212 target = &sassc->targets[i];
3213 if (target->devinfo) {
3214 target->devinfo = 0x0;
3215 target->encl_handle = 0x0;
3216 target->encl_slot = 0x0;
3217 target->handle = 0x0;
3219 target->linkrate = 0x0;
3220 target->flags = 0x0;
3226 * Done waiting for port enable to complete. Decrement the refcount.
3227 * If refcount is 0, discovery is complete and a rescan of the bus can
3228 * take place. Since the simq was explicitly frozen before port
3229 * enable, it must be explicitly released here to keep the
3230 * freeze/release count in sync.
3232 sc->wait_for_port_enable = 0;
3233 sc->port_enable_complete = 1;
3234 mpssas_startup_decrement(sassc);
3235 xpt_release_simq(sassc->sim, 1);