2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
5 * Copyright (c) 2000, 2001 by Greg Ansley
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
61 * Support from LSI-Logic has also gone a great deal toward making this a
62 * workable subsystem and is gratefully acknowledged.
65 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66 * Copyright (c) 2005, WHEEL Sp. z o.o.
67 * Copyright (c) 2004, 2005 Justin T. Gibbs
68 * All rights reserved.
70 * Redistribution and use in source and binary forms, with or without
71 * modification, are permitted provided that the following conditions are
73 * 1. Redistributions of source code must retain the above copyright
74 * notice, this list of conditions and the following disclaimer.
75 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76 * substantially similar to the "NO WARRANTY" disclaimer below
77 * ("Disclaimer") and any redistribution must be conditioned upon including
78 * a substantially similar Disclaimer requirement for further binary
80 * 3. Neither the names of the above listed copyright holders nor the names
81 * of any contributors may be used to endorse or promote products derived
82 * from this software without specific prior written permission.
84 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.84 2012/02/11 12:03:44 marius Exp $
99 #include <dev/disk/mpt/mpt.h>
100 #include <dev/disk/mpt/mpt_cam.h>
101 #include <dev/disk/mpt/mpt_raid.h>
103 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/disk/mpt/mpilib/mpi_init.h"
105 #include "dev/disk/mpt/mpilib/mpi_targ.h"
106 #include "dev/disk/mpt/mpilib/mpi_fc.h"
107 #include "dev/disk/mpt/mpilib/mpi_sas.h"
108 #include <sys/callout.h>
109 #include <sys/kthread.h>
110 #include <sys/sysctl.h>
112 static void mpt_poll(struct cam_sim *);
113 static timeout_t mpt_timeout;
114 static void mpt_action(struct cam_sim *, union ccb *);
116 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
117 static void mpt_setwidth(struct mpt_softc *, int, int);
118 static void mpt_setsync(struct mpt_softc *, int, int, int);
119 static int mpt_update_spi_config(struct mpt_softc *, int);
121 static mpt_reply_handler_t mpt_scsi_reply_handler;
122 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
123 static mpt_reply_handler_t mpt_fc_els_reply_handler;
124 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
125 MSG_DEFAULT_REPLY *);
126 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
127 static int mpt_fc_reset_link(struct mpt_softc *, int);
129 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
130 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
131 static void mpt_recovery_thread(void *arg);
132 static void mpt_recover_commands(struct mpt_softc *mpt);
134 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
135 u_int, u_int, u_int, int);
137 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
138 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
139 static int mpt_add_els_buffers(struct mpt_softc *mpt);
140 static int mpt_add_target_commands(struct mpt_softc *mpt);
141 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
142 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
143 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
144 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
145 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
146 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
147 uint8_t, uint8_t const *);
149 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
150 tgt_resource_t *, int);
151 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
152 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
153 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
154 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
156 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
157 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
158 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
159 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
161 static mpt_probe_handler_t mpt_cam_probe;
162 static mpt_attach_handler_t mpt_cam_attach;
163 static mpt_enable_handler_t mpt_cam_enable;
164 static mpt_ready_handler_t mpt_cam_ready;
165 static mpt_event_handler_t mpt_cam_event;
166 static mpt_reset_handler_t mpt_cam_ioc_reset;
167 static mpt_detach_handler_t mpt_cam_detach;
169 static struct mpt_personality mpt_cam_personality =
172 .probe = mpt_cam_probe,
173 .attach = mpt_cam_attach,
174 .enable = mpt_cam_enable,
175 .ready = mpt_cam_ready,
176 .event = mpt_cam_event,
177 .reset = mpt_cam_ioc_reset,
178 .detach = mpt_cam_detach,
181 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
182 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
184 int mpt_enable_sata_wc = -1;
185 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
188 mpt_cam_probe(struct mpt_softc *mpt)
193 * Only attach to nodes that support the initiator or target role
194 * (or want to) or have RAID physical devices that need CAM pass-thru
197 if (mpt->do_cfg_role) {
198 role = mpt->cfg_role;
202 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
203 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
210 mpt_cam_attach(struct mpt_softc *mpt)
212 struct cam_devq *devq;
213 mpt_handler_t handler;
218 TAILQ_INIT(&mpt->request_timeout_list);
219 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
220 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
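/*
 * Size the SIM queue to the smaller of the credits the IOC advertises
 * (GlobalCredits) and the number of requests the driver can allocate;
 * requests reserved below (e.g. the ELS buffers) are subtracted from
 * this count before the devq is created.
 */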
222 handler.reply_handler = mpt_scsi_reply_handler;
223 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
224 &scsi_io_handler_id);
230 handler.reply_handler = mpt_scsi_tmf_reply_handler;
231 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
232 &scsi_tmf_handler_id);
239 * If we're fibre channel and could support target mode, we register
240 * an ELS reply handler and give it resources.
242 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
243 handler.reply_handler = mpt_fc_els_reply_handler;
244 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
250 if (mpt_add_els_buffers(mpt) == FALSE) {
255 maxq -= mpt->els_cmds_allocated;
259 * If we support target mode, we register a reply handler for it,
260 * but don't add command resources until we actually enable target
263 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
264 handler.reply_handler = mpt_scsi_tgt_reply_handler;
265 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
266 &mpt->scsi_tgt_handler_id);
274 handler.reply_handler = mpt_sata_pass_reply_handler;
275 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
276 &sata_pass_handler_id);
284 * We keep one request reserved for timeout TMF requests.
286 mpt->tmf_req = mpt_get_request(mpt, FALSE);
287 if (mpt->tmf_req == NULL) {
288 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
295 * Mark the request as free even though not on the free list.
296 * There is only one TMF request allowed to be outstanding at
297 * a time and the TMF routines perform their own allocation
298 * tracking using the standard state flags.
300 mpt->tmf_req->state = REQ_STATE_FREE;
304 * The rest of this is CAM foo, for which we need to drop our lock
308 if (mpt_spawn_recovery_thread(mpt) != 0) {
309 mpt_prt(mpt, "Unable to spawn recovery thread!\n");
315 * Create the device queue for our SIM(s).
317 devq = cam_simq_alloc(maxq);
319 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
325 * Construct our SIM entry.
328 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
329 if (mpt->sim == NULL) {
330 mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
331 cam_devq_release(devq);
337 * Register exactly this bus.
340 if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
341 mpt_prt(mpt, "Bus registration Failed!\n");
347 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
348 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
349 mpt_prt(mpt, "Unable to allocate Path!\n");
357 * Only register a second bus for RAID physical
358 * devices if the controller supports RAID.
360 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
365 * Create a "bus" to export all hidden disks to CAM.
368 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
369 if (mpt->phydisk_sim == NULL) {
370 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
379 if (xpt_bus_register(mpt->phydisk_sim, 1) !=
381 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
387 if (xpt_create_path(&mpt->phydisk_path, NULL,
388 cam_sim_path(mpt->phydisk_sim),
389 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
390 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
396 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
405 * Read FC configuration information
408 mpt_read_config_info_fc(struct mpt_softc *mpt)
410 char *topology = NULL;
413 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
414 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
418 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
419 mpt->mpt_fcport_page0.Header.PageVersion,
420 mpt->mpt_fcport_page0.Header.PageLength,
421 mpt->mpt_fcport_page0.Header.PageNumber,
422 mpt->mpt_fcport_page0.Header.PageType);
425 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
426 sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
428 mpt_prt(mpt, "failed to read FC Port Page 0\n");
431 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
433 mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
435 switch (mpt->mpt_fcport_page0.Flags &
436 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
437 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
438 mpt->mpt_fcport_speed = 0;
439 topology = "<NO LOOP>";
441 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
444 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
445 topology = "NL-Port";
447 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
450 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
451 topology = "FL-Port";
454 mpt->mpt_fcport_speed = 0;
459 mpt_lprt(mpt, MPT_PRT_INFO,
460 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
461 "Speed %u-Gbit\n", topology,
462 mpt->mpt_fcport_page0.WWNN.High,
463 mpt->mpt_fcport_page0.WWNN.Low,
464 mpt->mpt_fcport_page0.WWPN.High,
465 mpt->mpt_fcport_page0.WWPN.Low,
466 mpt->mpt_fcport_speed);
469 ksnprintf(mpt->scinfo.fc.wwnn,
470 sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
471 mpt->mpt_fcport_page0.WWNN.High,
472 mpt->mpt_fcport_page0.WWNN.Low);
474 ksnprintf(mpt->scinfo.fc.wwpn,
475 sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
476 mpt->mpt_fcport_page0.WWPN.High,
477 mpt->mpt_fcport_page0.WWPN.Low);
479 SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
480 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
481 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
482 "World Wide Node Name");
484 SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
485 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
486 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
487 "World Wide Port Name");
495 * Set FC configuration information.
498 mpt_set_initial_config_fc(struct mpt_softc *mpt)
500 CONFIG_PAGE_FC_PORT_1 fc;
505 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
506 &fc.Header, FALSE, 5000);
508 mpt_prt(mpt, "failed to read FC page 1 header\n");
509 return (mpt_fc_reset_link(mpt, 1));
512 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
513 &fc.Header, sizeof (fc), FALSE, 5000);
515 mpt_prt(mpt, "failed to read FC page 1\n");
516 return (mpt_fc_reset_link(mpt, 1));
518 mpt2host_config_page_fc_port_1(&fc);
521 * Check our flags to make sure we support the role we want.
527 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
528 role |= MPT_ROLE_INITIATOR;
530 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
531 role |= MPT_ROLE_TARGET;
534 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
536 if (mpt->do_cfg_role == 0) {
537 role = mpt->cfg_role;
539 mpt->do_cfg_role = 0;
542 if (role != mpt->cfg_role) {
543 if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
544 if ((role & MPT_ROLE_INITIATOR) == 0) {
545 mpt_prt(mpt, "adding initiator role\n");
546 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
549 mpt_prt(mpt, "keeping initiator role\n");
551 } else if (role & MPT_ROLE_INITIATOR) {
552 mpt_prt(mpt, "removing initiator role\n");
555 if (mpt->cfg_role & MPT_ROLE_TARGET) {
556 if ((role & MPT_ROLE_TARGET) == 0) {
557 mpt_prt(mpt, "adding target role\n");
558 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
561 mpt_prt(mpt, "keeping target role\n");
563 } else if (role & MPT_ROLE_TARGET) {
564 mpt_prt(mpt, "removing target role\n");
567 mpt->role = mpt->cfg_role;
570 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
571 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
572 mpt_prt(mpt, "adding OXID option\n");
573 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
580 host2mpt_config_page_fc_port_1(&fc);
581 r = mpt_write_cfg_page(mpt,
582 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
583 sizeof(fc), FALSE, 5000);
585 mpt_prt(mpt, "failed to update NVRAM with changes\n");
588 mpt_prt(mpt, "NOTE: NVRAM changes will not take "
589 "effect until next reboot or IOC reset\n");
595 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
597 ConfigExtendedPageHeader_t hdr;
598 struct mptsas_phyinfo *phyinfo;
599 SasIOUnitPage0_t *buffer;
602 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
603 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
607 if (hdr.ExtPageLength == 0) {
612 len = hdr.ExtPageLength * 4;
613 buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
614 if (buffer == NULL) {
619 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
620 0, &hdr, buffer, len, 0, 10000);
622 kfree(buffer, M_DEVBUF);
626 portinfo->num_phys = buffer->NumPhys;
627 portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
628 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
629 if (portinfo->phy_info == NULL) {
630 kfree(buffer, M_DEVBUF);
635 for (i = 0; i < portinfo->num_phys; i++) {
636 phyinfo = &portinfo->phy_info[i];
637 phyinfo->phy_num = i;
638 phyinfo->port_id = buffer->PhyData[i].Port;
639 phyinfo->negotiated_link_rate =
640 buffer->PhyData[i].NegotiatedLinkRate;
642 le16toh(buffer->PhyData[i].ControllerDevHandle);
645 kfree(buffer, M_DEVBUF);
651 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
652 uint32_t form, uint32_t form_specific)
654 ConfigExtendedPageHeader_t hdr;
655 SasPhyPage0_t *buffer;
658 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
659 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
663 if (hdr.ExtPageLength == 0) {
668 buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
669 if (buffer == NULL) {
674 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
675 form + form_specific, &hdr, buffer,
676 sizeof(SasPhyPage0_t), 0, 10000);
678 kfree(buffer, M_DEVBUF);
682 phy_info->hw_link_rate = buffer->HwLinkRate;
683 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
684 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
685 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
687 kfree(buffer, M_DEVBUF);
693 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
694 uint32_t form, uint32_t form_specific)
696 ConfigExtendedPageHeader_t hdr;
697 SasDevicePage0_t *buffer;
698 uint64_t sas_address;
701 bzero(device_info, sizeof(*device_info));
702 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
703 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
707 if (hdr.ExtPageLength == 0) {
712 buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
713 if (buffer == NULL) {
718 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
719 form + form_specific, &hdr, buffer,
720 sizeof(SasDevicePage0_t), 0, 10000);
722 kfree(buffer, M_DEVBUF);
726 device_info->dev_handle = le16toh(buffer->DevHandle);
727 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
728 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
729 device_info->slot = le16toh(buffer->Slot);
730 device_info->phy_num = buffer->PhyNum;
731 device_info->physical_port = buffer->PhysicalPort;
732 device_info->target_id = buffer->TargetID;
733 device_info->bus = buffer->Bus;
734 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
735 device_info->sas_address = le64toh(sas_address);
736 device_info->device_info = le32toh(buffer->DeviceInfo);
738 kfree(buffer, M_DEVBUF);
744 * Read SAS configuration information. Nothing to do yet.
747 mpt_read_config_info_sas(struct mpt_softc *mpt)
749 struct mptsas_portinfo *portinfo;
750 struct mptsas_phyinfo *phyinfo;
753 portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
754 if (portinfo == NULL)
757 error = mptsas_sas_io_unit_pg0(mpt, portinfo);
759 kfree(portinfo, M_DEVBUF);
763 for (i = 0; i < portinfo->num_phys; i++) {
764 phyinfo = &portinfo->phy_info[i];
765 error = mptsas_sas_phy_pg0(mpt, phyinfo,
766 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
767 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
770 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
771 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
772 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
776 phyinfo->identify.phy_num = phyinfo->phy_num = i;
777 if (phyinfo->attached.dev_handle)
778 error = mptsas_sas_device_pg0(mpt,
780 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
781 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
782 phyinfo->attached.dev_handle);
786 mpt->sas_portinfo = portinfo;
791 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
794 SataPassthroughRequest_t *pass;
798 req = mpt_get_request(mpt, 0);
802 pass = req->req_vbuf;
803 bzero(pass, sizeof(SataPassthroughRequest_t));
804 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
805 pass->TargetID = devinfo->target_id;
806 pass->Bus = devinfo->bus;
807 pass->PassthroughFlags = 0;
808 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
809 pass->DataLength = 0;
810 pass->MsgContext = htole32(req->index | sata_pass_handler_id);
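/*
 * The command is carried as an ATA register host-to-device FIS:
 * byte 0 is the FIS type (0x27), byte 1 sets the 'C' (command update)
 * bit, byte 2 is SET FEATURES (0xef), and the feature value in byte 3
 * is 0x02 to enable the drive write cache or 0x82 to disable it.
 */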
811 pass->CommandFIS[0] = 0x27;
812 pass->CommandFIS[1] = 0x80;
813 pass->CommandFIS[2] = 0xef;
814 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
815 pass->CommandFIS[7] = 0x40;
816 pass->CommandFIS[15] = 0x08;
818 mpt_check_doorbell(mpt);
819 mpt_send_cmd(mpt, req);
820 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
823 mpt_free_request(mpt, req);
824 kprintf("error %d sending passthrough\n", error);
828 status = le16toh(req->IOCStatus);
829 if (status != MPI_IOCSTATUS_SUCCESS) {
830 mpt_free_request(mpt, req);
831 kprintf("IOCSTATUS %d\n", status);
835 mpt_free_request(mpt, req);
839 * Set SAS configuration information. Nothing to do yet.
842 mpt_set_initial_config_sas(struct mpt_softc *mpt)
844 struct mptsas_phyinfo *phyinfo;
847 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
848 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
849 phyinfo = &mpt->sas_portinfo->phy_info[i];
850 if (phyinfo->attached.dev_handle == 0)
852 if ((phyinfo->attached.device_info &
853 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
856 device_printf(mpt->dev,
857 "%sabling SATA WC on phy %d\n",
858 (mpt_enable_sata_wc) ? "En" : "Dis", i);
859 mptsas_set_sata_wc(mpt, &phyinfo->attached,
868 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
869 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
873 if (reply_frame != NULL) {
874 req->IOCStatus = le16toh(reply_frame->IOCStatus);
876 req->state &= ~REQ_STATE_QUEUED;
877 req->state |= REQ_STATE_DONE;
878 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
879 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
881 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
883 * Whew- we can free this request (late completion)
885 mpt_free_request(mpt, req);
893 * Read SCSI configuration information
896 mpt_read_config_info_spi(struct mpt_softc *mpt)
900 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
901 &mpt->mpt_port_page0.Header, FALSE, 5000);
905 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
906 mpt->mpt_port_page0.Header.PageVersion,
907 mpt->mpt_port_page0.Header.PageLength,
908 mpt->mpt_port_page0.Header.PageNumber,
909 mpt->mpt_port_page0.Header.PageType);
911 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
912 &mpt->mpt_port_page1.Header, FALSE, 5000);
916 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
917 mpt->mpt_port_page1.Header.PageVersion,
918 mpt->mpt_port_page1.Header.PageLength,
919 mpt->mpt_port_page1.Header.PageNumber,
920 mpt->mpt_port_page1.Header.PageType);
922 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
923 &mpt->mpt_port_page2.Header, FALSE, 5000);
927 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
928 mpt->mpt_port_page2.Header.PageVersion,
929 mpt->mpt_port_page2.Header.PageLength,
930 mpt->mpt_port_page2.Header.PageNumber,
931 mpt->mpt_port_page2.Header.PageType);
933 for (i = 0; i < 16; i++) {
934 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
935 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
939 mpt_lprt(mpt, MPT_PRT_DEBUG,
940 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
941 mpt->mpt_dev_page0[i].Header.PageVersion,
942 mpt->mpt_dev_page0[i].Header.PageLength,
943 mpt->mpt_dev_page0[i].Header.PageNumber,
944 mpt->mpt_dev_page0[i].Header.PageType);
946 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
947 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
951 mpt_lprt(mpt, MPT_PRT_DEBUG,
952 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
953 mpt->mpt_dev_page1[i].Header.PageVersion,
954 mpt->mpt_dev_page1[i].Header.PageLength,
955 mpt->mpt_dev_page1[i].Header.PageNumber,
956 mpt->mpt_dev_page1[i].Header.PageType);
960 * At this point, we don't *have* to fail. As long as we have
961 * valid config header information, we can (barely) lurch
965 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
966 sizeof(mpt->mpt_port_page0), FALSE, 5000);
968 mpt_prt(mpt, "failed to read SPI Port Page 0\n");
970 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
971 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
972 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
973 mpt->mpt_port_page0.Capabilities,
974 mpt->mpt_port_page0.PhysicalInterface);
977 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
978 sizeof(mpt->mpt_port_page1), FALSE, 5000);
980 mpt_prt(mpt, "failed to read SPI Port Page 1\n");
982 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
983 mpt_lprt(mpt, MPT_PRT_DEBUG,
984 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
985 mpt->mpt_port_page1.Configuration,
986 mpt->mpt_port_page1.OnBusTimerValue);
989 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
990 sizeof(mpt->mpt_port_page2), FALSE, 5000);
992 mpt_prt(mpt, "failed to read SPI Port Page 2\n");
994 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
995 "Port Page 2: Flags %x Settings %x\n",
996 mpt->mpt_port_page2.PortFlags,
997 mpt->mpt_port_page2.PortSettings);
998 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
999 for (i = 0; i < 16; i++) {
1000 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1001 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1002 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1003 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1004 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1008 for (i = 0; i < 16; i++) {
1009 rv = mpt_read_cur_cfg_page(mpt, i,
1010 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1014 "cannot read SPI Target %d Device Page 0\n", i);
1017 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1018 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1019 "target %d page 0: Negotiated Params %x Information %x\n",
1020 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1021 mpt->mpt_dev_page0[i].Information);
1023 rv = mpt_read_cur_cfg_page(mpt, i,
1024 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1028 "cannot read SPI Target %d Device Page 1\n", i);
1031 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1032 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1033 "target %d page 1: Requested Params %x Configuration %x\n",
1034 i, mpt->mpt_dev_page1[i].RequestedParameters,
1035 mpt->mpt_dev_page1[i].Configuration);
1041 * Validate SPI configuration information.
1043 * In particular, validate SPI Port Page 1.
1046 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1048 int error, i, pp1val;
1050 mpt->mpt_disc_enable = 0xff;
1051 mpt->mpt_tag_enable = 0;
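/*
 * SPI Port Page 1's Configuration word holds our initiator ID in the
 * low bits and a one-bit-per-ID "respond to selection" mask in the
 * field selected by MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID;
 * if the page disagrees with that, it is rewritten below.
 */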
1053 pp1val = ((1 << mpt->mpt_ini_id) <<
1054 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1055 if (mpt->mpt_port_page1.Configuration != pp1val) {
1056 CONFIG_PAGE_SCSI_PORT_1 tmp;
1058 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1059 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1060 tmp = mpt->mpt_port_page1;
1061 tmp.Configuration = pp1val;
1062 host2mpt_config_page_scsi_port_1(&tmp);
1063 error = mpt_write_cur_cfg_page(mpt, 0,
1064 &tmp.Header, sizeof(tmp), FALSE, 5000);
1068 error = mpt_read_cur_cfg_page(mpt, 0,
1069 &tmp.Header, sizeof(tmp), FALSE, 5000);
1073 mpt2host_config_page_scsi_port_1(&tmp);
1074 if (tmp.Configuration != pp1val) {
1076 "failed to reset SPI Port Page 1 Config value\n");
1079 mpt->mpt_port_page1 = tmp;
1083 * The purpose of this exercise is to get
1084 * all targets back to async/narrow.
1086 * We skip this step if the BIOS has already negotiated
1087 * speeds with the targets.
1089 i = mpt->mpt_port_page2.PortSettings &
1090 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1091 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1092 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1093 "honoring BIOS transfer negotiations\n");
1095 for (i = 0; i < 16; i++) {
1096 mpt->mpt_dev_page1[i].RequestedParameters = 0;
1097 mpt->mpt_dev_page1[i].Configuration = 0;
1098 (void) mpt_update_spi_config(mpt, i);
1105 mpt_cam_enable(struct mpt_softc *mpt)
1113 if (mpt_read_config_info_fc(mpt)) {
1116 if (mpt_set_initial_config_fc(mpt)) {
1119 } else if (mpt->is_sas) {
1120 if (mpt_read_config_info_sas(mpt)) {
1123 if (mpt_set_initial_config_sas(mpt)) {
1126 } else if (mpt->is_spi) {
1127 if (mpt_read_config_info_spi(mpt)) {
1130 if (mpt_set_initial_config_spi(mpt)) {
1142 mpt_cam_ready(struct mpt_softc *mpt)
1146 * If we're in target mode, hang out resources now
1147 * so we don't cause the world to hang talking to us.
1149 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1151 * Try to add some target command resources
1154 if (mpt_add_target_commands(mpt) == FALSE) {
1155 mpt_prt(mpt, "failed to add target commands\n");
1163 mpt_cam_detach(struct mpt_softc *mpt)
1165 mpt_handler_t handler;
1169 mpt_terminate_recovery_thread(mpt);
1171 handler.reply_handler = mpt_scsi_reply_handler;
1172 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1173 scsi_io_handler_id);
1174 handler.reply_handler = mpt_scsi_tmf_reply_handler;
1175 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1176 scsi_tmf_handler_id);
1177 handler.reply_handler = mpt_fc_els_reply_handler;
1178 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1180 handler.reply_handler = mpt_scsi_tgt_reply_handler;
1181 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1182 mpt->scsi_tgt_handler_id);
1183 handler.reply_handler = mpt_sata_pass_reply_handler;
1184 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1185 sata_pass_handler_id);
1187 if (mpt->tmf_req != NULL) {
1188 mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1189 mpt_free_request(mpt, mpt->tmf_req);
1190 mpt->tmf_req = NULL;
1192 if (mpt->sas_portinfo != NULL) {
1193 kfree(mpt->sas_portinfo, M_DEVBUF);
1194 mpt->sas_portinfo = NULL;
1197 if (mpt->sim != NULL) {
1198 xpt_free_path(mpt->path);
1199 xpt_bus_deregister(cam_sim_path(mpt->sim));
1200 cam_sim_free(mpt->sim);
1204 if (mpt->phydisk_sim != NULL) {
1205 xpt_free_path(mpt->phydisk_path);
1206 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1207 cam_sim_free(mpt->phydisk_sim);
1208 mpt->phydisk_sim = NULL;
1213 /* This routine is used after a system crash to dump core onto the swap device.
1216 mpt_poll(struct cam_sim *sim)
1218 struct mpt_softc *mpt;
1220 mpt = (struct mpt_softc *)cam_sim_softc(sim);
1225 * Watchdog timeout routine for SCSI requests.
1228 mpt_timeout(void *arg)
1231 struct mpt_softc *mpt;
1234 ccb = (union ccb *)arg;
1235 mpt = ccb->ccb_h.ccb_mpt_ptr;
1238 req = ccb->ccb_h.ccb_req_ptr;
1239 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1240 req->serno, ccb, req->ccb);
1241 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1242 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1243 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1244 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1245 req->state |= REQ_STATE_TIMEDOUT;
1246 mpt_wakeup_recovery_thread(mpt);
1252 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
1254 * Takes a list of physical segments and builds the SGL for the SCSI IO command
1255 * and forwards the command to the IOC after one last check that CAM has not
1256 * aborted the transaction.
1259 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1261 request_t *req, *trq;
1264 struct mpt_softc *mpt;
1265 bus_addr_t chain_list_addr;
1266 int first_lim, seg, this_seg_lim;
1267 uint32_t addr, cur_off, flags, nxt_off, tf;
1269 MSG_REQUEST_HEADER *hdrp;
1274 req = (request_t *)arg;
1277 mpt = ccb->ccb_h.ccb_mpt_ptr;
1278 req = ccb->ccb_h.ccb_req_ptr;
1280 hdrp = req->req_vbuf;
1281 mpt_off = req->req_vbuf;
1283 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1288 switch (hdrp->Function) {
1289 case MPI_FUNCTION_SCSI_IO_REQUEST:
1290 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1292 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1294 case MPI_FUNCTION_TARGET_ASSIST:
1296 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1299 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1306 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1308 mpt_prt(mpt, "segment count %d too large (max %u)\n",
1309 nseg, mpt->max_seg_cnt);
1314 if (error != EFBIG && error != ENOMEM) {
1315 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1317 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1319 mpt_freeze_ccb(ccb);
1320 if (error == EFBIG) {
1321 status = CAM_REQ_TOO_BIG;
1322 } else if (error == ENOMEM) {
1323 if (mpt->outofbeer == 0) {
1325 xpt_freeze_simq(mpt->sim, 1);
1326 mpt_lprt(mpt, MPT_PRT_DEBUG,
1329 status = CAM_REQUEUE_REQ;
1331 status = CAM_REQ_CMP_ERR;
1333 mpt_set_ccb_status(ccb, status);
1335 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1336 request_t *cmd_req =
1337 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1338 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1339 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1340 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1342 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1343 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1345 mpt_free_request(mpt, req);
1350 * No data to transfer?
1351 * Just make a single simple SGL with zero length.
1354 if (mpt->verbose >= MPT_PRT_DEBUG) {
1355 int tidx = ((char *)sglp) - mpt_off;
1356 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1360 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1361 MPI_pSGE_SET_FLAGS(se1,
1362 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1363 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1364 se1->FlagsLength = htole32(se1->FlagsLength);
1369 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1371 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1372 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1375 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1376 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1380 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1381 bus_dmasync_op_t op;
1383 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1384 op = BUS_DMASYNC_PREREAD;
1386 op = BUS_DMASYNC_PREWRITE;
1389 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1390 op = BUS_DMASYNC_PREWRITE;
1392 op = BUS_DMASYNC_PREREAD;
1395 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1399 * Okay, fill in what we can at the end of the command frame.
1400 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1401 * the command frame.
1403 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1404 * SIMPLE64 pointers and start doing CHAIN64 entries after
1408 if (nseg < MPT_NSGL_FIRST(mpt)) {
1412 * Leave room for CHAIN element
1414 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1417 se = (SGE_SIMPLE64 *) sglp;
1418 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1420 memset(se, 0, sizeof (*se));
1421 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1422 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1423 if (sizeof(bus_addr_t) > 4) {
1424 addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1425 /* SAS1078 36GB limitation WAR */
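/*
 * The 1078 reportedly cannot reach host memory whose upper 32 address
 * bits equal 9 (the 36GB-40GB window) through a normal SGE, so any
 * segment ending there is flagged as a local address instead.
 */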
1426 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
1427 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1429 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1431 se->Address.High = htole32(addr);
1433 if (seg == first_lim - 1) {
1434 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1436 if (seg == nseg - 1) {
1437 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1438 MPI_SGE_FLAGS_END_OF_BUFFER;
1440 MPI_pSGE_SET_FLAGS(se, tf);
1441 se->FlagsLength = htole32(se->FlagsLength);
1449 * Tell the IOC where to find the first chain element.
1451 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
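/*
 * ChainOffset is expressed in 32-bit words, hence the shift by two to
 * convert the chain element's byte offset within the request frame.
 */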
1452 nxt_off = MPT_RQSL(mpt);
1456 * Make up the rest of the data segments out of a chain element
1457 * (contained in the current request frame) which points to
1458 * SIMPLE64 elements in the next request frame, possibly ending
1459 * with *another* chain element (if there's more).
1461 while (seg < nseg) {
1463 * Point to the chain descriptor. Note that the chain
1464 * descriptor is at the end of the *previous* list (whether
1467 ce = (SGE_CHAIN64 *) se;
1470 * Before we change our current pointer, make sure we won't
1471 * overflow the request area with this frame. Note that we
1472 * test against 'greater than' here as it's okay in this case
1473 * to have next offset be just outside the request area.
1475 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1476 nxt_off = MPT_REQUEST_AREA;
1481 * Set our SGE element pointer to the beginning of the chain
1482 * list and update our next chain list offset.
1484 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1486 nxt_off += MPT_RQSL(mpt);
1489 * Now initialize the chain descriptor.
1491 memset(ce, 0, sizeof (*ce));
1494 * Get the physical address of the chain list.
1496 chain_list_addr = trq->req_pbuf;
1497 chain_list_addr += cur_off;
1498 if (sizeof (bus_addr_t) > 4) {
1500 htole32(((uint64_t)chain_list_addr) >> 32);
1502 ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1503 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1504 MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1507 * If we have more than a frame's worth of segments left,
1508 * set up the chain list to have the last element be another
1511 if ((nseg - seg) > MPT_NSGL(mpt)) {
1512 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1514 * The length of the chain is the length in bytes of the
1515 * number of segments plus the next chain element.
1517 * The next chain descriptor offset is the length,
1518 * in words, of the number of segments.
1520 ce->Length = (this_seg_lim - seg) *
1521 sizeof (SGE_SIMPLE64);
1522 ce->NextChainOffset = ce->Length >> 2;
1523 ce->Length += sizeof (SGE_CHAIN64);
1525 this_seg_lim = nseg;
1526 ce->Length = (this_seg_lim - seg) *
1527 sizeof (SGE_SIMPLE64);
1529 ce->Length = htole16(ce->Length);
1532 * Fill in the chain list SGE elements with our segment data.
1534 * If we're the last element in this chain list, set the last
1535 * element flag. If we're the completely last element period,
1536 * set the end of list and end of buffer flags.
1538 while (seg < this_seg_lim) {
1540 memset(se, 0, sizeof (*se));
1541 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1542 se->Address.Low = htole32(dm_segs->ds_addr &
1544 if (sizeof (bus_addr_t) > 4) {
1545 addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1546 /* SAS1078 36GB limitation WAR */
1548 (((uint64_t)dm_segs->ds_addr +
1549 MPI_SGE_LENGTH(se->FlagsLength)) >>
1552 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1554 se->Address.High = htole32(addr);
1556 if (seg == this_seg_lim - 1) {
1557 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1559 if (seg == nseg - 1) {
1560 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1561 MPI_SGE_FLAGS_END_OF_BUFFER;
1563 MPI_pSGE_SET_FLAGS(se, tf);
1564 se->FlagsLength = htole32(se->FlagsLength);
1572 * If we have more segments to do and we've used up all of
1573 * the space in a request area, go allocate another one
1574 * and chain to that.
1576 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1579 nrq = mpt_get_request(mpt, FALSE);
1587 * Append the new request area on the tail of our list.
1589 if ((trq = req->chain) == NULL) {
1592 while (trq->chain != NULL) {
1598 mpt_off = trq->req_vbuf;
1599 if (mpt->verbose >= MPT_PRT_DEBUG) {
1600 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1608 * Last time we need to check if this CCB needs to be aborted.
1610 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1611 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1612 request_t *cmd_req =
1613 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1614 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1615 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1616 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1619 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1620 ccb->ccb_h.status & CAM_STATUS_MASK);
1621 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1622 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1624 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1625 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1627 mpt_free_request(mpt, req);
1631 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1632 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1633 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1636 if (mpt->verbose > MPT_PRT_DEBUG) {
1638 mpt_print_request(req->req_vbuf);
1639 for (trq = req->chain; trq; trq = trq->chain) {
1640 kprintf(" Additional Chain Area %d\n", nc++);
1641 mpt_dump_sgl(trq->req_vbuf, 0);
1645 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1646 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1647 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1648 #ifdef WE_TRUST_AUTO_GOOD_STATUS
1649 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1650 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1651 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1653 tgt->state = TGT_STATE_MOVING_DATA;
1656 tgt->state = TGT_STATE_MOVING_DATA;
1659 mpt_send_cmd(mpt, req);
1663 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1665 request_t *req, *trq;
1668 struct mpt_softc *mpt;
1670 uint32_t flags, nxt_off;
1672 MSG_REQUEST_HEADER *hdrp;
1677 req = (request_t *)arg;
1680 mpt = ccb->ccb_h.ccb_mpt_ptr;
1681 req = ccb->ccb_h.ccb_req_ptr;
1683 hdrp = req->req_vbuf;
1684 mpt_off = req->req_vbuf;
1687 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1692 switch (hdrp->Function) {
1693 case MPI_FUNCTION_SCSI_IO_REQUEST:
1694 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1695 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1697 case MPI_FUNCTION_TARGET_ASSIST:
1699 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1702 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1709 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1711 mpt_prt(mpt, "segment count %d too large (max %u)\n",
1712 nseg, mpt->max_seg_cnt);
1717 if (error != EFBIG && error != ENOMEM) {
1718 mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1720 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1722 mpt_freeze_ccb(ccb);
1723 if (error == EFBIG) {
1724 status = CAM_REQ_TOO_BIG;
1725 } else if (error == ENOMEM) {
1726 if (mpt->outofbeer == 0) {
1728 xpt_freeze_simq(mpt->sim, 1);
1729 mpt_lprt(mpt, MPT_PRT_DEBUG,
1732 status = CAM_REQUEUE_REQ;
1734 status = CAM_REQ_CMP_ERR;
1736 mpt_set_ccb_status(ccb, status);
1738 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1739 request_t *cmd_req =
1740 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1741 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1742 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1743 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1745 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1746 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1748 mpt_free_request(mpt, req);
1753 * No data to transfer?
1754 * Just make a single simple SGL with zero length.
1757 if (mpt->verbose >= MPT_PRT_DEBUG) {
1758 int tidx = ((char *)sglp) - mpt_off;
1759 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1763 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1764 MPI_pSGE_SET_FLAGS(se1,
1765 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1766 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1767 se1->FlagsLength = htole32(se1->FlagsLength);
1772 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1774 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1775 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1778 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1779 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1783 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1784 bus_dmasync_op_t op;
1786 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1787 op = BUS_DMASYNC_PREREAD;
1789 op = BUS_DMASYNC_PREWRITE;
1792 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1793 op = BUS_DMASYNC_PREWRITE;
1795 op = BUS_DMASYNC_PREREAD;
1798 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1802 * Okay, fill in what we can at the end of the command frame.
1803 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1804 * the command frame.
1806 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1807 * SIMPLE32 pointers and start doing CHAIN32 entries after
1811 if (nseg < MPT_NSGL_FIRST(mpt)) {
1815 * Leave room for CHAIN element
1817 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1820 se = (SGE_SIMPLE32 *) sglp;
1821 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1824 memset(se, 0,sizeof (*se));
1825 se->Address = htole32(dm_segs->ds_addr);
1827 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1829 if (seg == first_lim - 1) {
1830 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1832 if (seg == nseg - 1) {
1833 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1834 MPI_SGE_FLAGS_END_OF_BUFFER;
1836 MPI_pSGE_SET_FLAGS(se, tf);
1837 se->FlagsLength = htole32(se->FlagsLength);
1845 * Tell the IOC where to find the first chain element.
1847 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1848 nxt_off = MPT_RQSL(mpt);
1852 * Make up the rest of the data segments out of a chain element
1853 * (contained in the current request frame) which points to
1854 * SIMPLE32 elements in the next request frame, possibly ending
1855 * with *another* chain element (if there's more).
1857 while (seg < nseg) {
1859 uint32_t tf, cur_off;
1860 bus_addr_t chain_list_addr;
1863 * Point to the chain descriptor. Note that the chain
1864 * descriptor is at the end of the *previous* list (whether
1867 ce = (SGE_CHAIN32 *) se;
1870 * Before we change our current pointer, make sure we won't
1871 * overflow the request area with this frame. Note that we
1872 * test against 'greater than' here as it's okay in this case
1873 * to have next offset be just outside the request area.
1875 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1876 nxt_off = MPT_REQUEST_AREA;
1881 * Set our SGE element pointer to the beginning of the chain
1882 * list and update our next chain list offset.
1884 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1886 nxt_off += MPT_RQSL(mpt);
1889 * Now initialize the chain descriptor.
1891 memset(ce, 0, sizeof (*ce));
1894 * Get the physical address of the chain list.
1896 chain_list_addr = trq->req_pbuf;
1897 chain_list_addr += cur_off;
1901 ce->Address = htole32(chain_list_addr);
1902 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1906 * If we have more than a frame's worth of segments left,
1907 * set up the chain list to have the last element be another
1910 if ((nseg - seg) > MPT_NSGL(mpt)) {
1911 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1913 * The length of the chain is the length in bytes of the
1914 * number of segments plus the next chain element.
1916 * The next chain descriptor offset is the length,
1917 * in words, of the number of segments.
1919 ce->Length = (this_seg_lim - seg) *
1920 sizeof (SGE_SIMPLE32);
1921 ce->NextChainOffset = ce->Length >> 2;
1922 ce->Length += sizeof (SGE_CHAIN32);
1924 this_seg_lim = nseg;
1925 ce->Length = (this_seg_lim - seg) *
1926 sizeof (SGE_SIMPLE32);
1928 ce->Length = htole16(ce->Length);
1931 * Fill in the chain list SGE elements with our segment data.
1933 * If we're the last element in this chain list, set the last
1934 * element flag. If we're the completely last element period,
1935 * set the end of list and end of buffer flags.
1937 while (seg < this_seg_lim) {
1938 memset(se, 0, sizeof (*se));
1939 se->Address = htole32(dm_segs->ds_addr);
1941 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1943 if (seg == this_seg_lim - 1) {
1944 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1946 if (seg == nseg - 1) {
1947 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1948 MPI_SGE_FLAGS_END_OF_BUFFER;
1950 MPI_pSGE_SET_FLAGS(se, tf);
1951 se->FlagsLength = htole32(se->FlagsLength);
1959 * If we have more segments to do and we've used up all of
1960 * the space in a request area, go allocate another one
1961 * and chain to that.
1963 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1966 nrq = mpt_get_request(mpt, FALSE);
1974 * Append the new request area on the tail of our list.
1976 if ((trq = req->chain) == NULL) {
1979 while (trq->chain != NULL) {
1985 mpt_off = trq->req_vbuf;
1986 if (mpt->verbose >= MPT_PRT_DEBUG) {
1987 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1995 * Last time we need to check if this CCB needs to be aborted.
1997 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1998 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1999 request_t *cmd_req =
2000 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2001 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2002 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2003 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2006 "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2007 ccb->ccb_h.status & CAM_STATUS_MASK);
2008 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2009 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2011 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2012 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2014 mpt_free_request(mpt, req);
2018 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2019 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2020 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2023 if (mpt->verbose > MPT_PRT_DEBUG) {
2025 mpt_print_request(req->req_vbuf);
2026 for (trq = req->chain; trq; trq = trq->chain) {
2027 kprintf(" Additional Chain Area %d\n", nc++);
2028 mpt_dump_sgl(trq->req_vbuf, 0);
2032 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2033 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2034 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2035 #ifdef WE_TRUST_AUTO_GOOD_STATUS
2036 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2037 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2038 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2040 tgt->state = TGT_STATE_MOVING_DATA;
2043 tgt->state = TGT_STATE_MOVING_DATA;
2046 mpt_send_cmd(mpt, req);
2050 mpt_start(struct cam_sim *sim, union ccb *ccb)
2053 struct mpt_softc *mpt;
2054 MSG_SCSI_IO_REQUEST *mpt_req;
2055 struct ccb_scsiio *csio = &ccb->csio;
2056 struct ccb_hdr *ccbh = &ccb->ccb_h;
2057 bus_dmamap_callback_t *cb;
2061 /* Get the pointer for the physical adapter */
2062 mpt = ccb->ccb_h.ccb_mpt_ptr;
2063 raid_passthru = (sim == mpt->phydisk_sim);
2065 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2066 if (mpt->outofbeer == 0) {
2068 xpt_freeze_simq(mpt->sim, 1);
2069 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2071 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2072 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2077 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2080 if (sizeof (bus_addr_t) > 4) {
2081 cb = mpt_execute_req_a64;
2083 cb = mpt_execute_req;
2087 * Link the ccb and the request structure so we can find
2088 * the other knowing either the request or the ccb
2091 ccb->ccb_h.ccb_req_ptr = req;
2093 /* Now we build the command for the IOC */
2094 mpt_req = req->req_vbuf;
2095 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2097 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2098 if (raid_passthru) {
2099 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2100 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2101 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2102 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2106 mpt_req->Bus = 0; /* we never set bus here */
2108 tgt = ccb->ccb_h.target_id;
2109 mpt_req->Bus = 0; /* XXX */
2112 mpt_req->SenseBufferLength =
2113 (csio->sense_len < MPT_SENSE_SIZE) ?
2114 csio->sense_len : MPT_SENSE_SIZE;
2117 * We use the message context to find the request structure when we
2118 * get the command completion interrupt from the IOC.
2120 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2122 /* Which physical device to do the I/O on */
2123 mpt_req->TargetID = tgt;
2125 /* We assume a single level LUN type */
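/*
 * LUNs too large for the simple (peripheral) form are encoded in the
 * SAM flat-space format: byte 0 carries 0x40 plus the upper six bits
 * of the LUN, byte 1 the lower eight bits.
 */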
2126 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2127 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2128 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2130 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2133 /* Set the direction of the transfer */
2134 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2135 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2136 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2137 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2139 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2142 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2143 switch(ccb->csio.tag_action) {
2144 case MSG_HEAD_OF_Q_TAG:
2145 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2148 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2150 case MSG_ORDERED_Q_TAG:
2151 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2153 case MSG_SIMPLE_Q_TAG:
2155 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2159 if (mpt->is_fc || mpt->is_sas) {
2160 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2162 /* XXX No such thing for a target doing packetized. */
2163 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2168 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2169 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2172 mpt_req->Control = htole32(mpt_req->Control);
2174 /* Copy the scsi command block into place */
2175 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2176 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2178 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2181 mpt_req->CDBLength = csio->cdb_len;
2182 mpt_req->DataLength = htole32(csio->dxfer_len);
2183 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2186 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2188 if (mpt->verbose == MPT_PRT_DEBUG) {
2190 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2191 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2192 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2193 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2194 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2195 mpt_prtc(mpt, "(%s %u byte%s ",
2196 (df == MPI_SCSIIO_CONTROL_READ)?
2197 "read" : "write", csio->dxfer_len,
2198 (csio->dxfer_len == 1)? ")" : "s)");
2200 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2201 ccb->ccb_h.target_lun, req, req->serno);
2205 * If we have any data to send with this command, map it into bus space.
2207 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2208 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2210 * We've been given a pointer to a single buffer.
2212 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2214 * Virtual address that needs to translated into
2215 * one or more physical address ranges.
2219 error = bus_dmamap_load(mpt->buffer_dmat,
2220 req->dmap, csio->data_ptr, csio->dxfer_len,
2223 if (error == EINPROGRESS) {
2225 * So as to maintain ordering,
2226 * freeze the controller queue
2227 * until our mapping is
2230 xpt_freeze_simq(mpt->sim, 1);
2231 ccbh->status |= CAM_RELEASE_SIMQ;
2235 * We have been given a pointer to single
2238 struct bus_dma_segment seg;
2240 (bus_addr_t)(vm_offset_t)csio->data_ptr;
2241 seg.ds_len = csio->dxfer_len;
2242 (*cb)(req, &seg, 1, 0);
2246 * We have been given a list of addresses.
2247 * This case could easily be supported, but such lists are not
2248 * currently generated by the CAM subsystem, so there
2249 * is no point in wasting the time right now.
2251 struct bus_dma_segment *segs;
2252 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2253 (*cb)(req, NULL, 0, EFAULT);
2255 /* Just use the segments provided */
2256 segs = (struct bus_dma_segment *)csio->data_ptr;
2257 (*cb)(req, segs, csio->sglist_cnt, 0);
2261 (*cb)(req, NULL, 0, 0);
2266 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2273 error = mpt_scsi_send_tmf(mpt,
2274 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2275 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2276 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2277 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2278 0, /* XXX How do I get the channel ID? */
2279 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2280 lun != CAM_LUN_WILDCARD ? lun : 0,
2285 * mpt_scsi_send_tmf hard resets on failure, so no
2286 * need to do so here.
2289 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2293 /* Wait for bus reset to be processed by the IOC. */
2294 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2295 REQ_STATE_DONE, sleep_ok, 5000);
2297 status = le16toh(mpt->tmf_req->IOCStatus);
2298 response = mpt->tmf_req->ResponseCode;
2299 mpt->tmf_req->state = REQ_STATE_FREE;
2302 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2303 "Resetting controller.\n");
2304 mpt_reset(mpt, TRUE);
2308 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2309 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2310 "Resetting controller.\n", status);
2311 mpt_reset(mpt, TRUE);
2315 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2316 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2317 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2318 "Resetting controller.\n", response);
2319 mpt_reset(mpt, TRUE);
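/*
 * Ask the IOC to reset the Fibre Channel link by posting an FC
 * primitive send request with the RESET_LINK option set.  If dowait
 * is set, wait (up to 60 seconds) for the request to complete before
 * returning.
 */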
2326 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2330 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2332 req = mpt_get_request(mpt, FALSE);
2337 memset(fc, 0, sizeof(*fc));
2338 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2339 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2340 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2341 mpt_send_cmd(mpt, req);
2343 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2344 REQ_STATE_DONE, FALSE, 60 * 1000);
2346 mpt_free_request(mpt, req);
2353 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
2355 xpt_free_path(ccb->ccb_h.path);
2360 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2361 MSG_EVENT_NOTIFY_REPLY *msg)
2363 uint32_t data0, data1;
2365 data0 = le32toh(msg->Data[0]);
2366 data1 = le32toh(msg->Data[1]);
2367 switch(msg->Event & 0xFF) {
2368 case MPI_EVENT_UNIT_ATTENTION:
2369 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2370 (data0 >> 8) & 0xff, data0 & 0xff);
2373 case MPI_EVENT_IOC_BUS_RESET:
2374 /* We generated a bus reset */
2375 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2376 (data0 >> 8) & 0xff);
2377 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2380 case MPI_EVENT_EXT_BUS_RESET:
2381 /* Someone else generated a bus reset */
2382 mpt_prt(mpt, "External Bus Reset Detected\n");
2384 * These replies don't return EventData like the MPI
2387 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2390 case MPI_EVENT_RESCAN:
2395 * In general this means a device has been added to the loop.
2397 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2398 if (mpt->ready == 0) {
2401 if (mpt->phydisk_sim) {
2402 pathid = cam_sim_path(mpt->phydisk_sim);
2404 pathid = cam_sim_path(mpt->sim);
2407 * Allocate a CCB, create a wildcard path for this bus,
2408 * and schedule a rescan.
2410 ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
2412 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2413 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2414 mpt_prt(mpt, "unable to create path for rescan\n");
2419 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
2420 ccb->ccb_h.func_code = XPT_SCAN_BUS;
2421 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2422 ccb->crcn.flags = CAM_FLAG_NONE;
2425 /* scan is now in progress */
2429 case MPI_EVENT_LINK_STATUS_CHANGE:
2430 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2431 (data1 >> 8) & 0xff,
2432 ((data0 & 0xff) == 0)? "Failed" : "Active");
2435 case MPI_EVENT_LOOP_STATE_CHANGE:
2436 switch ((data0 >> 16) & 0xff) {
2439 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2440 "(Loop Initialization)\n",
2441 (data1 >> 8) & 0xff,
2442 (data0 >> 8) & 0xff,
2444 switch ((data0 >> 8) & 0xff) {
2446 if ((data0 & 0xff) == 0xF7) {
2447 mpt_prt(mpt, "Device needs AL_PA\n");
2449 mpt_prt(mpt, "Device %02x doesn't like "
2455 if ((data0 & 0xff) == 0xF7) {
2456 mpt_prt(mpt, "Device had loop failure "
2457 "at its receiver prior to acquiring"
2460 mpt_prt(mpt, "Device %02x detected loop"
2461 " failure at its receiver\n",
2466 mpt_prt(mpt, "Device %02x requests that device "
2467 "%02x reset itself\n",
2469 (data0 >> 8) & 0xFF);
2474 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2475 "LPE(%02x,%02x) (Loop Port Enable)\n",
2476 (data1 >> 8) & 0xff, /* Port */
2477 (data0 >> 8) & 0xff, /* Character 3 */
2478 (data0 ) & 0xff /* Character 4 */);
2481 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2482 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2483 (data1 >> 8) & 0xff, /* Port */
2484 (data0 >> 8) & 0xff, /* Character 3 */
2485 (data0 ) & 0xff /* Character 4 */);
2488 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2489 "FC event (%02x %02x %02x)\n",
2490 (data1 >> 8) & 0xff, /* Port */
2491 (data0 >> 16) & 0xff, /* Event */
2492 (data0 >> 8) & 0xff, /* Character 3 */
2493 (data0 ) & 0xff /* Character 4 */);
2497 case MPI_EVENT_LOGOUT:
2498 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2499 (data1 >> 8) & 0xff, data0);
2501 case MPI_EVENT_QUEUE_FULL:
2503 struct cam_sim *sim;
2504 struct cam_path *tmppath;
2505 struct ccb_relsim crs;
2506 PTR_EVENT_DATA_QUEUE_FULL pqf;
2509 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2510 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2511 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2512 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
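/*
 * Throttle back: for every LUN on the target that reported QUEUE FULL,
 * ask CAM to reduce the number of openings to one less than the depth
 * at which the IOC saw the event.
 */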
2513 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2514 pqf->TargetID) != 0) {
2515 sim = mpt->phydisk_sim;
2519 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2520 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2521 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2522 mpt_prt(mpt, "unable to create a path to send "
2526 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2527 crs.ccb_h.func_code = XPT_REL_SIMQ;
2528 crs.ccb_h.flags = CAM_DEV_QFREEZE;
2529 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2530 crs.openings = pqf->CurrentDepth - 1;
2531 xpt_action((union ccb *)&crs);
2532 if (crs.ccb_h.status != CAM_REQ_CMP) {
2533 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2535 xpt_free_path(tmppath);
2539 case MPI_EVENT_IR_RESYNC_UPDATE:
2540 mpt_prt(mpt, "IR resync update %d completed\n",
2541 (data0 >> 16) & 0xff);
2543 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2546 struct cam_sim *sim;
2547 struct cam_path *tmppath;
2548 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2550 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2551 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2552 psdsc->TargetID) != 0)
2553 sim = mpt->phydisk_sim;
2556 switch(psdsc->ReasonCode) {
2557 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2558 ccb = kmalloc(sizeof(union ccb), M_TEMP,
2560 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2561 cam_sim_path(sim), psdsc->TargetID,
2562 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2564 "unable to create path for rescan\n");
2568 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
2569 5/*priority (low)*/);
2570 ccb->ccb_h.func_code = XPT_SCAN_BUS;
2571 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2572 ccb->crcn.flags = CAM_FLAG_NONE;
2575 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2576 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2577 psdsc->TargetID, CAM_LUN_WILDCARD) !=
2580 "unable to create path for async event");
2583 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2584 xpt_free_path(tmppath);
2586 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2587 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2588 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2591 mpt_lprt(mpt, MPT_PRT_WARN,
2592 "SAS device status change: Bus: 0x%02x TargetID: "
2593 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2594 psdsc->TargetID, psdsc->ReasonCode);
2599 case MPI_EVENT_SAS_DISCOVERY_ERROR:
2601 PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2603 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2604 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2605 mpt_lprt(mpt, MPT_PRT_WARN,
2606 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2607 pde->Port, pde->DiscoveryStatus);
2610 case MPI_EVENT_EVENT_CHANGE:
2611 case MPI_EVENT_INTEGRATED_RAID:
2613 case MPI_EVENT_LOG_ENTRY_ADDED:
2614 case MPI_EVENT_SAS_DISCOVERY:
2615 case MPI_EVENT_SAS_PHY_LINK_STATUS:
2616 case MPI_EVENT_SAS_SES:
2619 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2627 * Reply path for all SCSI I/O requests, called from our
2628 * interrupt handler by extracting our handler index from
2629 * the MsgContext field of the reply from the IOC.
2631 * This routine is optimized for the common case of a
2632 * completion without error. All exception handling is
2633 * offloaded to non-inlined helper routines to minimize
2637 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2638 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2640 MSG_SCSI_IO_REQUEST *scsi_req;
2643 if (req->state == REQ_STATE_FREE) {
2644 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2648 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2651 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2656 mpt_req_untimeout(req, mpt_timeout, ccb);
2657 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2659 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2660 bus_dmasync_op_t op;
2662 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2663 op = BUS_DMASYNC_POSTREAD;
2665 op = BUS_DMASYNC_POSTWRITE;
2666 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2667 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2670 if (reply_frame == NULL) {
2672 * Context only reply, completion without error status.
2674 ccb->csio.resid = 0;
2675 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2676 ccb->csio.scsi_status = SCSI_STATUS_OK;
2678 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2681 if (mpt->outofbeer) {
2682 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2684 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2686 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2687 struct scsi_inquiry_data *iq =
2688 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2689 if (scsi_req->Function ==
2690 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2692 * Fake out the device type so that only the
2693 * pass-thru device will attach.
2695 iq->device &= ~0x1F;
2696 iq->device |= T_NODEVICE;
2699 if (mpt->verbose == MPT_PRT_DEBUG) {
2700 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2703 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2705 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2706 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2708 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2710 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2712 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2713 ("CCB req needed wakeup"));
2715 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2717 mpt_free_request(mpt, req);
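/*
 * Completion handler for task management (TMF) requests.  The IOC
 * status and response code are saved in the request itself so that
 * whoever is sleeping on the TMF (mpt_bus_reset(), the recovery
 * thread) can examine them once the request reaches REQ_STATE_DONE.
 */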
2722 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2723 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2725 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2727 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2729 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2731 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2732 /* Record IOC Status and Response Code of TMF for any waiters. */
2733 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2734 req->ResponseCode = tmf_reply->ResponseCode;
2736 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2737 req, req->serno, le16toh(tmf_reply->IOCStatus));
2738 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2739 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2740 req->state |= REQ_STATE_DONE;
2743 mpt->tmf_req->state = REQ_STATE_FREE;
2749 * XXX: Move to definitions file
2767 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2768 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2771 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2772 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2775 * We are going to reuse the ELS request to send this response back.
2778 memset(rsp, 0, sizeof(*rsp));
2780 #ifdef USE_IMMEDIATE_LINK_DATA
2782 * Apparently the IMMEDIATE stuff doesn't seem to work.
2784 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2786 rsp->RspLength = length;
2787 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2788 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2791 * Copy over information from the original reply frame to
2792 * its correct place in the response.
2794 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2797 * And now copy back the temporary area to the original frame.
2799 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2800 rsp = req->req_vbuf;
2802 #ifdef USE_IMMEDIATE_LINK_DATA
2803 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2806 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2807 bus_addr_t paddr = req->req_pbuf;
2808 paddr += MPT_RQSL(mpt);
2811 MPI_SGE_FLAGS_HOST_TO_IOC |
2812 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2813 MPI_SGE_FLAGS_LAST_ELEMENT |
2814 MPI_SGE_FLAGS_END_OF_LIST |
2815 MPI_SGE_FLAGS_END_OF_BUFFER;
2816 fl <<= MPI_SGE_FLAGS_SHIFT;
2818 se->FlagsLength = htole32(fl);
2819 se->Address = htole32((uint32_t) paddr);
2826 mpt_send_cmd(mpt, req);
2830 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2831 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2833 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2834 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2838 U16 status = le16toh(reply_frame->IOCStatus);
2841 int do_refresh = TRUE;
2844 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2845 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2846 req, req->serno, rp->Function));
2847 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2848 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2850 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2853 mpt_lprt(mpt, MPT_PRT_DEBUG,
2854 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2855 req, req->serno, reply_frame, reply_frame->Function);
2857 if (status != MPI_IOCSTATUS_SUCCESS) {
2858 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2859 status, reply_frame->Function);
2860 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2862 * XXX: to get around shutdown issue
2871 * If this is the completion of a link service response, we recycle
2872 * the request to post a fresh link service buffer.
2874 * The request pointer is bogus in this case and we have to fetch
2875 * it based upon the TransactionContext.
2877 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2878 /* Freddie Uncle Charlie Katie */
2879 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2880 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2881 if (mpt->els_cmd_ptrs[ioindex] == req) {
2885 KASSERT(ioindex < mpt->els_cmds_allocated,
2886 ("can't find my mommie!"));
2888 /* remove from active list as we're going to re-post it */
2889 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2890 req->state &= ~REQ_STATE_QUEUED;
2891 req->state |= REQ_STATE_DONE;
2892 mpt_fc_post_els(mpt, req, ioindex);
2896 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2897 /* remove from active list as we're done */
2898 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2899 req->state &= ~REQ_STATE_QUEUED;
2900 req->state |= REQ_STATE_DONE;
2901 if (req->state & REQ_STATE_TIMEDOUT) {
2902 mpt_lprt(mpt, MPT_PRT_DEBUG,
2903 "Sync Primitive Send Completed After Timeout\n");
2904 mpt_free_request(mpt, req);
2905 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2906 mpt_lprt(mpt, MPT_PRT_DEBUG,
2907 "Async Primitive Send Complete\n");
2908 mpt_free_request(mpt, req);
2910 mpt_lprt(mpt, MPT_PRT_DEBUG,
2911 "Sync Primitive Send Complete- Waking Waiter\n");
2917 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2918 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2919 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2920 rp->MsgLength, rp->MsgFlags);
2924 if (rp->MsgLength <= 5) {
2926 * This is just an ack of an original ELS buffer post
2928 mpt_lprt(mpt, MPT_PRT_DEBUG,
2929 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2934 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2935 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2937 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2938 cmd = be32toh(elsbuf[0]) >> 24;
2940 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2941 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2945 ioindex = le32toh(rp->TransactionContext);
2946 req = mpt->els_cmd_ptrs[ioindex];
2948 if (rctl == ELS && type == 1) {
2952 * Send back a PRLI ACC
2954 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2955 le32toh(rp->Wwn.PortNameHigh),
2956 le32toh(rp->Wwn.PortNameLow));
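/*
 * Build the PRLI accept payload in place.  Word 0 is 0x02 (LS_ACC)
 * with a page length of 0x10 and a payload length of 0x14 (20 bytes).
 * Word 1 keeps the FCP type code from the request and ORs in the
 * "request executed" response code.  Word 4 carries the FCP service
 * parameter flags; we advertise the target and/or initiator function
 * bits that match our current role.
 */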
2957 elsbuf[0] = htobe32(0x02100014);
2958 elsbuf[1] |= htobe32(0x00000100);
2959 elsbuf[4] = htobe32(0x00000002);
2960 if (mpt->role & MPT_ROLE_TARGET)
2961 elsbuf[4] |= htobe32(0x00000010);
2962 if (mpt->role & MPT_ROLE_INITIATOR)
2963 elsbuf[4] |= htobe32(0x00000020);
2964 /* remove from active list as we're done */
2965 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2966 req->state &= ~REQ_STATE_QUEUED;
2967 req->state |= REQ_STATE_DONE;
2968 mpt_fc_els_send_response(mpt, req, rp, 20);
2972 memset(elsbuf, 0, 5 * (sizeof (U32)));
2973 elsbuf[0] = htobe32(0x02100014);
2974 elsbuf[1] = htobe32(0x08000100);
2975 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2976 le32toh(rp->Wwn.PortNameHigh),
2977 le32toh(rp->Wwn.PortNameLow));
2978 /* remove from active list as we're done */
2979 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2980 req->state &= ~REQ_STATE_QUEUED;
2981 req->state |= REQ_STATE_DONE;
2982 mpt_fc_els_send_response(mpt, req, rp, 20);
2986 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2989 } else if (rctl == ABTS && type == 0) {
2990 uint16_t rx_id = le16toh(rp->Rxid);
2991 uint16_t ox_id = le16toh(rp->Oxid);
2992 request_t *tgt_req = NULL;
2995 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2996 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2997 le32toh(rp->Wwn.PortNameLow));
2998 if (rx_id >= mpt->mpt_max_tgtcmds) {
2999 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
3000 } else if (mpt->tgt_cmd_ptrs == NULL) {
3001 mpt_prt(mpt, "No TGT CMD PTRS\n");
3003 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
3006 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
3011 * Check to make sure we have the correct command.
3012 * The reply descriptor in the target state should
3013 * contain an IoIndex that should match the
3016 * It'd be nice to have OX_ID to crosscheck with
3019 ct_id = GET_IO_INDEX(tgt->reply_desc);
3021 if (ct_id != rx_id) {
3022 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
3023 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
3031 "CCB (%p): lun %u flags %x status %x\n",
3032 ccb, ccb->ccb_h.target_lun,
3033 ccb->ccb_h.flags, ccb->ccb_h.status);
3035 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
3036 "%x nxfers %x\n", tgt->state,
3037 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
3040 if (mpt_abort_target_cmd(mpt, tgt_req)) {
3041 mpt_prt(mpt, "unable to start TargetAbort\n");
3044 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
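/*
 * Build a BA_ACC payload: word 1 carries the OX_ID/RX_ID of the
 * exchange being aborted and word 2 the low/high SEQ_CNT range.  The
 * R_CTL in the reused reply frame is then bumped from ABTS to BA_ACC
 * below so that the response goes out as a basic accept.
 */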
3046 memset(elsbuf, 0, 5 * (sizeof (U32)));
3047 elsbuf[0] = htobe32(0);
3048 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3049 elsbuf[2] = htobe32(0x000ffff);
3051 * Dork with the reply frame so that the response to it
3054 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3055 /* remove from active list as we're done */
3056 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3057 req->state &= ~REQ_STATE_QUEUED;
3058 req->state |= REQ_STATE_DONE;
3059 mpt_fc_els_send_response(mpt, req, rp, 12);
3062 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3064 if (do_refresh == TRUE) {
3065 /* remove from active list as we're done */
3066 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3067 req->state &= ~REQ_STATE_QUEUED;
3068 req->state |= REQ_STATE_DONE;
3069 mpt_fc_post_els(mpt, req, ioindex);
3075 * Clean up all SCSI Initiator personality state in response
3076 * to a controller reset.
3079 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3083 * The pending list is already run down by
3084 * the generic handler. Perform the same
3085 * operation on the timed out request list.
3087 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3088 MPI_IOCSTATUS_INVALID_STATE);
3091 * XXX: We need to repost ELS and Target Command Buffers?
3095 * Inform the XPT that a bus reset has occurred.
3097 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3101 * Parse additional completion information in the reply
3102 * frame for SCSI I/O requests.
3105 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3106 MSG_DEFAULT_REPLY *reply_frame)
3109 MSG_SCSI_IO_REPLY *scsi_io_reply;
3113 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3114 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3115 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3116 ("MPT SCSI I/O Handler called with incorrect reply type"));
3117 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3118 ("MPT SCSI I/O Handler called with continuation reply"));
3120 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3121 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3122 ioc_status &= MPI_IOCSTATUS_MASK;
3123 sstate = scsi_io_reply->SCSIState;
3127 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
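/*
 * If the IOC ran autosense for this command, copy the sense bytes from
 * the per-request sense buffer into the CCB and work out the sense
 * residual.
 */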
3129 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3130 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3131 uint32_t sense_returned;
3133 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3135 sense_returned = le32toh(scsi_io_reply->SenseCount);
3136 if (sense_returned < ccb->csio.sense_len)
3137 ccb->csio.sense_resid = ccb->csio.sense_len -
3140 ccb->csio.sense_resid = 0;
3142 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3143 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3144 min(ccb->csio.sense_len, sense_returned));
3147 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3149 * Tag messages rejected, but non-tagged retry
3152 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3156 switch(ioc_status) {
3157 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3160 * Linux driver indicates that a zero
3161 * transfer length with this error code
3162 * indicates a CRC error.
3164 * No need to swap the bytes for checking
3167 if (scsi_io_reply->TransferCount == 0) {
3168 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3172 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3173 case MPI_IOCSTATUS_SUCCESS:
3174 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3175 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3177 * Status was never returned for this transaction.
3179 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3180 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3181 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3182 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3183 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3184 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3185 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3187 /* XXX Handle SPI-Packet and FCP-2 response info. */
3188 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3190 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3192 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3193 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3195 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3196 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3198 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3200 * Since selection timeouts and "device really not
3201 * there" are grouped into this error code, report
3202 * selection timeout. Selection timeouts are
3203 * typically retried before giving up on the device
3204 * whereas "device not there" errors are considered
3207 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3209 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3210 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3212 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3213 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3215 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3216 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3218 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3219 ccb->ccb_h.status = CAM_UA_TERMIO;
3221 case MPI_IOCSTATUS_INVALID_STATE:
3223 * The IOC has been reset. Emulate a bus reset.
3226 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3227 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3229 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3230 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3232 * Don't clobber any timeout status that has
3233 * already been set for this transaction. We
3234 * want the SCSI layer to be able to differentiate
3235 * between the command we aborted due to timeout
3236 * and any innocent bystanders.
3238 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3240 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3243 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3244 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3246 case MPI_IOCSTATUS_BUSY:
3247 mpt_set_ccb_status(ccb, CAM_BUSY);
3249 case MPI_IOCSTATUS_INVALID_FUNCTION:
3250 case MPI_IOCSTATUS_INVALID_SGL:
3251 case MPI_IOCSTATUS_INTERNAL_ERROR:
3252 case MPI_IOCSTATUS_INVALID_FIELD:
3255 * Some of the above may need to kick
3256 * off a recovery action!!!!
3258 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3262 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3263 mpt_freeze_ccb(ccb);
3270 mpt_action(struct cam_sim *sim, union ccb *ccb)
3272 struct mpt_softc *mpt;
3273 struct ccb_trans_settings *cts;
3278 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3280 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3281 raid_passthru = (sim == mpt->phydisk_sim);
3282 MPT_LOCK_ASSERT(mpt);
3284 tgt = ccb->ccb_h.target_id;
3285 lun = ccb->ccb_h.target_lun;
3286 if (raid_passthru &&
3287 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3288 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3289 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3290 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3291 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3292 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3297 ccb->ccb_h.ccb_mpt_ptr = mpt;
3299 switch (ccb->ccb_h.func_code) {
3300 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3302 * Do a couple of preliminary checks...
3304 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3305 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3306 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3307 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3311 /* Max supported CDB length is 16 bytes */
3312 /* XXX Unless we implement the new 32byte message type */
3313 if (ccb->csio.cdb_len >
3314 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3315 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3316 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3319 #ifdef MPT_TEST_MULTIPATH
3320 if (mpt->failure_id == ccb->ccb_h.target_id) {
3321 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3322 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3326 ccb->csio.scsi_status = SCSI_STATUS_OK;
3327 mpt_start(sim, ccb);
3331 if (raid_passthru) {
3332 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3333 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3337 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3339 xpt_print(ccb->ccb_h.path, "reset bus\n");
3342 xpt_print(ccb->ccb_h.path, "reset device\n");
3344 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3347 * mpt_bus_reset is always successful in that it
3348 * will fall back to a hard reset should a bus
3349 * reset attempt fail.
3351 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3352 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3357 union ccb *accb = ccb->cab.abort_ccb;
3358 switch (accb->ccb_h.func_code) {
3359 case XPT_ACCEPT_TARGET_IO:
3360 case XPT_IMMED_NOTIFY:
3361 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3363 case XPT_CONT_TARGET_IO:
3364 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3365 ccb->ccb_h.status = CAM_UA_ABORT;
3368 ccb->ccb_h.status = CAM_UA_ABORT;
3371 ccb->ccb_h.status = CAM_REQ_INVALID;
3377 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3378 #define DP_DISC_ENABLE 0x1
3379 #define DP_DISC_DISABL 0x2
3380 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3382 #define DP_TQING_ENABLE 0x4
3383 #define DP_TQING_DISABL 0x8
3384 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3386 #define DP_WIDE 0x10
3387 #define DP_NARROW 0x20
3388 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3390 #define DP_SYNC 0x40
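/*
 * The DP_* bits above are scratch flags used to note which SPI nexus
 * settings a caller asked to change (or which we report back) before
 * the per-target device page 1 parameters are actually touched.
 */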
3392 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3394 struct ccb_trans_settings_scsi *scsi;
3395 struct ccb_trans_settings_spi *spi;
3403 if (mpt->is_fc || mpt->is_sas) {
3404 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3408 scsi = &cts->proto_specific.scsi;
3409 spi = &cts->xport_specific.spi;
3412 * We can be called just to valid transport and proto versions
3414 if (scsi->valid == 0 && spi->valid == 0) {
3415 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3420 * Skip attempting settings on RAID volume disks.
3421 * Other devices on the bus get the normal treatment.
3423 if (mpt->phydisk_sim && raid_passthru == 0 &&
3424 mpt_is_raid_volume(mpt, tgt) != 0) {
3425 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3426 "no transfer settings for RAID vols\n");
3427 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3431 i = mpt->mpt_port_page2.PortSettings &
3432 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3433 j = mpt->mpt_port_page2.PortFlags &
3434 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3435 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3436 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3437 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3438 "honoring BIOS transfer negotiations\n");
3439 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3447 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3448 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3449 DP_DISC_ENABLE : DP_DISC_DISABL;
3452 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3453 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3454 DP_TQING_ENABLE : DP_TQING_DISABL;
3457 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3458 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3459 DP_WIDE : DP_NARROW;
3462 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3464 offset = spi->sync_offset;
3466 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3467 &mpt->mpt_dev_page1[tgt];
3468 offset = ptr->RequestedParameters;
3469 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3470 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3472 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3474 period = spi->sync_period;
3476 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3477 &mpt->mpt_dev_page1[tgt];
3478 period = ptr->RequestedParameters;
3479 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3480 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3482 if (dval & DP_DISC_ENABLE) {
3483 mpt->mpt_disc_enable |= (1 << tgt);
3484 } else if (dval & DP_DISC_DISABL) {
3485 mpt->mpt_disc_enable &= ~(1 << tgt);
3487 if (dval & DP_TQING_ENABLE) {
3488 mpt->mpt_tag_enable |= (1 << tgt);
3489 } else if (dval & DP_TQING_DISABL) {
3490 mpt->mpt_tag_enable &= ~(1 << tgt);
3492 if (dval & DP_WIDTH) {
3493 mpt_setwidth(mpt, tgt, 1);
3495 if (dval & DP_SYNC) {
3496 mpt_setsync(mpt, tgt, period, offset);
3499 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3502 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3503 "set [%d]: 0x%x period 0x%x offset %d\n",
3504 tgt, dval, period, offset);
3505 if (mpt_update_spi_config(mpt, tgt)) {
3506 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3508 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3512 case XPT_GET_TRAN_SETTINGS:
3514 struct ccb_trans_settings_scsi *scsi;
3516 cts->protocol = PROTO_SCSI;
3518 struct ccb_trans_settings_fc *fc =
3519 &cts->xport_specific.fc;
3520 cts->protocol_version = SCSI_REV_SPC;
3521 cts->transport = XPORT_FC;
3522 cts->transport_version = 0;
3523 fc->valid = CTS_FC_VALID_SPEED;
3524 fc->bitrate = 100000;
3525 } else if (mpt->is_sas) {
3526 struct ccb_trans_settings_sas *sas =
3527 &cts->xport_specific.sas;
3528 cts->protocol_version = SCSI_REV_SPC2;
3529 cts->transport = XPORT_SAS;
3530 cts->transport_version = 0;
3531 sas->valid = CTS_SAS_VALID_SPEED;
3532 sas->bitrate = 300000;
3534 cts->protocol_version = SCSI_REV_2;
3535 cts->transport = XPORT_SPI;
3536 cts->transport_version = 2;
3537 if (mpt_get_spi_settings(mpt, cts) != 0) {
3538 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3542 scsi = &cts->proto_specific.scsi;
3543 scsi->valid = CTS_SCSI_VALID_TQ;
3544 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3545 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3548 case XPT_CALC_GEOMETRY:
3550 struct ccb_calc_geometry *ccg;
3553 if (ccg->block_size == 0) {
3554 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3555 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3558 cam_calc_geometry(ccg, /*extended*/1);
3559 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3562 case XPT_PATH_INQ: /* Path routing inquiry */
3564 struct ccb_pathinq *cpi = &ccb->cpi;
3566 cpi->version_num = 1;
3567 cpi->target_sprt = 0;
3568 cpi->hba_eng_cnt = 0;
3569 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3570 #if 0 /* XXX swildner */
3571 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3574 * FC cards report MAX_DEVICES of 512, but
3575 * the MSG_SCSI_IO_REQUEST target id field
3576 * is only 8 bits. Until we fix the driver
3577 * to support 'channels' for bus overflow,
3580 if (cpi->max_target > 255) {
3581 cpi->max_target = 255;
3585 * VMware ESX reports > 16 devices and then dies when we probe.
3587 if (mpt->is_spi && cpi->max_target > 15) {
3588 cpi->max_target = 15;
3593 cpi->max_lun = MPT_MAX_LUNS;
3594 cpi->initiator_id = mpt->mpt_ini_id;
3595 cpi->bus_id = cam_sim_bus(sim);
3598 * The base speed is the speed of the underlying connection.
3600 cpi->protocol = PROTO_SCSI;
3602 cpi->hba_misc = PIM_NOBUSRESET;
3603 cpi->base_transfer_speed = 100000;
3604 cpi->hba_inquiry = PI_TAG_ABLE;
3605 cpi->transport = XPORT_FC;
3606 cpi->transport_version = 0;
3607 cpi->protocol_version = SCSI_REV_SPC;
3608 } else if (mpt->is_sas) {
3609 cpi->hba_misc = PIM_NOBUSRESET;
3610 cpi->base_transfer_speed = 300000;
3611 cpi->hba_inquiry = PI_TAG_ABLE;
3612 cpi->transport = XPORT_SAS;
3613 cpi->transport_version = 0;
3614 cpi->protocol_version = SCSI_REV_SPC2;
3616 cpi->hba_misc = PIM_SEQSCAN;
3617 cpi->base_transfer_speed = 3300;
3618 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3619 cpi->transport = XPORT_SPI;
3620 cpi->transport_version = 2;
3621 cpi->protocol_version = SCSI_REV_2;
3625 * We give our fake RAID passthru bus a width that is MaxPhysDisks
3626 * wide and restrict it to one lun.
3628 if (raid_passthru) {
3629 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3630 cpi->initiator_id = cpi->max_target + 1;
3634 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3635 cpi->hba_misc |= PIM_NOINITIATOR;
3637 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3639 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3641 cpi->target_sprt = 0;
3643 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3644 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3645 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3646 cpi->unit_number = cam_sim_unit(sim);
3647 cpi->ccb_h.status = CAM_REQ_CMP;
3650 case XPT_EN_LUN: /* Enable LUN as a target */
3654 if (ccb->cel.enable)
3655 result = mpt_enable_lun(mpt,
3656 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3658 result = mpt_disable_lun(mpt,
3659 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3661 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3663 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3667 case XPT_NOTIFY_ACK: /* recycle notify ack */
3668 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
3669 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3671 tgt_resource_t *trtp;
3672 lun_id_t lun = ccb->ccb_h.target_lun;
3673 ccb->ccb_h.sim_priv.entries[0].field = 0;
3674 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3675 ccb->ccb_h.flags = 0;
3677 if (lun == CAM_LUN_WILDCARD) {
3678 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3679 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3682 trtp = &mpt->trt_wildcard;
3683 } else if (lun >= MPT_MAX_LUNS) {
3684 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3687 trtp = &mpt->trt[lun];
3689 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3690 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3691 "Put FREE ATIO %p lun %d\n", ccb, lun);
3692 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3694 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3695 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3696 "Put FREE INOT lun %d\n", lun);
3697 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3700 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3702 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3705 case XPT_CONT_TARGET_IO:
3706 mpt_target_start_io(mpt, ccb);
3710 ccb->ccb_h.status = CAM_REQ_INVALID;
3717 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3719 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3720 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3722 uint32_t dval, pval, oval;
3725 if (IS_CURRENT_SETTINGS(cts) == 0) {
3726 tgt = cts->ccb_h.target_id;
3727 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3728 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3732 tgt = cts->ccb_h.target_id;
3736 * We aren't looking at Port Page 2 BIOS settings here;
3737 * sometimes these have been known to be bogus XXX.
3739 * For user settings, we pick the max from port page 0
3741 * For current settings we read the current settings out from
3742 * device page 0 for that target.
3744 if (IS_CURRENT_SETTINGS(cts)) {
3745 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3748 tmp = mpt->mpt_dev_page0[tgt];
3749 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3750 sizeof(tmp), FALSE, 5000);
3752 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3755 mpt2host_config_page_scsi_device_0(&tmp);
3757 mpt_lprt(mpt, MPT_PRT_DEBUG,
3758 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3759 tmp.NegotiatedParameters, tmp.Information);
3760 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3761 DP_WIDE : DP_NARROW;
3762 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3763 DP_DISC_ENABLE : DP_DISC_DISABL;
3764 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3765 DP_TQING_ENABLE : DP_TQING_DISABL;
3766 oval = tmp.NegotiatedParameters;
3767 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3768 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3769 pval = tmp.NegotiatedParameters;
3770 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3771 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3772 mpt->mpt_dev_page0[tgt] = tmp;
3774 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3775 oval = mpt->mpt_port_page0.Capabilities;
3776 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3777 pval = mpt->mpt_port_page0.Capabilities;
3778 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3785 spi->sync_offset = oval;
3786 spi->sync_period = pval;
3787 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3788 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3789 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3790 if (dval & DP_WIDE) {
3791 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3793 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3795 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3796 scsi->valid = CTS_SCSI_VALID_TQ;
3797 if (dval & DP_TQING_ENABLE) {
3798 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3800 spi->valid |= CTS_SPI_VALID_DISC;
3801 if (dval & DP_DISC_ENABLE) {
3802 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3805 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3806 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3807 IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3812 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3814 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3816 ptr = &mpt->mpt_dev_page1[tgt];
3818 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3820 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3825 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3827 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3829 ptr = &mpt->mpt_dev_page1[tgt];
3830 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3831 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3832 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3833 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3834 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3838 ptr->RequestedParameters |=
3839 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3840 ptr->RequestedParameters |=
3841 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3843 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3846 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3847 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3852 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3854 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3857 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3858 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3859 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3860 tmp = mpt->mpt_dev_page1[tgt];
3861 host2mpt_config_page_scsi_device_1(&tmp);
3862 rv = mpt_write_cur_cfg_page(mpt, tgt,
3863 &tmp.Header, sizeof(tmp), FALSE, 5000);
3865 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3871 /****************************** Timeout Recovery ******************************/
3873 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3877 error = kthread_create(mpt_recovery_thread, mpt,
3878 &mpt->recovery_thread, "mpt_recovery%d", mpt->unit);
3883 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3886 if (mpt->recovery_thread == NULL) {
3889 mpt->shutdwn_recovery = 1;
3892 * Sleep on a slightly different location
3893 * for this interlock just for added safety.
3895 mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
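/*
 * Recovery thread body: sleep until timed-out requests show up on the
 * request_timeout_list (or shutdown is requested), then let
 * mpt_recover_commands() abort them one at a time.
 */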
3899 mpt_recovery_thread(void *arg)
3901 struct mpt_softc *mpt;
3903 mpt = (struct mpt_softc *)arg;
3906 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3907 if (mpt->shutdwn_recovery == 0) {
3908 mpt_sleep(mpt, mpt, 0, "idle", 0);
3911 if (mpt->shutdwn_recovery != 0) {
3914 mpt_recover_commands(mpt);
3916 mpt->recovery_thread = NULL;
3917 wakeup(&mpt->recovery_thread);
3923 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3924 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3926 MSG_SCSI_TASK_MGMT *tmf_req;
3930 * Wait for any current TMF request to complete.
3931 * We're only allowed to issue one TMF at a time.
3933 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3934 sleep_ok, MPT_TMF_MAX_TIMEOUT);
3936 mpt_reset(mpt, TRUE);
3940 mpt_assign_serno(mpt, mpt->tmf_req);
3941 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3943 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3944 memset(tmf_req, 0, sizeof(*tmf_req));
3945 tmf_req->TargetID = target;
3946 tmf_req->Bus = channel;
3947 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3948 tmf_req->TaskType = type;
3949 tmf_req->MsgFlags = flags;
3950 tmf_req->MsgContext =
3951 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3952 if (lun > MPT_MAX_LUNS) {
3953 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3954 tmf_req->LUN[1] = lun & 0xff;
3956 tmf_req->LUN[1] = lun;
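/*
 * For ABORT TASK the caller passes the MsgContext of the request to be
 * aborted in abort_ctx; reset-type TMFs don't name a specific task.
 */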
3958 tmf_req->TaskMsgContext = abort_ctx;
3960 mpt_lprt(mpt, MPT_PRT_DEBUG,
3961 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3962 mpt->tmf_req->serno, tmf_req->MsgContext);
3963 if (mpt->verbose > MPT_PRT_DEBUG) {
3964 mpt_print_request(tmf_req);
3967 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3968 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3969 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3970 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3971 if (error != MPT_OK) {
3972 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3973 mpt->tmf_req->state = REQ_STATE_FREE;
3974 mpt_reset(mpt, TRUE);
3980 * When a command times out, it is placed on the request_timeout_list
3981 * and we wake our recovery thread. The MPT-Fusion architecture supports
3982 * only a single TMF operation at a time, so we serially abort/bdr, etc.,
3983 * the timed-out transactions. The next TMF is issued either by the
3984 * completion handler of the current TMF waking our recovery thread,
3985 * or the TMF timeout handler causing a hard reset sequence.
3988 mpt_recover_commands(struct mpt_softc *mpt)
3994 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3996 * No work to do - leave.
3998 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4003 * Flush any commands whose completion coincides with their timeout.
4007 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4009 * The timedout commands have already
4010 * completed. This typically means
4011 * that either the timeout value was on
4012 * the hairy edge of what the device
4013 * requires or - more likely - interrupts
4014 * are not happening.
4016 mpt_prt(mpt, "Timedout requests already complete. "
4017 "Interrupts may not be functioning.\n");
4018 mpt_enable_ints(mpt);
4023 * We have no visibility into the current state of the
4024 * controller, so attempt to abort the commands in the
4025 * order they timed out. For initiator commands, we
4026 * depend on the reply handler pulling requests off
4029 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4032 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4034 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4035 req, req->serno, hdrp->Function);
4038 mpt_prt(mpt, "null ccb in timed out request. "
4039 "Resetting Controller.\n");
4040 mpt_reset(mpt, TRUE);
4043 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4046 * Check to see if this is not an initiator command and
4047 * deal with it differently if it is.
4049 switch (hdrp->Function) {
4050 case MPI_FUNCTION_SCSI_IO_REQUEST:
4051 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4055 * XXX: FIX ME: need to abort target assists...
4057 mpt_prt(mpt, "just putting it back on the pend q\n");
4058 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4059 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4064 error = mpt_scsi_send_tmf(mpt,
4065 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4066 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4067 htole32(req->index | scsi_io_handler_id), TRUE);
4071 * mpt_scsi_send_tmf hard resets on failure, so no
4072 * need to do so here. Our queue should be emptied
4073 * by the hard reset.
4078 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4079 REQ_STATE_DONE, TRUE, 500);
4081 status = le16toh(mpt->tmf_req->IOCStatus);
4082 response = mpt->tmf_req->ResponseCode;
4083 mpt->tmf_req->state = REQ_STATE_FREE;
4087 * If we've errored out, reset the controller.
4089 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4090 "Resetting controller\n");
4091 mpt_reset(mpt, TRUE);
4095 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4096 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4097 "Resetting controller.\n", status);
4098 mpt_reset(mpt, TRUE);
4102 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4103 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4104 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4105 "Resetting controller.\n", response);
4106 mpt_reset(mpt, TRUE);
4109 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4113 /************************ Target Mode Support ****************************/
4115 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4117 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4118 PTR_SGE_TRANSACTION32 tep;
4119 PTR_SGE_SIMPLE32 se;
4123 paddr = req->req_pbuf;
4124 paddr += MPT_RQSL(mpt);
4127 memset(fc, 0, MPT_REQUEST_AREA);
4128 fc->BufferCount = 1;
4129 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4130 fc->MsgContext = htole32(req->index | fc_els_handler_id);
4133 * Okay, set up ELS buffer pointers. ELS buffer pointers
4134 * consist of a TE SGL element (with details length of zero)
4135 * followed by a SIMPLE SGL element which holds the address
4139 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4141 tep->ContextSize = 4;
4143 tep->TransactionContext[0] = htole32(ioindex);
4145 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4147 MPI_SGE_FLAGS_HOST_TO_IOC |
4148 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4149 MPI_SGE_FLAGS_LAST_ELEMENT |
4150 MPI_SGE_FLAGS_END_OF_LIST |
4151 MPI_SGE_FLAGS_END_OF_BUFFER;
4152 fl <<= MPI_SGE_FLAGS_SHIFT;
4153 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
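	/*
	 * The low bits of FlagsLength carry the receive buffer length:
	 * everything in the request area beyond the request frame itself
	 * (MPT_NRFM - MPT_RQSL) is handed to the IOC as ELS buffer space.
	 */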
4154 se->FlagsLength = htole32(fl);
4155 se->Address = htole32((uint32_t) paddr);
4156 mpt_lprt(mpt, MPT_PRT_DEBUG,
4157 "add ELS index %d ioindex %d for %p:%u\n",
4158 req->index, ioindex, req, req->serno);
4159 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4160 ("mpt_fc_post_els: request not locked"));
4161 mpt_send_cmd(mpt, req);
4165 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4167 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4168 PTR_CMD_BUFFER_DESCRIPTOR cb;
4171 paddr = req->req_pbuf;
4172 paddr += MPT_RQSL(mpt);
4173 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4174 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4177 fc->BufferCount = 1;
4178 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4179 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4181 cb = &fc->Buffer[0];
4182 cb->IoIndex = htole16(ioindex);
4183 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4185 mpt_check_doorbell(mpt);
4186 mpt_send_cmd(mpt, req);
4190 mpt_add_els_buffers(struct mpt_softc *mpt)
4194 if (mpt->is_fc == 0) {
4198 if (mpt->els_cmds_allocated) {
4202 mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
4203 M_DEVBUF, M_NOWAIT | M_ZERO);
4205 if (mpt->els_cmd_ptrs == NULL) {
4210 * Feed the chip some ELS buffer resources
4212 for (i = 0; i < MPT_MAX_ELS; i++) {
4213 request_t *req = mpt_get_request(mpt, FALSE);
4217 req->state |= REQ_STATE_LOCKED;
4218 mpt->els_cmd_ptrs[i] = req;
4219 mpt_fc_post_els(mpt, req, i);
4223 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4224 kfree(mpt->els_cmd_ptrs, M_DEVBUF);
4225 mpt->els_cmd_ptrs = NULL;
4228 if (i != MPT_MAX_ELS) {
4229 mpt_lprt(mpt, MPT_PRT_INFO,
4230 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4232 mpt->els_cmds_allocated = i;
4237 mpt_add_target_commands(struct mpt_softc *mpt)
4241 if (mpt->tgt_cmd_ptrs) {
4245 max = MPT_MAX_REQUESTS(mpt) >> 1;
4246 if (max > mpt->mpt_max_tgtcmds) {
4247 max = mpt->mpt_max_tgtcmds;
4250 kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4251 if (mpt->tgt_cmd_ptrs == NULL) {
4253 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4257 for (i = 0; i < max; i++) {
4260 req = mpt_get_request(mpt, FALSE);
4264 req->state |= REQ_STATE_LOCKED;
4265 mpt->tgt_cmd_ptrs[i] = req;
4266 mpt_post_target_command(mpt, req, i);
4271 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4272 kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
4273 mpt->tgt_cmd_ptrs = NULL;
4277 mpt->tgt_cmds_allocated = i;
4280 mpt_lprt(mpt, MPT_PRT_INFO,
4281 "added %d of %d target bufs\n", i, max);
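/*
 * Enable a LUN for target mode.  Only target id 0 (or the wildcard
 * nexus) is accepted.  Enabling the first LUN also turns target mode
 * on and resets the FC link so that initiators notice the role change.
 */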
4287 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4290 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4292 } else if (lun >= MPT_MAX_LUNS) {
4294 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4297 if (mpt->tenabled == 0) {
4299 (void) mpt_fc_reset_link(mpt, 0);
4303 if (lun == CAM_LUN_WILDCARD) {
4304 mpt->trt_wildcard.enabled = 1;
4306 mpt->trt[lun].enabled = 1;
4312 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4316 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4318 } else if (lun >= MPT_MAX_LUNS) {
4320 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4323 if (lun == CAM_LUN_WILDCARD) {
4324 mpt->trt_wildcard.enabled = 0;
4326 mpt->trt[lun].enabled = 0;
4328 for (i = 0; i < MPT_MAX_LUNS; i++) {
4329 if (mpt->trt[i].enabled) {
4333 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4335 (void) mpt_fc_reset_link(mpt, 0);
4343 * Called with MPT lock held
4346 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4348 struct ccb_scsiio *csio = &ccb->csio;
4349 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4350 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4352 switch (tgt->state) {
4353 case TGT_STATE_IN_CAM:
4355 case TGT_STATE_MOVING_DATA:
4356 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4357 xpt_freeze_simq(mpt->sim, 1);
4358 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4359 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4363 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4364 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4365 mpt_tgt_dump_req_state(mpt, cmd_req);
4366 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4371 if (csio->dxfer_len) {
4372 bus_dmamap_callback_t *cb;
4373 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4376 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4377 ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4379 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4380 if (mpt->outofbeer == 0) {
4382 xpt_freeze_simq(mpt->sim, 1);
4383 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4385 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4386 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4390 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4391 if (sizeof (bus_addr_t) > 4) {
4392 cb = mpt_execute_req_a64;
4394 cb = mpt_execute_req;
4398 ccb->ccb_h.ccb_req_ptr = req;
4401 * Record the currently active ccb and the
4402 * request for it in our target state area.
4407 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4411 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4413 ta->QueueTag = ssp->InitiatorTag;
4414 } else if (mpt->is_spi) {
4415 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4417 ta->QueueTag = sp->Tag;
4419 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4420 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4421 ta->ReplyWord = htole32(tgt->reply_desc);
4422 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4424 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4425 ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4427 ta->LUN[1] = csio->ccb_h.target_lun;
4430 ta->RelativeOffset = tgt->bytes_xfered;
4431 ta->DataLength = ccb->csio.dxfer_len;
4432 if (ta->DataLength > tgt->resid) {
4433 ta->DataLength = tgt->resid;
4437 * XXX Should be done after data transfer completes?
4439 tgt->resid -= csio->dxfer_len;
4440 tgt->bytes_xfered += csio->dxfer_len;
4442 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4443 ta->TargetAssistFlags |=
4444 TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4447 #ifdef WE_TRUST_AUTO_GOOD_STATUS
4448 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4449 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4450 ta->TargetAssistFlags |=
4451 TARGET_ASSIST_FLAGS_AUTO_STATUS;
4454 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4456 mpt_lprt(mpt, MPT_PRT_DEBUG,
4457 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4458 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4459 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4461 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4462 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4465 error = bus_dmamap_load(mpt->buffer_dmat,
4466 req->dmap, csio->data_ptr, csio->dxfer_len,
4469 if (error == EINPROGRESS) {
4470 xpt_freeze_simq(mpt->sim, 1);
4471 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4475 * We have been given a pointer to a single
4478 struct bus_dma_segment seg;
4479 seg.ds_addr = (bus_addr_t)
4480 (vm_offset_t)csio->data_ptr;
4481 seg.ds_len = csio->dxfer_len;
4482 (*cb)(req, &seg, 1, 0);
4486 * We have been given a list of addresses.
4487 * This case could easily be supported, but such lists are not
4488 * currently generated by the CAM subsystem, so there
4489 * is no point in wasting the time right now.
4491 struct bus_dma_segment *sgs;
4492 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4493 (*cb)(req, NULL, 0, EFAULT);
4495 /* Just use the segments provided */
4496 sgs = (struct bus_dma_segment *)csio->data_ptr;
4497 (*cb)(req, sgs, csio->sglist_cnt, 0);
	} else {
		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

		/*
		 * XXX: I don't know why this seems to happen, but
		 * XXX: completing the CCB seems to make things happy.
		 * XXX: This seems to happen if the initiator requests
		 * XXX: enough data that we have to do multiple CTIOs.
		 */
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Meaningless STATUS CCB (%p): flags %x status %x "
			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			xpt_done(ccb);
			return;
		}
		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
			sp = sense;
			memcpy(sp, &csio->sense_data,
			    min(csio->sense_len, MPT_SENSE_SIZE));
		}
		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
	}
}
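/*
 * Send a locally generated data phase back to the initiator. This is used
 * below for the INQUIRY and REQUEST SENSE data synthesized for luns that
 * have no upstream listener.
 */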
static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    uint32_t lun, int send, uint8_t *data, size_t length)
{
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_ASSIST_REQUEST ta;
	request_t *req;
	SGE_SIMPLE32 *se;
	uint32_t flags;
	uint8_t *dptr;
	bus_addr_t pptr;

	/*
	 * We enter with resid set to the data load for the command.
	 */
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if (length == 0 || tgt->resid == 0) {
		tgt->resid = 0;
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
		return;
	}

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		mpt_prt(mpt, "out of resources- dropping local response\n");
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	tgt->is_local = 1;

	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
	ta = req->req_vbuf;

	if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
		ta->QueueTag = ssp->InitiatorTag;
	} else if (mpt->is_spi) {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
		ta->QueueTag = sp->Tag;
	}
	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	ta->ReplyWord = htole32(tgt->reply_desc);
	if (lun > MPT_MAX_LUNS) {
		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		ta->LUN[1] = lun & 0xff;
	} else {
		ta->LUN[1] = lun;
	}
	ta->RelativeOffset = 0;
	ta->DataLength = length;
	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
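	/*
	 * The payload is staged in the second half of this request's own
	 * buffer (MPT_RQSL(mpt) bytes in), so a single simple 32-bit SGE
	 * pointing at the matching physical address can describe it.
	 */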
	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));
	se->Address = htole32((uint32_t) pptr);
	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);
	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}
/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	int found = 0;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);

	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		trtp = &mpt->trt_wildcard;
	} else {
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	}
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &trtp->inots;
	} else {
		return (CAM_REQ_INVALID);
	}

	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch == &accb->ccb_h) {
			found = 1;
			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
			break;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}
/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
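	/*
	 * FC and SAS controllers take the abort through the normal request
	 * queue; SPI parts are handed the frame via the doorbell handshake.
	 */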
	error = 0;
	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
	}
	return (error);
}
/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */
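/*
 * Send SCSI status (and optionally sense data) back for the command tracked
 * by cmd_req. For Fibre Channel an FCP_RSP payload is staged in the status
 * request's buffer whenever there is a residual or a check condition;
 * otherwise the chip may be allowed to synthesize good status on its own.
 */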
static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}
	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * as words which are swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
		rsp[2] = status;
		if (tgt->resid) {
			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
			rsp[3] = htobe32(tgt->resid);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
		if (status == SCSI_STATUS_CHECK_COND) {
			int i;

			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}
	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
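	/*
	 * If this is a plain GOOD status with no response payload, let the
	 * chip auto-generate the status frame; otherwise point a simple SGE
	 * at the response buffer staged above.
	 */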
#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
			MPI_SGE_FLAGS_HOST_TO_IOC	|
			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
			MPI_SGE_FLAGS_LAST_ELEMENT	|
			MPI_SGE_FLAGS_END_OF_LIST	|
			MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);

	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}
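/*
 * Map a firmware task management request onto an immediate notify CCB
 * queued by the peripheral driver. If no XPT_IMMED_NOTIFY CCBs are
 * available, the initiator simply gets BUSY status back.
 */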
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immed_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
	inot->sense_len = 0;
	memset(inot->message_args, 0, sizeof (inot->message_args));
	inot->initiator_id = init_id;	/* XXX */
	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_ABORT_TASK_SET:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->message_args[0] = MSG_CLEAR_TASK_SET;
		break;
	case MPT_TARGET_RESET:
		inot->message_args[0] = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->message_args[0] = MSG_CLEAR_ACA;
		break;
	case MPT_TERMINATE_TASK:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	default:
		inot->message_args[0] = MSG_NOOP;
		break;
	}
	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV | CAM_DEV_QFRZN;
	xpt_done((union ccb *)inot);
}
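/*
 * Handle a newly arrived command: decode the protocol specific command
 * buffer (FC, SAS or SPI), locate the lun's target resources, and either
 * hand an ATIO up to CAM, synthesize a local response for an unmanaged
 * lun, or dispatch a task management request.
 */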
static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
	     '0',  '0',  '0',  '1'
	};
	struct ccb_accept_tio *atiop;
	uint32_t lun;
	int tag_action = 0;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	U8 *lunptr;
	U8 *vbuf;
	U16 itag;
	U16 ioindex;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
	U8 *cdbp;
	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}
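	/*
	 * Decode the transport specific command frame: CDB, lun, initiator
	 * tag, tag attributes and (for FC) any task management flags.
	 */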
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			case 0x80:
				fct = MPT_TERMINATE_TASK;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, 0, req,
				    SCSI_STATUS_OK, 0);
				return;
			}
		}
		switch (fc->FcpCntl[1]) {
		case 0:
			tag_action = MSG_SIMPLE_Q_TAG;
			break;
		case 1:
			tag_action = MSG_HEAD_OF_Q_TAG;
			break;
		case 2:
			tag_action = MSG_ORDERED_Q_TAG;
			break;
		default:
			/*
			 * Bah. Ignore Untagged Queueing and ACA
			 */
			tag_action = MSG_SIMPLE_Q_TAG;
			break;
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		itag = be16toh(fc->OptionalOxid);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		itag = sp->Tag;
	}
	/*
	 * Generate a simple lun
	 */
	switch (lunptr[0] & 0xc0) {
	case 0x40:
		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
		break;
	case 0:
		lun = lunptr[1];
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
		lun = 0xffff;
		break;
	}
	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
			size_t len;

			memset(buf, 0, MPT_SENSE_SIZE);
			cond = SCSI_STATUS_CHECK_COND;
			buf[0] = 0xf0;
			buf[2] = 0x5;
			buf[7] = 0x8;
			sp = buf;
			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5066 len = min(tgt->resid, cdbp[4]);
5067 len = min(len, sizeof (null_iqd));
5068 mpt_lprt(mpt, MPT_PRT_DEBUG,
5069 "local inquiry %ld bytes\n", (long) len);
5070 mpt_scsi_tgt_local(mpt, req, lun, 1,
5077 len = min(tgt->resid, cdbp[4]);
5078 len = min(len, sizeof (buf));
5079 mpt_lprt(mpt, MPT_PRT_DEBUG,
5080 "local reqsense %ld bytes\n", (long) len);
5081 mpt_scsi_tgt_local(mpt, req, lun, 1,
5086 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5090 mpt_lprt(mpt, MPT_PRT_DEBUG,
5091 "CMD 0x%x to unmanaged lun %u\n",
			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}

	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener\n",
			    fct);
			mpt_scsi_tgt_status(mpt, 0, req,
			    SCSI_STATUS_OK, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}
	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %u- sending back %s\n", lun,
		    mpt->tenabled ? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled ? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
	atiop->tag_action = tag_action;
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;
		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1)) ? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}
	xpt_done((union ccb *)atiop);
}
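/*
 * Debug helpers: dump the target-mode state associated with a request.
 */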
static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}
static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{

	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}
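/*
 * Reply handler for target mode requests. A NULL reply_frame is a "turbo"
 * context reply: the action is chosen from the per-request target state.
 * A full reply frame carries an IOCStatus and is dispatched on its
 * Function code below.
 */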
static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	U16 status;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch(tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;
			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;
			ccb = tgt->ccb;

			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;
			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);
			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
			break;
		}
		return (TRUE);
	}
	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);
	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);