/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.84 2012/02/11 12:03:44 marius Exp $
 */
#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#include <sys/sysctl.h>
#include <sys/callout.h>
#include <sys/kthread.h>
#ifndef CAM_NEW_TRAN_CODE
#define CAM_NEW_TRAN_CODE	1
#endif
static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
	MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.ready		= mpt_cam_ready,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
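
/*
 * Annotation (added during editing): TUNABLE_INT reads the value from the
 * kernel environment at boot, so the SATA write-cache policy would normally
 * be set from /boot/loader.conf, e.g.:
 *
 *	hw.mpt.enable_sata_wc="1"	# force SATA write caching on
 *	hw.mpt.enable_sata_wc="0"	# force SATA write caching off
 *
 * The default of -1 leaves each drive's current setting alone (see
 * mpt_set_initial_config_sas() below).
 */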

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock.
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_devq_release(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		goto cleanup;
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information.
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);

	ksnprintf(mpt->scinfo.fc.wwnn,
	    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low);

	ksnprintf(mpt->scinfo.fc.wwpn,
	    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low);

	SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
	    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
	    "World Wide Node Name");

	SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
	    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
	    "World Wide Port Name");

	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		kfree(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		kfree(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;
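
	/*
	 * Annotation (added during editing): the bytes above form a SATA
	 * host-to-device register FIS. 0x27 is the FIS type, 0x80 sets the
	 * C bit (command register update), and 0xef is the ATA SET FEATURES
	 * command. Byte 3 is the Features register: subcommand 0x02 enables
	 * the drive's write cache, 0x82 disables it. Byte 7 is the Device
	 * register and byte 15 the Control field, set to the values the
	 * firmware expects.
	 */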

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		kprintf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		kprintf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			device_printf(mpt->dev,
			    "%sabling SATA WC on phy %d\n",
			    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (reply_frame != NULL) {
		req->IOCStatus = le16toh(reply_frame->IOCStatus);
	}
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information.
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    "  Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
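
	/*
	 * Worked example (added during editing; assumes the response-ID
	 * shift is 16, as defined in mpi_cnfg.h): with an initiator ID of 7,
	 * pp1val is ((1 << 7) << 16) | 7 == 0x00800007 -- the upper word
	 * carries the response-ID bitmask, the low byte the port's SCSI ID.
	 */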

	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

static void
mpt_cam_ready(struct mpt_softc *mpt)
{

	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources.
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		kfree(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim);
		mpt->phydisk_sim = NULL;
	}
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt, 0);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	bus_addr_t chain_list_addr;
	int first_lim, seg, this_seg_lim;
	uint32_t addr, cur_off, flags, nxt_off, tf;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */
	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		tf = flags;
		memset(se, 0, sizeof (*se));
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
			/* SAS1078 36GB limitation WAR */
			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
				addr |= (1 << 31);
				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
			}
			se->Address.High = htole32(addr);
		}
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;
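
	/*
	 * Annotation (added during editing): ChainOffset is expressed in
	 * 32-bit words, hence the shift right by two above; e.g. a chain
	 * element that begins 40 bytes into the frame yields a ChainOffset
	 * of 10.
	 */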

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);
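
		/*
		 * Worked example (added during editing; assumes the usual
		 * 12-byte SGE_SIMPLE64 and SGE_CHAIN64 layouts from mpi.h):
		 * a chain list of 10 simple elements followed by another
		 * chain descriptor gets Length = 10 * 12 + 12 = 132 bytes
		 * and NextChainOffset = 120 >> 2 = 30 words.
		 */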

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			tf = flags;
			memset(se, 0, sizeof (*se));
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
				/* SAS1078 36GB limitation WAR */
				if (mpt->is_1078 &&
				    (((uint64_t)dm_segs->ds_addr +
				    MPI_SGE_LENGTH(se->FlagsLength)) >>
				    32) == 9) {
					addr |= (1 << 31);
					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
				}
				se->Address.High = htole32(addr);
			}
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			kprintf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */
	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address = htole32(dm_segs->ds_addr);
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;

		ce->Address = htole32(chain_list_addr);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address = htole32(dm_segs->ds_addr);
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			kprintf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	bus_dmamap_callback_t *cb;
	target_id_t tgt;
	int raid_passthru;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	CAMLOCK_2_MPTLOCK(mpt);
	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		return;
	}
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
	MPTLOCK_2_CAMLOCK(mpt);

	if (sizeof (bus_addr_t) > 4) {
		cb = mpt_execute_req_a64;
	} else {
		cb = mpt_execute_req;
	}

	/*
	 * Link the ccb and the request structure so we can find
	 * the other knowing either the request or the ccb
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru) {
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			MPTLOCK_2_CAMLOCK(mpt);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		MPTLOCK_2_CAMLOCK(mpt);
		mpt_req->Bus = 0;	/* we never set bus here */
	} else {
		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}

	mpt_req->SenseBufferLength =
	    (csio->sense_len < MPT_SENSE_SIZE) ?
	    csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
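
	/*
	 * Annotation (added during editing): the IOC echoes MsgContext back
	 * in its reply, so the interrupt path can recover this request from
	 * the index in the low bits and dispatch to the reply handler
	 * registered for the ID in the upper bits -- here, the handler
	 * registered as mpt_scsi_reply_handler.
	 */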

	/* Which physical device to do the I/O on */
	mpt_req->TargetID = tgt;

	/* We assume a single level LUN type */
	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
	} else {
		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
	}
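
	/*
	 * Worked example (added during editing; assumes MPT_MAX_LUNS is
	 * 256): LUN 300 (0x12c) does not fit peripheral (single-byte)
	 * addressing, so it is encoded with the flat addressing method:
	 * LUN[0] = 0x40 | 0x01 = 0x41 and LUN[1] = 0x2c. Smaller LUNs use
	 * peripheral addressing with LUN[0] left at zero.
	 */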
2160 /* Set the direction of the transfer */
2161 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2162 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2163 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2164 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2166 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2169 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2170 switch(ccb->csio.tag_action) {
2171 case MSG_HEAD_OF_Q_TAG:
2172 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2175 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2177 case MSG_ORDERED_Q_TAG:
2178 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2180 case MSG_SIMPLE_Q_TAG:
2182 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2186 if (mpt->is_fc || mpt->is_sas) {
2187 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2189 /* XXX No such thing for a target doing packetized. */
2190 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2195 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2196 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2199 mpt_req->Control = htole32(mpt_req->Control);
2201 /* Copy the scsi command block into place */
2202 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2203 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2205 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2208 mpt_req->CDBLength = csio->cdb_len;
2209 mpt_req->DataLength = htole32(csio->dxfer_len);
2210 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2213 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2215 if (mpt->verbose == MPT_PRT_DEBUG) {
2217 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2218 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2219 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2220 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2221 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2222 mpt_prtc(mpt, "(%s %u byte%s ",
2223 (df == MPI_SCSIIO_CONTROL_READ)?
2224 "read" : "write", csio->dxfer_len,
2225 (csio->dxfer_len == 1)? ")" : "s)");
2227 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2228 ccb->ccb_h.target_lun, req, req->serno);
2232 * If we have any data to send with this command map it into bus space.
2234 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2235 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2237 * We've been given a pointer to a single buffer.
2239 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
* Virtual address that needs to be translated into
* one or more physical address ranges.
2246 error = bus_dmamap_load(mpt->buffer_dmat,
2247 req->dmap, csio->data_ptr, csio->dxfer_len,
2250 if (error == EINPROGRESS) {
2252 * So as to maintain ordering,
2253 * freeze the controller queue
2254 * until our mapping is
2257 xpt_freeze_simq(mpt->sim, 1);
2258 ccbh->status |= CAM_RELEASE_SIMQ;
* We have been given a pointer to a single physical buffer.
2265 struct bus_dma_segment seg;
2267 (bus_addr_t)(vm_offset_t)csio->data_ptr;
2268 seg.ds_len = csio->dxfer_len;
2269 (*cb)(req, &seg, 1, 0);
* We have been given a list of addresses.
* This case could easily be supported, but such lists
* are not currently generated by the CAM subsystem, so
* there is no point in wasting the time right now.
2278 struct bus_dma_segment *segs;
2279 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2280 (*cb)(req, NULL, 0, EFAULT);
2282 /* Just use the segments provided */
2283 segs = (struct bus_dma_segment *)csio->data_ptr;
2284 (*cb)(req, segs, csio->sglist_cnt, 0);
2288 (*cb)(req, NULL, 0, 0);
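/*
* Recap of the mapping cases above: a virtual single buffer goes
* through bus_dmamap_load() (which may complete asynchronously,
* hence the SIMQ freeze on EINPROGRESS); a physical single buffer
* is wrapped in a one-entry segment array and handed straight to
* the callback; a physical S/G list is passed through as-is; a
* virtual S/G list is rejected with EFAULT; and the no-data case
* just above invokes the callback with zero segments.
*/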
2293 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2300 error = mpt_scsi_send_tmf(mpt,
2301 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2302 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2303 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2304 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2305 0, /* XXX How do I get the channel ID? */
2306 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2307 lun != CAM_LUN_WILDCARD ? lun : 0,
2312 * mpt_scsi_send_tmf hard resets on failure, so no
2313 * need to do so here.
2316 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2320 /* Wait for bus reset to be processed by the IOC. */
2321 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2322 REQ_STATE_DONE, sleep_ok, 5000);
2324 status = le16toh(mpt->tmf_req->IOCStatus);
2325 response = mpt->tmf_req->ResponseCode;
2326 mpt->tmf_req->state = REQ_STATE_FREE;
mpt_prt(mpt, "mpt_bus_reset: Reset timed out. "
"Resetting controller.\n");
2331 mpt_reset(mpt, TRUE);
2335 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2336 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2337 "Resetting controller.\n", status);
2338 mpt_reset(mpt, TRUE);
2342 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2343 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2344 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2345 "Resetting controller.\n", response);
2346 mpt_reset(mpt, TRUE);
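/*
* Every failure leg above (TMF wait timeout, bad IOCStatus, and an
* unexpected TMF response code) falls back to mpt_reset(), so the
* caller always ends up with a bus that has been reset one way or
* another.
*/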
2353 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2357 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2359 req = mpt_get_request(mpt, FALSE);
2364 memset(fc, 0, sizeof(*fc));
2365 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2366 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2367 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2368 mpt_send_cmd(mpt, req);
2370 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2371 REQ_STATE_DONE, FALSE, 60 * 1000);
2373 mpt_free_request(mpt, req);
2380 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
2382 xpt_free_path(ccb->ccb_h.path);
2387 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2388 MSG_EVENT_NOTIFY_REPLY *msg)
2390 uint32_t data0, data1;
2392 data0 = le32toh(msg->Data[0]);
2393 data1 = le32toh(msg->Data[1]);
2394 switch(msg->Event & 0xFF) {
2395 case MPI_EVENT_UNIT_ATTENTION:
2396 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2397 (data0 >> 8) & 0xff, data0 & 0xff);
2400 case MPI_EVENT_IOC_BUS_RESET:
2401 /* We generated a bus reset */
2402 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2403 (data0 >> 8) & 0xff);
2404 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2407 case MPI_EVENT_EXT_BUS_RESET:
2408 /* Someone else generated a bus reset */
2409 mpt_prt(mpt, "External Bus Reset Detected\n");
* These replies don't return EventData like the MPI
* version of this event does.
2414 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2417 case MPI_EVENT_RESCAN:
2422 * In general this means a device has been added to the loop.
2424 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2425 if (mpt->ready == 0) {
2428 if (mpt->phydisk_sim) {
2429 pathid = cam_sim_path(mpt->phydisk_sim);
2431 pathid = cam_sim_path(mpt->sim);
2433 MPTLOCK_2_CAMLOCK(mpt);
2435 * Allocate a CCB, create a wildcard path for this bus,
2436 * and schedule a rescan.
2438 ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
2440 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2441 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2442 CAMLOCK_2_MPTLOCK(mpt);
2443 mpt_prt(mpt, "unable to create path for rescan\n");
2448 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
2449 ccb->ccb_h.func_code = XPT_SCAN_BUS;
2450 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2451 ccb->crcn.flags = CAM_FLAG_NONE;
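/*
* The rescan is fire-and-forget: once the CCB is dispatched via
* xpt_action(), mpt_cam_rescan_callback() frees the path (and,
* judging by the kmalloc() pairing, the CCB itself) when the scan
* completes, so nothing is leaked on this side.
*/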
2454 /* scan is now in progress */
2456 CAMLOCK_2_MPTLOCK(mpt);
2459 case MPI_EVENT_LINK_STATUS_CHANGE:
2460 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2461 (data1 >> 8) & 0xff,
2462 ((data0 & 0xff) == 0)? "Failed" : "Active");
2465 case MPI_EVENT_LOOP_STATE_CHANGE:
2466 switch ((data0 >> 16) & 0xff) {
2469 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2470 "(Loop Initialization)\n",
2471 (data1 >> 8) & 0xff,
2472 (data0 >> 8) & 0xff,
2474 switch ((data0 >> 8) & 0xff) {
2476 if ((data0 & 0xff) == 0xF7) {
2477 mpt_prt(mpt, "Device needs AL_PA\n");
2479 mpt_prt(mpt, "Device %02x doesn't like "
2485 if ((data0 & 0xff) == 0xF7) {
2486 mpt_prt(mpt, "Device had loop failure "
2487 "at its receiver prior to acquiring"
2490 mpt_prt(mpt, "Device %02x detected loop"
2491 " failure at its receiver\n",
2496 mpt_prt(mpt, "Device %02x requests that device "
2497 "%02x reset itself\n",
2499 (data0 >> 8) & 0xFF);
2504 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2505 "LPE(%02x,%02x) (Loop Port Enable)\n",
2506 (data1 >> 8) & 0xff, /* Port */
2507 (data0 >> 8) & 0xff, /* Character 3 */
2508 (data0 ) & 0xff /* Character 4 */);
2511 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2512 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2513 (data1 >> 8) & 0xff, /* Port */
2514 (data0 >> 8) & 0xff, /* Character 3 */
2515 (data0 ) & 0xff /* Character 4 */);
2518 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2519 "FC event (%02x %02x %02x)\n",
2520 (data1 >> 8) & 0xff, /* Port */
2521 (data0 >> 16) & 0xff, /* Event */
2522 (data0 >> 8) & 0xff, /* Character 3 */
2523 (data0 ) & 0xff /* Character 4 */);
2527 case MPI_EVENT_LOGOUT:
2528 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2529 (data1 >> 8) & 0xff, data0);
2531 case MPI_EVENT_QUEUE_FULL:
2533 struct cam_sim *sim;
2534 struct cam_path *tmppath;
2535 struct ccb_relsim crs;
2536 PTR_EVENT_DATA_QUEUE_FULL pqf;
2539 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2540 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2541 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2542 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2543 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2544 pqf->TargetID) != 0) {
2545 sim = mpt->phydisk_sim;
2549 MPTLOCK_2_CAMLOCK(mpt);
2550 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2551 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2552 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2553 mpt_prt(mpt, "unable to create a path to send "
2555 CAMLOCK_2_MPTLOCK(mpt);
2558 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2559 crs.ccb_h.func_code = XPT_REL_SIMQ;
2560 crs.ccb_h.flags = CAM_DEV_QFREEZE;
2561 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2562 crs.openings = pqf->CurrentDepth - 1;
2563 xpt_action((union ccb *)&crs);
2564 if (crs.ccb_h.status != CAM_REQ_CMP) {
2565 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2567 xpt_free_path(tmppath);
2569 CAMLOCK_2_MPTLOCK(mpt);
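/*
* RELSIM_ADJUST_OPENINGS with openings = CurrentDepth - 1 asks CAM
* to keep one less command outstanding than the depth at which the
* IOC reported queue-full; the adjustment is repeated for each LUN
* since openings are a per-device (per-path) property in CAM.
*/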
2572 case MPI_EVENT_IR_RESYNC_UPDATE:
2573 mpt_prt(mpt, "IR resync update %d completed\n",
2574 (data0 >> 16) & 0xff);
2576 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2579 struct cam_sim *sim;
2580 struct cam_path *tmppath;
2581 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2583 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2584 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2585 psdsc->TargetID) != 0)
2586 sim = mpt->phydisk_sim;
2589 switch(psdsc->ReasonCode) {
2590 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2591 MPTLOCK_2_CAMLOCK(mpt);
2592 ccb = kmalloc(sizeof(union ccb), M_TEMP,
2594 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2595 cam_sim_path(sim), psdsc->TargetID,
2596 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2597 CAMLOCK_2_MPTLOCK(mpt);
2599 "unable to create path for rescan\n");
2603 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
2604 5/*priority (low)*/);
2605 ccb->ccb_h.func_code = XPT_SCAN_BUS;
2606 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2607 ccb->crcn.flags = CAM_FLAG_NONE;
2609 CAMLOCK_2_MPTLOCK(mpt);
2611 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2612 MPTLOCK_2_CAMLOCK(mpt);
2613 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2614 psdsc->TargetID, CAM_LUN_WILDCARD) !=
2617 "unable to create path for async event");
2618 CAMLOCK_2_MPTLOCK(mpt);
2621 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2622 xpt_free_path(tmppath);
2623 CAMLOCK_2_MPTLOCK(mpt);
2625 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2626 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2627 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2630 mpt_lprt(mpt, MPT_PRT_WARN,
2631 "SAS device status change: Bus: 0x%02x TargetID: "
2632 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2633 psdsc->TargetID, psdsc->ReasonCode);
2638 case MPI_EVENT_SAS_DISCOVERY_ERROR:
2640 PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2642 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2643 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2644 mpt_lprt(mpt, MPT_PRT_WARN,
2645 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2646 pde->Port, pde->DiscoveryStatus);
2649 case MPI_EVENT_EVENT_CHANGE:
2650 case MPI_EVENT_INTEGRATED_RAID:
2652 case MPI_EVENT_LOG_ENTRY_ADDED:
2653 case MPI_EVENT_SAS_DISCOVERY:
2654 case MPI_EVENT_SAS_PHY_LINK_STATUS:
2655 case MPI_EVENT_SAS_SES:
2658 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2666 * Reply path for all SCSI I/O requests, called from our
2667 * interrupt handler by extracting our handler index from
2668 * the MsgContext field of the reply from the IOC.
2670 * This routine is optimized for the common case of a
2671 * completion without error. All exception handling is
2672 * offloaded to non-inlined helper routines to minimize
2676 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2677 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2679 MSG_SCSI_IO_REQUEST *scsi_req;
2682 if (req->state == REQ_STATE_FREE) {
2683 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2687 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2690 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2695 mpt_req_untimeout(req, mpt_timeout, ccb);
2696 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2698 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2699 bus_dmasync_op_t op;
2701 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2702 op = BUS_DMASYNC_POSTREAD;
2704 op = BUS_DMASYNC_POSTWRITE;
2705 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2706 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2709 if (reply_frame == NULL) {
2711 * Context only reply, completion without error status.
2713 ccb->csio.resid = 0;
2714 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2715 ccb->csio.scsi_status = SCSI_STATUS_OK;
2717 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2720 if (mpt->outofbeer) {
2721 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2723 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2725 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2726 struct scsi_inquiry_data *iq =
2727 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2728 if (scsi_req->Function ==
2729 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2731 * Fake out the device type so that only the
2732 * pass-thru device will attach.
2734 iq->device &= ~0x1F;
2735 iq->device |= T_NODEVICE;
2738 if (mpt->verbose == MPT_PRT_DEBUG) {
2739 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2742 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2743 MPTLOCK_2_CAMLOCK(mpt);
2745 CAMLOCK_2_MPTLOCK(mpt);
2746 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2747 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2749 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2751 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2753 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2754 ("CCB req needed wakeup"));
2756 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2758 mpt_free_request(mpt, req);
2763 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2764 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2766 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2768 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2770 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2772 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2773 /* Record IOC Status and Response Code of TMF for any waiters. */
2774 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2775 req->ResponseCode = tmf_reply->ResponseCode;
2777 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2778 req, req->serno, le16toh(tmf_reply->IOCStatus));
2779 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2780 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2781 req->state |= REQ_STATE_DONE;
2784 mpt->tmf_req->state = REQ_STATE_FREE;
2790 * XXX: Move to definitions file
2808 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2809 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2812 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2813 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2816 * We are going to reuse the ELS request to send this response back.
2819 memset(rsp, 0, sizeof(*rsp));
2821 #ifdef USE_IMMEDIATE_LINK_DATA
* The IMMEDIATE stuff doesn't seem to work.
2825 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2827 rsp->RspLength = length;
2828 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2829 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2832 * Copy over information from the original reply frame to
* its correct place in the response.
2835 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2838 * And now copy back the temporary area to the original frame.
2840 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2841 rsp = req->req_vbuf;
2843 #ifdef USE_IMMEDIATE_LINK_DATA
2844 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2847 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2848 bus_addr_t paddr = req->req_pbuf;
2849 paddr += MPT_RQSL(mpt);
2852 MPI_SGE_FLAGS_HOST_TO_IOC |
2853 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2854 MPI_SGE_FLAGS_LAST_ELEMENT |
2855 MPI_SGE_FLAGS_END_OF_LIST |
2856 MPI_SGE_FLAGS_END_OF_BUFFER;
2857 fl <<= MPI_SGE_FLAGS_SHIFT;
2859 se->FlagsLength = htole32(fl);
2860 se->Address = htole32((uint32_t) paddr);
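/*
* The single SIMPLE32 element above points at the response payload
* placed just past the request frame (req_pbuf + MPT_RQSL(mpt)).
* The flags occupy the top byte of FlagsLength (hence the shift by
* MPI_SGE_FLAGS_SHIFT) and mark this as the only element and the
* end of both list and buffer; the payload length rides in the
* low bits.
*/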
2867 mpt_send_cmd(mpt, req);
2871 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2872 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2874 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2875 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2879 U16 status = le16toh(reply_frame->IOCStatus);
2882 int do_refresh = TRUE;
2885 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2886 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2887 req, req->serno, rp->Function));
2888 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2889 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2891 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2894 mpt_lprt(mpt, MPT_PRT_DEBUG,
2895 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2896 req, req->serno, reply_frame, reply_frame->Function);
2898 if (status != MPI_IOCSTATUS_SUCCESS) {
2899 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2900 status, reply_frame->Function);
2901 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2903 * XXX: to get around shutdown issue
* If the function is that of a link service response, we recycle
* the request to refresh it as a new link service buffer post.
2915 * The request pointer is bogus in this case and we have to fetch
2916 * it based upon the TransactionContext.
2918 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2919 /* Freddie Uncle Charlie Katie */
2920 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2921 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2922 if (mpt->els_cmd_ptrs[ioindex] == req) {
2926 KASSERT(ioindex < mpt->els_cmds_allocated,
2927 ("can't find my mommie!"));
2929 /* remove from active list as we're going to re-post it */
2930 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2931 req->state &= ~REQ_STATE_QUEUED;
2932 req->state |= REQ_STATE_DONE;
2933 mpt_fc_post_els(mpt, req, ioindex);
2937 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2938 /* remove from active list as we're done */
2939 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2940 req->state &= ~REQ_STATE_QUEUED;
2941 req->state |= REQ_STATE_DONE;
2942 if (req->state & REQ_STATE_TIMEDOUT) {
2943 mpt_lprt(mpt, MPT_PRT_DEBUG,
2944 "Sync Primitive Send Completed After Timeout\n");
2945 mpt_free_request(mpt, req);
2946 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2947 mpt_lprt(mpt, MPT_PRT_DEBUG,
2948 "Async Primitive Send Complete\n");
2949 mpt_free_request(mpt, req);
2951 mpt_lprt(mpt, MPT_PRT_DEBUG,
2952 "Sync Primitive Send Complete- Waking Waiter\n");
2958 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2959 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2960 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2961 rp->MsgLength, rp->MsgFlags);
2965 if (rp->MsgLength <= 5) {
* This is just an ack of an original ELS buffer post
2969 mpt_lprt(mpt, MPT_PRT_DEBUG,
2970 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2975 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2976 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2978 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2979 cmd = be32toh(elsbuf[0]) >> 24;
2981 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2982 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2986 ioindex = le32toh(rp->TransactionContext);
2987 req = mpt->els_cmd_ptrs[ioindex];
2989 if (rctl == ELS && type == 1) {
2993 * Send back a PRLI ACC
2995 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2996 le32toh(rp->Wwn.PortNameHigh),
2997 le32toh(rp->Wwn.PortNameLow));
2998 elsbuf[0] = htobe32(0x02100014);
2999 elsbuf[1] |= htobe32(0x00000100);
3000 elsbuf[4] = htobe32(0x00000002);
3001 if (mpt->role & MPT_ROLE_TARGET)
3002 elsbuf[4] |= htobe32(0x00000010);
3003 if (mpt->role & MPT_ROLE_INITIATOR)
3004 elsbuf[4] |= htobe32(0x00000020);
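/*
* A best-effort reading of the canned words above: elsbuf[0] is the
* LS_ACC header for the PRLI accept (payload length 0x14 matches
* the 20 bytes sent below) and elsbuf[4] is the FCP service
* parameter word, where 0x10 advertises the target function and
* 0x20 the initiator function, mirroring the roles enabled on
* this port.
*/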
3005 /* remove from active list as we're done */
3006 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3007 req->state &= ~REQ_STATE_QUEUED;
3008 req->state |= REQ_STATE_DONE;
3009 mpt_fc_els_send_response(mpt, req, rp, 20);
3013 memset(elsbuf, 0, 5 * (sizeof (U32)));
3014 elsbuf[0] = htobe32(0x02100014);
3015 elsbuf[1] = htobe32(0x08000100);
3016 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
3017 le32toh(rp->Wwn.PortNameHigh),
3018 le32toh(rp->Wwn.PortNameLow));
3019 /* remove from active list as we're done */
3020 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3021 req->state &= ~REQ_STATE_QUEUED;
3022 req->state |= REQ_STATE_DONE;
3023 mpt_fc_els_send_response(mpt, req, rp, 20);
3027 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
3030 } else if (rctl == ABTS && type == 0) {
3031 uint16_t rx_id = le16toh(rp->Rxid);
3032 uint16_t ox_id = le16toh(rp->Oxid);
3033 request_t *tgt_req = NULL;
3036 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
3037 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
3038 le32toh(rp->Wwn.PortNameLow));
3039 if (rx_id >= mpt->mpt_max_tgtcmds) {
3040 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
3041 } else if (mpt->tgt_cmd_ptrs == NULL) {
3042 mpt_prt(mpt, "No TGT CMD PTRS\n");
3044 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
3047 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
* Check to make sure we have the correct command:
* the reply descriptor in the target state should
* contain an IoIndex that matches the RX_ID.
*
* It'd be nice to have the OX_ID to crosscheck with as well.
3060 ct_id = GET_IO_INDEX(tgt->reply_desc);
3062 if (ct_id != rx_id) {
3063 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
3064 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
3072 "CCB (%p): lun %u flags %x status %x\n",
3073 ccb, ccb->ccb_h.target_lun,
3074 ccb->ccb_h.flags, ccb->ccb_h.status);
3076 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
3077 "%x nxfers %x\n", tgt->state,
3078 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
3081 if (mpt_abort_target_cmd(mpt, tgt_req)) {
3082 mpt_prt(mpt, "unable to start TargetAbort\n");
3085 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3087 memset(elsbuf, 0, 5 * (sizeof (U32)));
3088 elsbuf[0] = htobe32(0);
3089 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3090 elsbuf[2] = htobe32(0x000ffff);
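/*
* This reads as a minimal BA_ACC payload: word 0 zero (no SEQ_ID
* validity), word 1 the originator/responder exchange IDs, and
* word 2 a low/high SEQ_CNT range of 0..0xffff, i.e. the whole
* exchange is aborted.
*/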
* Dork with the reply frame so that the response to it
* goes out as a BA_ACC rather than an ABTS.
3095 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3096 /* remove from active list as we're done */
3097 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3098 req->state &= ~REQ_STATE_QUEUED;
3099 req->state |= REQ_STATE_DONE;
3100 mpt_fc_els_send_response(mpt, req, rp, 12);
3103 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3105 if (do_refresh == TRUE) {
3106 /* remove from active list as we're done */
3107 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3108 req->state &= ~REQ_STATE_QUEUED;
3109 req->state |= REQ_STATE_DONE;
3110 mpt_fc_post_els(mpt, req, ioindex);
3116 * Clean up all SCSI Initiator personality state in response
3117 * to a controller reset.
3120 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3124 * The pending list is already run down by
3125 * the generic handler. Perform the same
3126 * operation on the timed out request list.
3128 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3129 MPI_IOCSTATUS_INVALID_STATE);
* XXX: Do we need to repost ELS and Target Command Buffers?
3136 * Inform the XPT that a bus reset has occurred.
3138 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3142 * Parse additional completion information in the reply
3143 * frame for SCSI I/O requests.
3146 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3147 MSG_DEFAULT_REPLY *reply_frame)
3150 MSG_SCSI_IO_REPLY *scsi_io_reply;
3154 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3155 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3156 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3157 ("MPT SCSI I/O Handler called with incorrect reply type"));
3158 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3159 ("MPT SCSI I/O Handler called with continuation reply"));
3161 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3162 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3163 ioc_status &= MPI_IOCSTATUS_MASK;
3164 sstate = scsi_io_reply->SCSIState;
3168 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
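/* e.g. a 4096-byte request that moved 3072 bytes completes with resid 1024 */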
3170 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3171 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3172 uint32_t sense_returned;
3174 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3176 sense_returned = le32toh(scsi_io_reply->SenseCount);
3177 if (sense_returned < ccb->csio.sense_len)
3178 ccb->csio.sense_resid = ccb->csio.sense_len -
3181 ccb->csio.sense_resid = 0;
3183 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3184 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3185 min(ccb->csio.sense_len, sense_returned));
3188 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3190 * Tag messages rejected, but non-tagged retry
3193 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3197 switch(ioc_status) {
3198 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3201 * Linux driver indicates that a zero
3202 * transfer length with this error code
3203 * indicates a CRC error.
* No need to swap the bytes for checking against zero.
3208 if (scsi_io_reply->TransferCount == 0) {
3209 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3213 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3214 case MPI_IOCSTATUS_SUCCESS:
3215 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3216 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3218 * Status was never returned for this transaction.
3220 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3221 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3222 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3223 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3224 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3225 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3226 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3228 /* XXX Handle SPI-Packet and FCP-2 response info. */
3229 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3231 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3233 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3234 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3236 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3237 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3239 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3241 * Since selection timeouts and "device really not
3242 * there" are grouped into this error code, report
3243 * selection timeout. Selection timeouts are
3244 * typically retried before giving up on the device
* whereas "device not there" errors are considered
* unretryable.
3248 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3250 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3251 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3253 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3254 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3256 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3257 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3259 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3260 ccb->ccb_h.status = CAM_UA_TERMIO;
3262 case MPI_IOCSTATUS_INVALID_STATE:
3264 * The IOC has been reset. Emulate a bus reset.
3267 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3268 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3270 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3271 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3273 * Don't clobber any timeout status that has
3274 * already been set for this transaction. We
3275 * want the SCSI layer to be able to differentiate
3276 * between the command we aborted due to timeout
3277 * and any innocent bystanders.
3279 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3281 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3284 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3285 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3287 case MPI_IOCSTATUS_BUSY:
3288 mpt_set_ccb_status(ccb, CAM_BUSY);
3290 case MPI_IOCSTATUS_INVALID_FUNCTION:
3291 case MPI_IOCSTATUS_INVALID_SGL:
3292 case MPI_IOCSTATUS_INTERNAL_ERROR:
3293 case MPI_IOCSTATUS_INVALID_FIELD:
* Some of the above may need to kick
* off a recovery action!
3299 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3303 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3304 mpt_freeze_ccb(ccb);
3311 mpt_action(struct cam_sim *sim, union ccb *ccb)
3313 struct mpt_softc *mpt;
3314 struct ccb_trans_settings *cts;
3319 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3321 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3322 raid_passthru = (sim == mpt->phydisk_sim);
3323 MPT_LOCK_ASSERT(mpt);
3325 tgt = ccb->ccb_h.target_id;
3326 lun = ccb->ccb_h.target_lun;
3327 if (raid_passthru &&
3328 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3329 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3330 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3331 CAMLOCK_2_MPTLOCK(mpt);
3332 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3333 MPTLOCK_2_CAMLOCK(mpt);
3334 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3335 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3339 MPTLOCK_2_CAMLOCK(mpt);
3341 ccb->ccb_h.ccb_mpt_ptr = mpt;
3343 switch (ccb->ccb_h.func_code) {
3344 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3346 * Do a couple of preliminary checks...
3348 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3349 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3350 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3351 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3355 /* Max supported CDB length is 16 bytes */
/* XXX Unless we implement the new 32-byte message type */
3357 if (ccb->csio.cdb_len >
3358 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3359 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3360 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3363 #ifdef MPT_TEST_MULTIPATH
3364 if (mpt->failure_id == ccb->ccb_h.target_id) {
3365 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3366 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3370 ccb->csio.scsi_status = SCSI_STATUS_OK;
3371 mpt_start(sim, ccb);
3375 if (raid_passthru) {
3376 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3377 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3381 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3383 xpt_print(ccb->ccb_h.path, "reset bus\n");
3386 xpt_print(ccb->ccb_h.path, "reset device\n");
3388 CAMLOCK_2_MPTLOCK(mpt);
3389 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3390 MPTLOCK_2_CAMLOCK(mpt);
3393 * mpt_bus_reset is always successful in that it
3394 * will fall back to a hard reset should a bus
3395 * reset attempt fail.
3397 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3398 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3403 union ccb *accb = ccb->cab.abort_ccb;
3404 CAMLOCK_2_MPTLOCK(mpt);
3405 switch (accb->ccb_h.func_code) {
3406 case XPT_ACCEPT_TARGET_IO:
3407 case XPT_IMMED_NOTIFY:
3408 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3410 case XPT_CONT_TARGET_IO:
3411 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3412 ccb->ccb_h.status = CAM_UA_ABORT;
3415 ccb->ccb_h.status = CAM_UA_ABORT;
3418 ccb->ccb_h.status = CAM_REQ_INVALID;
3421 MPTLOCK_2_CAMLOCK(mpt);
3425 #ifdef CAM_NEW_TRAN_CODE
3426 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3428 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3430 #define DP_DISC_ENABLE 0x1
3431 #define DP_DISC_DISABL 0x2
3432 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3434 #define DP_TQING_ENABLE 0x4
3435 #define DP_TQING_DISABL 0x8
3436 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3438 #define DP_WIDE 0x10
3439 #define DP_NARROW 0x20
3440 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3442 #define DP_SYNC 0x40
3444 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3446 #ifdef CAM_NEW_TRAN_CODE
3447 struct ccb_trans_settings_scsi *scsi;
3448 struct ccb_trans_settings_spi *spi;
3457 if (mpt->is_fc || mpt->is_sas) {
3458 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3462 #ifdef CAM_NEW_TRAN_CODE
3463 scsi = &cts->proto_specific.scsi;
3464 spi = &cts->xport_specific.spi;
* We can be called just to validate transport and proto versions
3469 if (scsi->valid == 0 && spi->valid == 0) {
3470 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3476 * Skip attempting settings on RAID volume disks.
3477 * Other devices on the bus get the normal treatment.
3479 if (mpt->phydisk_sim && raid_passthru == 0 &&
3480 mpt_is_raid_volume(mpt, tgt) != 0) {
3481 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3482 "no transfer settings for RAID vols\n");
3483 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3487 i = mpt->mpt_port_page2.PortSettings &
3488 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3489 j = mpt->mpt_port_page2.PortFlags &
3490 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3491 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3492 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3493 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3494 "honoring BIOS transfer negotiations\n");
3495 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3503 #ifndef CAM_NEW_TRAN_CODE
3504 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3505 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3506 DP_DISC_ENABLE : DP_DISC_DISABL;
3509 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3510 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3511 DP_TQING_ENABLE : DP_TQING_DISABL;
3514 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3515 dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3518 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3519 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3521 period = cts->sync_period;
3522 offset = cts->sync_offset;
3525 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3526 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3527 DP_DISC_ENABLE : DP_DISC_DISABL;
3530 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3531 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3532 DP_TQING_ENABLE : DP_TQING_DISABL;
3535 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3536 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3537 DP_WIDE : DP_NARROW;
3540 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3542 offset = spi->sync_offset;
3544 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3545 &mpt->mpt_dev_page1[tgt];
3546 offset = ptr->RequestedParameters;
3547 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3548 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3550 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3552 period = spi->sync_period;
3554 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3555 &mpt->mpt_dev_page1[tgt];
3556 period = ptr->RequestedParameters;
3557 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3558 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3561 CAMLOCK_2_MPTLOCK(mpt);
3562 if (dval & DP_DISC_ENABLE) {
3563 mpt->mpt_disc_enable |= (1 << tgt);
3564 } else if (dval & DP_DISC_DISABL) {
3565 mpt->mpt_disc_enable &= ~(1 << tgt);
3567 if (dval & DP_TQING_ENABLE) {
3568 mpt->mpt_tag_enable |= (1 << tgt);
3569 } else if (dval & DP_TQING_DISABL) {
3570 mpt->mpt_tag_enable &= ~(1 << tgt);
3572 if (dval & DP_WIDTH) {
3573 mpt_setwidth(mpt, tgt, 1);
3575 if (dval & DP_SYNC) {
3576 mpt_setsync(mpt, tgt, period, offset);
3579 MPTLOCK_2_CAMLOCK(mpt);
3580 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3583 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3584 "set [%d]: 0x%x period 0x%x offset %d\n",
3585 tgt, dval, period, offset);
3586 if (mpt_update_spi_config(mpt, tgt)) {
3587 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3589 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3591 MPTLOCK_2_CAMLOCK(mpt);
3594 case XPT_GET_TRAN_SETTINGS:
3596 #ifdef CAM_NEW_TRAN_CODE
3597 struct ccb_trans_settings_scsi *scsi;
3599 cts->protocol = PROTO_SCSI;
3601 struct ccb_trans_settings_fc *fc =
3602 &cts->xport_specific.fc;
3603 cts->protocol_version = SCSI_REV_SPC;
3604 cts->transport = XPORT_FC;
3605 cts->transport_version = 0;
3606 fc->valid = CTS_FC_VALID_SPEED;
3607 fc->bitrate = 100000;
3608 } else if (mpt->is_sas) {
3609 struct ccb_trans_settings_sas *sas =
3610 &cts->xport_specific.sas;
3611 cts->protocol_version = SCSI_REV_SPC2;
3612 cts->transport = XPORT_SAS;
3613 cts->transport_version = 0;
3614 sas->valid = CTS_SAS_VALID_SPEED;
3615 sas->bitrate = 300000;
3617 cts->protocol_version = SCSI_REV_2;
3618 cts->transport = XPORT_SPI;
3619 cts->transport_version = 2;
3620 if (mpt_get_spi_settings(mpt, cts) != 0) {
3621 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3625 scsi = &cts->proto_specific.scsi;
3626 scsi->valid = CTS_SCSI_VALID_TQ;
3627 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3631 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3632 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3633 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3634 } else if (mpt->is_sas) {
3635 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3636 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3637 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3638 } else if (mpt_get_spi_settings(mpt, cts) != 0) {
3639 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3643 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3646 case XPT_CALC_GEOMETRY:
3648 struct ccb_calc_geometry *ccg;
3651 if (ccg->block_size == 0) {
3652 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3653 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3656 mpt_calc_geometry(ccg, /*extended*/1);
3657 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3660 case XPT_PATH_INQ: /* Path routing inquiry */
3662 struct ccb_pathinq *cpi = &ccb->cpi;
3664 cpi->version_num = 1;
3665 cpi->target_sprt = 0;
3666 cpi->hba_eng_cnt = 0;
3667 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3668 #if 0 /* XXX swildner */
3669 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3672 * FC cards report MAX_DEVICES of 512, but
3673 * the MSG_SCSI_IO_REQUEST target id field
3674 * is only 8 bits. Until we fix the driver
* to support 'channels' for bus overflow, just limit it.
3678 if (cpi->max_target > 255) {
3679 cpi->max_target = 255;
3683 * VMware ESX reports > 16 devices and then dies when we probe.
3685 if (mpt->is_spi && cpi->max_target > 15) {
3686 cpi->max_target = 15;
3691 cpi->max_lun = MPT_MAX_LUNS;
3692 cpi->initiator_id = mpt->mpt_ini_id;
3693 cpi->bus_id = cam_sim_bus(sim);
3696 * The base speed is the speed of the underlying connection.
3698 #ifdef CAM_NEW_TRAN_CODE
3699 cpi->protocol = PROTO_SCSI;
3701 cpi->hba_misc = PIM_NOBUSRESET;
3702 cpi->base_transfer_speed = 100000;
3703 cpi->hba_inquiry = PI_TAG_ABLE;
3704 cpi->transport = XPORT_FC;
3705 cpi->transport_version = 0;
3706 cpi->protocol_version = SCSI_REV_SPC;
3707 } else if (mpt->is_sas) {
3708 cpi->hba_misc = PIM_NOBUSRESET;
3709 cpi->base_transfer_speed = 300000;
3710 cpi->hba_inquiry = PI_TAG_ABLE;
3711 cpi->transport = XPORT_SAS;
3712 cpi->transport_version = 0;
3713 cpi->protocol_version = SCSI_REV_SPC2;
3715 cpi->hba_misc = PIM_SEQSCAN;
3716 cpi->base_transfer_speed = 3300;
3717 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3718 cpi->transport = XPORT_SPI;
3719 cpi->transport_version = 2;
3720 cpi->protocol_version = SCSI_REV_2;
3724 cpi->hba_misc = PIM_NOBUSRESET;
3725 cpi->base_transfer_speed = 100000;
3726 cpi->hba_inquiry = PI_TAG_ABLE;
3727 } else if (mpt->is_sas) {
3728 cpi->hba_misc = PIM_NOBUSRESET;
3729 cpi->base_transfer_speed = 300000;
3730 cpi->hba_inquiry = PI_TAG_ABLE;
3732 cpi->hba_misc = PIM_SEQSCAN;
3733 cpi->base_transfer_speed = 3300;
3734 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
* We give our fake RAID passthru bus a width that is MaxVolumes
* wide and restrict it to one lun.
3742 if (raid_passthru) {
3743 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3744 cpi->initiator_id = cpi->max_target + 1;
3748 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3749 cpi->hba_misc |= PIM_NOINITIATOR;
3751 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3753 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3755 cpi->target_sprt = 0;
3757 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3758 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3759 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3760 cpi->unit_number = cam_sim_unit(sim);
3761 cpi->ccb_h.status = CAM_REQ_CMP;
3764 case XPT_EN_LUN: /* Enable LUN as a target */
3768 CAMLOCK_2_MPTLOCK(mpt);
3769 if (ccb->cel.enable)
3770 result = mpt_enable_lun(mpt,
3771 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3773 result = mpt_disable_lun(mpt,
3774 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3775 MPTLOCK_2_CAMLOCK(mpt);
3777 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3779 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3783 case XPT_NOTIFY_ACK: /* recycle notify ack */
3784 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
3785 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3787 tgt_resource_t *trtp;
3788 lun_id_t lun = ccb->ccb_h.target_lun;
3789 ccb->ccb_h.sim_priv.entries[0].field = 0;
3790 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3791 ccb->ccb_h.flags = 0;
3793 if (lun == CAM_LUN_WILDCARD) {
3794 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3795 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3798 trtp = &mpt->trt_wildcard;
3799 } else if (lun >= MPT_MAX_LUNS) {
3800 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3803 trtp = &mpt->trt[lun];
3805 CAMLOCK_2_MPTLOCK(mpt);
3806 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3807 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3808 "Put FREE ATIO %p lun %d\n", ccb, lun);
3809 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3811 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3812 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3813 "Put FREE INOT lun %d\n", lun);
3814 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3817 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3819 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3820 MPTLOCK_2_CAMLOCK(mpt);
3823 case XPT_CONT_TARGET_IO:
3824 CAMLOCK_2_MPTLOCK(mpt);
3825 mpt_target_start_io(mpt, ccb);
3826 MPTLOCK_2_CAMLOCK(mpt);
3830 ccb->ccb_h.status = CAM_REQ_INVALID;
3837 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3839 #ifdef CAM_NEW_TRAN_CODE
3840 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3841 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3844 uint32_t dval, pval, oval;
3847 if (IS_CURRENT_SETTINGS(cts) == 0) {
3848 tgt = cts->ccb_h.target_id;
3849 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3850 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3854 tgt = cts->ccb_h.target_id;
* We aren't looking at Port Page 2 BIOS settings here;
* sometimes these have been known to be bogus (XXX).
3861 * For user settings, we pick the max from port page 0
3863 * For current settings we read the current settings out from
3864 * device page 0 for that target.
3866 if (IS_CURRENT_SETTINGS(cts)) {
3867 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3870 CAMLOCK_2_MPTLOCK(mpt);
3871 tmp = mpt->mpt_dev_page0[tgt];
3872 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3873 sizeof(tmp), FALSE, 5000);
3875 MPTLOCK_2_CAMLOCK(mpt);
3876 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3879 mpt2host_config_page_scsi_device_0(&tmp);
3881 MPTLOCK_2_CAMLOCK(mpt);
3882 mpt_lprt(mpt, MPT_PRT_DEBUG,
3883 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3884 tmp.NegotiatedParameters, tmp.Information);
3885 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3886 DP_WIDE : DP_NARROW;
3887 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3888 DP_DISC_ENABLE : DP_DISC_DISABL;
3889 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3890 DP_TQING_ENABLE : DP_TQING_DISABL;
3891 oval = tmp.NegotiatedParameters;
3892 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3893 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3894 pval = tmp.NegotiatedParameters;
3895 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3896 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3897 mpt->mpt_dev_page0[tgt] = tmp;
3899 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3900 oval = mpt->mpt_port_page0.Capabilities;
3901 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3902 pval = mpt->mpt_port_page0.Capabilities;
3903 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3906 #ifndef CAM_NEW_TRAN_CODE
3907 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3909 cts->sync_period = pval;
3910 cts->sync_offset = oval;
3911 cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3912 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3913 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3914 if (dval & DP_WIDE) {
3915 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3917 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3919 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3920 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3921 if (dval & DP_DISC_ENABLE) {
3922 cts->flags |= CCB_TRANS_DISC_ENB;
3924 if (dval & DP_TQING_ENABLE) {
3925 cts->flags |= CCB_TRANS_TAG_ENB;
3933 spi->sync_offset = oval;
3934 spi->sync_period = pval;
3935 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3936 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3937 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3938 if (dval & DP_WIDE) {
3939 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3941 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3943 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3944 scsi->valid = CTS_SCSI_VALID_TQ;
3945 if (dval & DP_TQING_ENABLE) {
3946 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3948 spi->valid |= CTS_SPI_VALID_DISC;
3949 if (dval & DP_DISC_ENABLE) {
3950 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3954 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3955 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3956 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3961 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3963 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3965 ptr = &mpt->mpt_dev_page1[tgt];
3967 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3969 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3974 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3976 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3978 ptr = &mpt->mpt_dev_page1[tgt];
3979 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3980 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3981 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3982 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3983 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3987 ptr->RequestedParameters |=
3988 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3989 ptr->RequestedParameters |=
3990 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3992 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3995 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3996 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
4001 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
4003 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
4006 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
4007 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
4008 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
4009 tmp = mpt->mpt_dev_page1[tgt];
4010 host2mpt_config_page_scsi_device_1(&tmp);
4011 rv = mpt_write_cur_cfg_page(mpt, tgt,
4012 &tmp.Header, sizeof(tmp), FALSE, 5000);
4014 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
4021 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
4023 cam_calc_geometry(ccg, extended);
4025 uint32_t secs_per_cylinder;
4027 if (ccg->block_size == 0) {
4028 ccg->ccb_h.status = CAM_REQ_INVALID;
4031 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
4032 if (size_mb > 1024 && extended) {
4034 ccg->secs_per_track = 63;
4037 ccg->secs_per_track = 32;
4039 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
4040 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
4041 ccg->ccb_h.status = CAM_REQ_CMP;
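/*
* Worked example for this fallback path, assuming 512-byte blocks:
* a 4194304-block (2GB) volume gives size_mb = 4194304 / 2048 =
* 2048 > 1024, so extended geometry applies (63 sectors and, per
* the usual extended translation, 255 heads), giving 16065 sectors
* per cylinder and 4194304 / 16065 = 261 cylinders.
*/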
4044 /****************************** Timeout Recovery ******************************/
4046 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
4050 error = mpt_kthread_create(mpt_recovery_thread, mpt,
4051 &mpt->recovery_thread, /*flags*/0,
4052 /*altstack*/0, "mpt_recovery%d", mpt->unit);
4057 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
4060 if (mpt->recovery_thread == NULL) {
4063 mpt->shutdwn_recovery = 1;
4066 * Sleep on a slightly different location
4067 * for this interlock just for added safety.
4069 mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
4073 mpt_recovery_thread(void *arg)
4075 struct mpt_softc *mpt;
4077 mpt = (struct mpt_softc *)arg;
4080 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4081 if (mpt->shutdwn_recovery == 0) {
4082 mpt_sleep(mpt, mpt, 0, "idle", 0);
4085 if (mpt->shutdwn_recovery != 0) {
4088 mpt_recover_commands(mpt);
4090 mpt->recovery_thread = NULL;
4091 wakeup(&mpt->recovery_thread);
4093 mpt_kthread_exit(0);
4097 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
4098 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
4100 MSG_SCSI_TASK_MGMT *tmf_req;
4104 * Wait for any current TMF request to complete.
4105 * We're only allowed to issue one TMF at a time.
4107 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4108 sleep_ok, MPT_TMF_MAX_TIMEOUT);
4110 mpt_reset(mpt, TRUE);
4114 mpt_assign_serno(mpt, mpt->tmf_req);
4115 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4117 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4118 memset(tmf_req, 0, sizeof(*tmf_req));
4119 tmf_req->TargetID = target;
4120 tmf_req->Bus = channel;
4121 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4122 tmf_req->TaskType = type;
4123 tmf_req->MsgFlags = flags;
4124 tmf_req->MsgContext =
4125 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
4126 if (lun > MPT_MAX_LUNS) {
4127 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4128 tmf_req->LUN[1] = lun & 0xff;
4130 tmf_req->LUN[1] = lun;
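/*
* Same single-level LUN encoding as in mpt_start(), though the
* boundary test here is '>' where mpt_start() uses '>=', which
* differs only for the LUN equal to MPT_MAX_LUNS.
*/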
4132 tmf_req->TaskMsgContext = abort_ctx;
4134 mpt_lprt(mpt, MPT_PRT_DEBUG,
4135 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4136 mpt->tmf_req->serno, tmf_req->MsgContext);
4137 if (mpt->verbose > MPT_PRT_DEBUG) {
4138 mpt_print_request(tmf_req);
4141 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4142 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4143 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4144 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4145 if (error != MPT_OK) {
4146 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4147 mpt->tmf_req->state = REQ_STATE_FREE;
4148 mpt_reset(mpt, TRUE);
* When a command times out, it is placed on the request_timeout_list
* and we wake our recovery thread. The MPT-Fusion architecture supports
* only a single TMF operation at a time, so we serially abort/bdr, etc.,
* the timed-out transactions. The next TMF is issued either by the
4158 * completion handler of the current TMF waking our recovery thread,
4159 * or the TMF timeout handler causing a hard reset sequence.
4162 mpt_recover_commands(struct mpt_softc *mpt)
4168 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
* No work to do; leave.
4172 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4177 * Flush any commands whose completion coincides with their timeout.
4181 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4183 * The timedout commands have already
4184 * completed. This typically means
4185 * that either the timeout value was on
4186 * the hairy edge of what the device
4187 * requires or - more likely - interrupts
4188 * are not happening.
4190 mpt_prt(mpt, "Timedout requests already complete. "
4191 "Interrupts may not be functioning.\n");
4192 mpt_enable_ints(mpt);
4197 * We have no visibility into the current state of the
4198 * controller, so attempt to abort the commands in the
* order they timed out. For initiator commands, we
4200 * depend on the reply handler pulling requests off
4203 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4206 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4208 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4209 req, req->serno, hdrp->Function);
4212 mpt_prt(mpt, "null ccb in timed out request. "
4213 "Resetting Controller.\n");
4214 mpt_reset(mpt, TRUE);
4217 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
* Check to see whether this is an initiator command;
* if it isn't, deal with it differently.
4223 switch (hdrp->Function) {
4224 case MPI_FUNCTION_SCSI_IO_REQUEST:
4225 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4229 * XXX: FIX ME: need to abort target assists...
4231 mpt_prt(mpt, "just putting it back on the pend q\n");
4232 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4233 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4238 error = mpt_scsi_send_tmf(mpt,
4239 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4240 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4241 htole32(req->index | scsi_io_handler_id), TRUE);
4245 * mpt_scsi_send_tmf hard resets on failure, so no
4246 * need to do so here. Our queue should be emptied
4247 * by the hard reset.
4252 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4253 REQ_STATE_DONE, TRUE, 500);
4255 status = le16toh(mpt->tmf_req->IOCStatus);
4256 response = mpt->tmf_req->ResponseCode;
4257 mpt->tmf_req->state = REQ_STATE_FREE;
* If we've errored out, reset the controller.
mpt_prt(mpt, "mpt_recover_commands: abort timed out. "
"Resetting controller.\n");
4265 mpt_reset(mpt, TRUE);
4269 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4270 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4271 "Resetting controller.\n", status);
4272 mpt_reset(mpt, TRUE);
4276 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4277 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4278 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4279 "Resetting controller.\n", response);
4280 mpt_reset(mpt, TRUE);
4283 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4287 /************************ Target Mode Support ****************************/
4289 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4291 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4292 PTR_SGE_TRANSACTION32 tep;
4293 PTR_SGE_SIMPLE32 se;
4297 paddr = req->req_pbuf;
4298 paddr += MPT_RQSL(mpt);
4301 memset(fc, 0, MPT_REQUEST_AREA);
4302 fc->BufferCount = 1;
4303 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4304 fc->MsgContext = htole32(req->index | fc_els_handler_id);
4307 * Okay, set up ELS buffer pointers. ELS buffer pointers
4308 * consist of a TE SGL element (with details length of zero)
* followed by a SIMPLE SGL element which holds the address
* and length of the ELS buffer.
4313 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4315 tep->ContextSize = 4;
4317 tep->TransactionContext[0] = htole32(ioindex);
4319 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4321 MPI_SGE_FLAGS_HOST_TO_IOC |
4322 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4323 MPI_SGE_FLAGS_LAST_ELEMENT |
4324 MPI_SGE_FLAGS_END_OF_LIST |
4325 MPI_SGE_FLAGS_END_OF_BUFFER;
4326 fl <<= MPI_SGE_FLAGS_SHIFT;
4327 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4328 se->FlagsLength = htole32(fl);
4329 se->Address = htole32((uint32_t) paddr);
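/*
* Here the low bits of FlagsLength carry MPT_NRFM(mpt) -
* MPT_RQSL(mpt): going by the paddr arithmetic above, everything
* in the request area past the request frame itself is offered to
* the IOC as ELS receive space.
*/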
4330 mpt_lprt(mpt, MPT_PRT_DEBUG,
4331 "add ELS index %d ioindex %d for %p:%u\n",
4332 req->index, ioindex, req, req->serno);
4333 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4334 ("mpt_fc_post_els: request not locked"));
4335 mpt_send_cmd(mpt, req);
4339 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4341 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4342 PTR_CMD_BUFFER_DESCRIPTOR cb;
4345 paddr = req->req_pbuf;
4346 paddr += MPT_RQSL(mpt);
4347 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4348 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4351 fc->BufferCount = 1;
4352 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4353 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4355 cb = &fc->Buffer[0];
4356 cb->IoIndex = htole16(ioindex);
4357 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4359 mpt_check_doorbell(mpt);
4360 mpt_send_cmd(mpt, req);
4364 mpt_add_els_buffers(struct mpt_softc *mpt)
4368 if (mpt->is_fc == 0) {
4372 if (mpt->els_cmds_allocated) {
4376 mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
4377 M_DEVBUF, M_NOWAIT | M_ZERO);
4379 if (mpt->els_cmd_ptrs == NULL) {
4384 * Feed the chip some ELS buffer resources
4386 for (i = 0; i < MPT_MAX_ELS; i++) {
4387 request_t *req = mpt_get_request(mpt, FALSE);
4391 req->state |= REQ_STATE_LOCKED;
4392 mpt->els_cmd_ptrs[i] = req;
4393 mpt_fc_post_els(mpt, req, i);
4397 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4398 kfree(mpt->els_cmd_ptrs, M_DEVBUF);
4399 mpt->els_cmd_ptrs = NULL;
4402 if (i != MPT_MAX_ELS) {
4403 mpt_lprt(mpt, MPT_PRT_INFO,
4404 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4406 mpt->els_cmds_allocated = i;
4411 mpt_add_target_commands(struct mpt_softc *mpt)
4415 if (mpt->tgt_cmd_ptrs) {
4419 max = MPT_MAX_REQUESTS(mpt) >> 1;
4420 if (max > mpt->mpt_max_tgtcmds) {
4421 max = mpt->mpt_max_tgtcmds;
4424 kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4425 if (mpt->tgt_cmd_ptrs == NULL) {
4427 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4431 for (i = 0; i < max; i++) {
4434 req = mpt_get_request(mpt, FALSE);
4438 req->state |= REQ_STATE_LOCKED;
4439 mpt->tgt_cmd_ptrs[i] = req;
4440 mpt_post_target_command(mpt, req, i);
4445 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4446 kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
4447 mpt->tgt_cmd_ptrs = NULL;
4451 mpt->tgt_cmds_allocated = i;
4454 mpt_lprt(mpt, MPT_PRT_INFO,
4455 "added %d of %d target bufs\n", i, max);
4461 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4464 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4466 } else if (lun >= MPT_MAX_LUNS) {
4468 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4471 if (mpt->tenabled == 0) {
4473 (void) mpt_fc_reset_link(mpt, 0);
4477 if (lun == CAM_LUN_WILDCARD) {
4478 mpt->trt_wildcard.enabled = 1;
4480 mpt->trt[lun].enabled = 1;
static int
mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{
	int i;

	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
		mpt->twildcard = 0;
	} else if (lun >= MPT_MAX_LUNS) {
		return (EINVAL);
	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
		return (EINVAL);
	}
	if (lun == CAM_LUN_WILDCARD) {
		mpt->trt_wildcard.enabled = 0;
	} else {
		mpt->trt[lun].enabled = 0;
	}

	/*
	 * If no luns are left enabled and the wildcard lun is off
	 * too, turn target mode back off.
	 */
	for (i = 0; i < MPT_MAX_LUNS; i++) {
		if (mpt->trt[i].enabled) {
			break;
		}
	}
	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
		if (mpt->is_fc) {
			(void) mpt_fc_reset_link(mpt, 0);
		}
		mpt->tenabled = 0;
	}
	return (0);
}
/*
 * Called with MPT lock held
 */
static void
mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
{
	struct ccb_scsiio *csio = &ccb->csio;
	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);

	switch (tgt->state) {
	case TGT_STATE_IN_CAM:
		break;
	case TGT_STATE_MOVING_DATA:
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		xpt_freeze_simq(mpt->sim, 1);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		return;
	default:
		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
		mpt_tgt_dump_req_state(mpt, cmd_req);
		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		return;
	}
	if (csio->dxfer_len) {
		bus_dmamap_callback_t *cb;
		PTR_MSG_TARGET_ASSIST_REQUEST ta;
		request_t *req;

		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));

		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
			if (mpt->outofbeer == 0) {
				mpt->outofbeer = 1;
				xpt_freeze_simq(mpt->sim, 1);
				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
			}
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
			return;
		}
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		if (sizeof (bus_addr_t) > 4) {
			cb = mpt_execute_req_a64;
		} else {
			cb = mpt_execute_req;
		}

		req->ccb = ccb;
		ccb->ccb_h.ccb_req_ptr = req;

		/*
		 * Record the currently active ccb and the
		 * request for it in our target state area.
		 */
		tgt->ccb = ccb;
		tgt->req = req;
		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
		ta = req->req_vbuf;

		if (mpt->is_sas) {
			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
			    cmd_req->req_vbuf;
			ta->QueueTag = ssp->InitiatorTag;
		} else if (mpt->is_spi) {
			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
			    cmd_req->req_vbuf;
			ta->QueueTag = sp->Tag;
		}
		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
		ta->ReplyWord = htole32(tgt->reply_desc);
		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
			ta->LUN[0] =
			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
		} else {
			ta->LUN[1] = csio->ccb_h.target_lun;
		}
		ta->RelativeOffset = tgt->bytes_xfered;
		ta->DataLength = ccb->csio.dxfer_len;
		if (ta->DataLength > tgt->resid) {
			ta->DataLength = tgt->resid;
		}

		/*
		 * XXX Should be done after data transfer completes?
		 */
		tgt->resid -= csio->dxfer_len;
		tgt->bytes_xfered += csio->dxfer_len;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		}

#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
		}
#endif
		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
		MPTLOCK_2_CAMLOCK(mpt);
		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
				int error;

				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    cb, req, 0);
				if (error == EINPROGRESS) {
					xpt_freeze_simq(mpt->sim, 1);
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to a single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr = (bus_addr_t)
				    (vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				(*cb)(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could be easily supported, but it is not
			 * currently generated by the CAM subsystem, so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *sgs;
			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
				(*cb)(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				sgs = (struct bus_dma_segment *)csio->data_ptr;
				(*cb)(req, sgs, csio->sglist_cnt, 0);
			}
		}
		CAMLOCK_2_MPTLOCK(mpt);
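		/*
		 * Note on the bus_dmamap_load() contract above: the
		 * mapping may complete synchronously (the callback runs
		 * before the call returns) or be deferred, in which case
		 * EINPROGRESS comes back and the callback fires later,
		 * once mapping resources free up. That is why the SIMQ is
		 * frozen with CAM_RELEASE_SIMQ set only in the EINPROGRESS
		 * case; the callback itself (mpt_execute_req or
		 * mpt_execute_req_a64 here) builds the SGEs and actually
		 * launches the TARGET_ASSIST command.
		 */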
	} else {
		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

		/*
		 * XXX: I don't know why this seems to happen, but
		 * XXX: completing the CCB seems to make things happy.
		 * XXX: This seems to happen if the initiator requests
		 * XXX: enough data that we have to do multiple CTIOs.
		 */
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Meaningless STATUS CCB (%p): flags %x status %x "
			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
			return;
		}
		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
			sp = sense;
			memcpy(sp, &csio->sense_data,
			    min(csio->sense_len, MPT_SENSE_SIZE));
		}
		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
	}
}
static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    uint32_t lun, int send, uint8_t *data, size_t length)
{
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_ASSIST_REQUEST ta;
	request_t *req;
	SGE_SIMPLE32 *se;
	uint32_t flags;
	uint8_t *dptr;
	bus_addr_t pptr;

	/*
	 * We enter with resid set to the data load for the command.
	 */
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if (length == 0 || tgt->resid == 0) {
		tgt->resid = 0;
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
		return;
	}

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		mpt_prt(mpt, "out of resources- dropping local response\n");
		return;
	}
	tgt->is_local = 1;

	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
	ta = req->req_vbuf;

	if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
		ta->QueueTag = ssp->InitiatorTag;
	} else if (mpt->is_spi) {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
		ta->QueueTag = sp->Tag;
	}
	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	ta->ReplyWord = htole32(tgt->reply_desc);
	if (lun > MPT_MAX_LUNS) {
		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		ta->LUN[1] = lun & 0xff;
	} else {
		ta->LUN[1] = lun;
	}
	ta->RelativeOffset = 0;
	ta->DataLength = length;

	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));

	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	se->Address = pptr;
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);

	tgt->ccb = NULL;
	tgt->req = req;
	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}
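
/*
 * Usage sketch: this helper exists so the driver can answer a handful
 * of commands (INQUIRY, REQUEST SENSE) for luns no upstream listener
 * has enabled. mpt_scsi_tgt_atio() below calls it, e.g.
 *
 *	mpt_scsi_tgt_local(mpt, req, lun, 1, null_iqd, len);
 *
 * with 'send' nonzero to push the canned data back to the initiator;
 * the status phase follows once the data movement completes.
 */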
/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	int found = 0;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);

	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		trtp = &mpt->trt_wildcard;
	} else {
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	}

	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &trtp->inots;
	} else {
		return (CAM_REQ_INVALID);
	}

	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch == &accb->ccb_h) {
			found = 1;
			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
			break;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}
/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error = 0;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);

	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
	}
	return (error);
}
/*
 * WE_TRUST_AUTO_GOOD_STATUS - I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST - we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */
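
/*
 * Both symbols are evidently meant to stay off. If one wanted to
 * experiment regardless (an assumption about build plumbing, not a
 * documented knob), the obvious route is a compile-time define, e.g.
 *
 *	CFLAGS+= -DWE_TRUST_AUTO_GOOD_STATUS
 *
 * which would activate the #ifdef paths below: auto-status on the
 * final TARGET_ASSIST, and no hand-built FCP_RSP when there is no
 * residual and status is GOOD.
 */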
static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}

	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));

		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * as words which are swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
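		/*
		 * Sketch of the FC_RSP payload as the words are used
		 * below (32-bit word indices into rsp[], byte-swapped
		 * to big-endian before DMA):
		 *
		 *	rsp[2]	flags plus SCSI status byte; 0x800 marks
		 *		the residual as valid, 0x200 marks sense
		 *		data as valid
		 *	rsp[3]	residual byte count, if any
		 *	rsp[4]	sense data length, if any
		 *	rsp[8..] the sense data proper
		 *
		 * Proper mnemonic defines for the 0x800/0x200 bits are
		 * still wanted, per the XXXX notes below.
		 */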
		rsp[2] = status;
		if (tgt->resid) {
			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
			rsp[3] = htobe32(tgt->resid);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
		if (status == SCSI_STATUS_CHECK_COND) {
			int i;

			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
		    MPI_SGE_FLAGS_HOST_TO_IOC |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immed_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
	inot->sense_len = 0;
	memset(inot->message_args, 0, sizeof (inot->message_args));
	inot->initiator_id = init_id;	/* XXX */

	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_ABORT_TASK_SET:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->message_args[0] = MSG_CLEAR_TASK_SET;
		break;
	case MPT_TARGET_RESET:
		inot->message_args[0] = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->message_args[0] = MSG_CLEAR_ACA;
		break;
	case MPT_TERMINATE_TASK:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	default:
		inot->message_args[0] = MSG_NOOP;
		break;
	}
	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done((union ccb *)inot);
	CAMLOCK_2_MPTLOCK(mpt);
}
static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
	     '0',  '0',  '0',  '1'
	};
	struct ccb_accept_tio *atiop;
	lun_id_t lun;
	int tag_action = 0;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	U8 *lunptr;
	U8 *vbuf;
	U16 itag;
	U16 ioindex;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
	uint8_t *cdbp;

	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			case 0x80:
				fct = MPT_TERMINATE_TASK;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, 0, req,
				    SCSI_STATUS_OK, 0);
				return;
			}
		} else {
			switch (fc->FcpCntl[1]) {
			case 0:
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			case 1:
				tag_action = MSG_HEAD_OF_Q_TAG;
				break;
			case 2:
				tag_action = MSG_ORDERED_Q_TAG;
				break;
			default:
				/*
				 * Bah. Ignore Untagged Queueing and ACA
				 */
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			}
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		itag = be16toh(fc->OptionalOxid);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		itag = sp->Tag;
	}
	/*
	 * Generate a simple lun
	 */
	switch (lunptr[0] & 0xc0) {
	case 0x40:
		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
		break;
	case 0:
		lun = lunptr[1];
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
		lun = 0xffff;
		break;
	}
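
	/*
	 * Decode sketch: SAM "flat space" addressing puts 0x40 in the
	 * top bits of byte 0, so e.g. lunptr[] = { 0x41, 0x23 } decodes
	 * as lun 0x123, while "peripheral device" addressing (top bits
	 * zero) carries the lun entirely in byte 1. Anything fancier
	 * (logical unit or extended addressing) is rejected above.
	 */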
	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
			size_t len;

			memset(buf, 0, MPT_SENSE_SIZE);
			cond = SCSI_STATUS_CHECK_COND;
			buf[0] = 0xf0;		/* fixed format, valid */
			buf[2] = 0x5;		/* ILLEGAL REQUEST */
			buf[7] = 0x8;		/* additional length */
			sp = buf;
			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

			switch (cdbp[0]) {
			case INQUIRY:
			{
				if (cdbp[1] != 0) {
					buf[12] = 0x26;	/* ASC */
					buf[13] = 0x01;	/* ASCQ */
					break;
				}
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (null_iqd));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local inquiry %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    null_iqd, len);
				return;
			}
			case REQUEST_SENSE:
			{
				buf[2] = 0x0;	/* NO SENSE */
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (buf));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local reqsense %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    buf, len);
				return;
			}
			case REPORT_LUNS:
				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
				buf[12] = 0x26;
				break;
			default:
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "CMD 0x%x to unmanaged lun %u\n",
				    cdbp[0], lun);
				buf[12] = 0x25;
				break;
			}
			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}
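
	/*
	 * For reference, the fixed-format sense the block above builds
	 * for, say, a READ to a lun nobody manages would look like
	 * (18 bytes, illustrative):
	 *
	 *	f0 00 05 00 00 00 00 08  00 00 00 00 25 00 ...
	 *
	 * i.e. response code 0x70 with the valid bit set, sense key
	 * ILLEGAL REQUEST (5), additional length 8, ASC/ASCQ 0x25/0x00.
	 */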
	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener\n",
			    fct);
			mpt_scsi_tgt_status(mpt, 0, req,
			    SCSI_STATUS_OK, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}
	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %u- sending back %s\n", lun,
		    mpt->tenabled? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);

	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
	if (tag_action) {
		atiop->tag_action = tag_action;
		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
	}
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;

		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1))? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}

	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done((union ccb *)atiop);
	CAMLOCK_2_MPTLOCK(mpt);
}
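
/*
 * A note on the tag scheme (an inference from how MPT_MAKE_TAGID and
 * MPT_TAG_2_REQ are used here, not a quote from any spec): the
 * constructed tag_id folds the request's pool index and the posted
 * buffer's ioindex into a single 32-bit value, so that when a CCB
 * such as a CTIO comes back down from CAM (see mpt_target_start_io()),
 * MPT_TAG_2_REQ() can recover the original command request without
 * any lookup beyond the request pool itself.
 */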
static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}

static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}
static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	uint16_t status;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch(tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_done(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_done(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);
	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);
		break;
	}
	return (TRUE);
}