2 * Redistribution and use in source and binary forms, with or without
3 * modification, are permitted provided that the following conditions
6 * Copyright 1994-2009 The FreeBSD Project.
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 * The views and conclusions contained in the software and documentation
28 * are those of the authors and should not be interpreted as representing
29 * official policies, either expressed or implied, of the FreeBSD Project.
31 * $FreeBSD: src/sys/dev/mfi/mfi_tbolt.c,v 1.00 2010/06/30 16:00:00 Bharat Gusain Exp $
32 * FreeBSD projects/head_mfi/ r232949
37 #include <sys/param.h>
38 #include <sys/types.h>
39 #include <sys/kernel.h>
44 #include <sys/ioccom.h>
45 #include <sys/eventhandler.h>
46 #include <sys/callout.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 #include <sys/malloc.h>
52 #include <dev/raid/mfi/mfireg.h>
53 #include <dev/raid/mfi/mfi_ioctl.h>
54 #include <dev/raid/mfi/mfivar.h>
56 struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc);
57 union mfi_mpi2_request_descriptor *
58 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
59 void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
60 int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
61 struct mfi_cmd_tbolt *cmd);
62 static inline void mfi_tbolt_return_cmd(struct mfi_softc *sc,
63 struct mfi_cmd_tbolt *cmd);
64 union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
65 *sc, struct mfi_command *cmd);
67 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
68 union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
69 *sc, struct mfi_command *mfi_cmd);
70 int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd);
71 void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
72 struct mfi_cmd_tbolt *cmd);
73 static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
74 *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
75 static int mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command
76 *mfi_cmd, uint8_t *cdb);
78 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
80 static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
81 static void mfi_kill_hba (struct mfi_softc *sc);
82 static void mfi_process_fw_state_chg_isr(void *arg);
83 uint8_t mfi_tbolt_get_map_info(struct mfi_softc *sc);
85 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
/*
 * mfi_tbolt_enable_intr_ppc - enable adapter interrupts by unmasking
 * only the Thunderbolt/Fusion interrupt bit in the outbound mask register.
 * NOTE(review): this dump is missing lines (return type, braces).
 */
88 mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
90 MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
/* Read back to flush the posted write to the controller. */
91 MFI_READ4(sc, MFI_OMSK);
/*
 * mfi_tbolt_disable_intr_ppc - mask off all adapter interrupts by writing
 * all-ones to the outbound interrupt mask register.
 */
95 mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
97 MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
/* Read back to flush the posted write to the controller. */
98 MFI_READ4(sc, MFI_OMSK);
/* mfi_tbolt_read_fw_status_ppc - return raw firmware state from scratchpad 0. */
102 mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
104 return MFI_READ4(sc, MFI_OSP0);
/*
 * mfi_tbolt_check_clear_intr_ppc - read, acknowledge and classify a pending
 * interrupt from the outbound status register.
 * NOTE(review): lines are missing from this dump; the visible code reads
 * MFI_OSTS, writes the value back to ack, and sets MFI_FIRMWARE_STATE_CHANGE
 * in mfi_status when the state-change bit is present.
 */
108 mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
110 int32_t status, mfi_status = 0;
112 status = MFI_READ4(sc, MFI_OSTS);
/* Write the status value back to acknowledge, then read to flush. */
115 MFI_WRITE4(sc, MFI_OSTS, status);
116 MFI_READ4(sc, MFI_OSTS);
117 if (status & MFI_STATE_CHANGE_INTERRUPT) {
118 mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
/* No Fusion interrupt bit set: nothing for us to service. */
123 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
126 MFI_READ4(sc, MFI_OSTS);
/*
 * mfi_tbolt_issue_cmd_ppc - post a request descriptor to the inbound queue.
 * The MFA descriptor type is OR'd into the low bits of the bus address,
 * then the 64-bit descriptor is written low word first.
 */
132 mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
135 bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
136 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
137 MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
138 MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
142 * mfi_tbolt_adp_reset - For controller reset
143 * @regs: MFI register set
/*
 * Issues the magic write sequence to the write-sequence register, waits for
 * DIAG_WRITE_ENABLE, then sets DIAG_RESET_ADAPTER and polls for completion.
 */
145 int mfi_tbolt_adp_reset(struct mfi_softc *sc)
147 int retry = 0, i = 0;
/* Magic unlock sequence required before the host diag register is writable. */
150 MFI_WRITE4(sc, MFI_WSR, 0xF);
151 MFI_WRITE4(sc, MFI_WSR, 4);
152 MFI_WRITE4(sc, MFI_WSR, 0xB);
153 MFI_WRITE4(sc, MFI_WSR, 2);
154 MFI_WRITE4(sc, MFI_WSR, 7);
155 MFI_WRITE4(sc, MFI_WSR, 0xD);
/* Crude busy-wait delay; NOTE(review): likely optimized away at -O2. */
157 for (i = 0; i < 10000; i++) ;
159 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
161 while (!( HostDiag & DIAG_WRITE_ENABLE)) {
162 for (i = 0; i < 1000; i++);
163 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
164 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
165 "hostdiag=%x\n", retry, HostDiag);
171 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%x\n", HostDiag);
/* Kick off the adapter reset. */
173 MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
/* NOTE(review): the inner delay loop reuses 'i', clobbering the outer
 * loop counter — the outer loop will not iterate as intended. */
175 for (i=0; i < 10; i++) {
176 for (i = 0; i < 10000; i++);
/* Poll the reset-status register until the reset bit clears. */
179 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
180 while (HostDiag & DIAG_RESET_ADAPTER) {
181 for (i = 0; i < 1000; i++) ;
182 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
183 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
184 "hostdiag=%x\n", retry, HostDiag);
193 *******************************************************************************************
195 * This routine initializes Thunderbolt specific device information
196 *******************************************************************************************
198 void mfi_tbolt_init_globals(struct mfi_softc *sc)
200 /* Initialize single reply size and Message size */
201 sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
202 sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
205 * Calculating how many SGEs allowed in an allocated main message
206 * (size of the Message - Raid SCSI IO message size(except SGE))
208 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
210 sc->max_SGEs_in_main_message =
211 (uint8_t)((sc->raid_io_msg_size
212 - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
213 - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
215 * (Command frame size allocated in SRB ext - Raid SCSI IO message size)
217 * (1280 - 256) / 16 = 64
219 sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
220 - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
222 * (0x08-1) + 0x40 = 0x47 - 0x01 = 0x46 one is left for command
225 sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
226 + sc->max_SGEs_in_chain_message - 1;
228 * This is the offset in number of 4 * 32bit words to the next chain
229 * (0x100 - 0x10)/0x10 = 0xF(15)
231 sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
232 - sizeof(MPI2_SGE_IO_UNION))/16;
233 sc->chain_offset_value_for_mpt_ptmsg
234 = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
/* Pools are allocated later by mfi_tbolt_alloc_cmd(). */
235 sc->mfi_cmd_pool_tbolt = NULL;
236 sc->request_desc_pool = NULL;
240 ****************************************************************************
242 * This function calculates the memory requirement for Thunderbolt
245 * Total required memory in bytes
246 ****************************************************************************
249 uint32_t mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
/* Alignment slack + request frames (one spare for SMID 0) + reply frames. */
253 size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT; /* for Alignment */
254 size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
255 size += sc->reply_size * sc->mfi_max_fw_cmds;
256 /* this is for SGL's */
257 size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
262 ****************************************************************************
264 * This function will prepare message pools for the Thunderbolt controller
266 * DevExt - HBA miniport driver's adapter data storage structure
267 * pMemLocation - start of the memory allocated for Thunderbolt.
271 ****************************************************************************
273 int mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
274 uint32_t tbolt_contg_length)
277 uint8_t *addr = mem_location;
279 /* Request Descriptor Base physical Address */
281 /* For Request Decriptors Virtual Memory */
282 /* Initialise the aligned IO Frames Virtual Memory Pointer */
/* Round the request pool up to a 256-byte boundary if misaligned. */
283 if (((uintptr_t)addr) & (0xFF)) {
284 addr = &addr[sc->raid_io_msg_size];
285 addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
286 sc->request_message_pool_align = addr;
288 sc->request_message_pool_align = addr;
/* Bus address of the aligned request pool = base + alignment offset. */
290 offset = sc->request_message_pool_align - sc->request_message_pool;
291 sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;
293 /* DJA XXX should this be bus dma ??? */
294 /* Skip request message pool */
295 addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
296 /* Reply Frame Pool is initialized */
297 sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
/* Same 256-byte alignment treatment for the reply frame pool. */
298 if (((uintptr_t)addr) & (0xFF)) {
299 addr = &addr[sc->reply_size];
300 addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
302 sc->reply_frame_pool_align
303 = (struct mfi_mpi2_reply_header *)addr;
305 offset = (uintptr_t)sc->reply_frame_pool_align
306 - (uintptr_t)sc->request_message_pool;
307 sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;
309 /* Skip Reply Frame Pool */
310 addr += sc->reply_size * sc->mfi_max_fw_cmds;
311 sc->reply_pool_limit = addr;
313 /* initializing reply address to 0xFFFFFFFF */
314 memset((uint8_t *)sc->reply_frame_pool, 0xFF,
315 (sc->reply_size * sc->mfi_max_fw_cmds));
/* SG chain frames live immediately after the reply frames. */
317 offset = sc->reply_size * sc->mfi_max_fw_cmds;
318 sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
319 /* initialize the last_reply_idx to 0 */
320 sc->last_reply_idx = 0;
/* Sanity check: the carved-up regions must fit the contiguous buffer. */
321 offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
322 sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
323 if (offset > tbolt_contg_length)
324 device_printf(sc->mfi_dev, "Error:Initialized more than "
330 ****************************************************************************
332 * This routine prepares and issues the INIT2 (IOC_INIT) frame to the firmware
333 ****************************************************************************
337 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
339 struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
340 struct mfi_init_frame *mfi_init;
341 uintptr_t offset = 0;
342 bus_addr_t phyAddress;
343 MFI_ADDRESS *mfiAddressTemp;
344 struct mfi_command *cm;
347 mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
348 /* Check if initialization is already completed */
349 if (sc->MFA_enabled) {
/* Borrow a regular MFI command to carry the init frame. */
353 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
354 if ((cm = mfi_dequeue_free(sc)) == NULL) {
355 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Point the command at the pre-allocated Thunderbolt init frame. */
358 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
359 cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
360 cm->cm_dmamap = sc->mfi_tb_init_dmamap;
361 cm->cm_frame->header.context = 0;
366 * Abuse the SG list area of the frame to hold the init_qinfo
369 mfi_init = &cm->cm_frame->init;
371 bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
372 mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
373 mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
375 /* set MsgVersion and HeaderVersion host driver was built with */
376 mpi2IocInit->MsgVersion = MPI2_VERSION;
377 mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
/* Frame size is expressed in 32-bit words per the MPI2 spec. */
378 mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
379 mpi2IocInit->ReplyDescriptorPostQueueDepth
380 = (uint16_t)sc->mfi_max_fw_cmds;
381 mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
383 /* Get physical address of reply frame pool */
384 offset = (uintptr_t) sc->reply_frame_pool_align
385 - (uintptr_t)sc->request_message_pool;
386 phyAddress = sc->mfi_tb_busaddr + offset;
388 (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
389 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
390 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
392 /* Get physical address of request message pool */
393 offset = sc->request_message_pool_align - sc->request_message_pool;
394 phyAddress = sc->mfi_tb_busaddr + offset;
395 mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
396 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
397 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
398 mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
399 mpi2IocInit->TimeStamp = time_uptime;
/* Stash the driver version string where the firmware can DMA it. */
402 ksnprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
404 mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
405 mfi_init->driver_ver_hi =
406 (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
408 /* Get the physical address of the mpi2 ioc init command */
409 phyAddress = sc->mfi_tb_ioc_init_busaddr;
410 mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
411 mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
412 mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
414 mfi_init->header.cmd = MFI_CMD_INIT;
415 mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
416 mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;
/* Send the init frame synchronously (polled). */
419 cm->cm_flags |= MFI_CMD_POLLED;
420 cm->cm_timestamp = time_uptime;
421 if ((error = mfi_mapcmd(sc, cm)) != 0) {
422 device_printf(sc->mfi_dev, "failed to send IOC init2 "
423 "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
424 mfi_release_command(cm);
425 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
428 mfi_release_command(cm);
429 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* A zero cmd_status means the firmware accepted the init frame. */
431 if (mfi_init->header.cmd_status == 0) {
435 device_printf(sc->mfi_dev, "Init command Failed %x\n",
436 mfi_init->header.cmd_status);
/*
 * mfi_tbolt_alloc_cmd - allocate the Thunderbolt command pool and wire each
 * command to its request descriptor, IO frame, and SG chain frame slots.
 */
444 int mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
446 struct mfi_cmd_tbolt *cmd;
447 bus_addr_t io_req_base_phys;
448 uint8_t *io_req_base;
449 int i = 0, j = 0, offset = 0;
452 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
453 * Allocate the dynamic array first and then allocate individual
/* NOTE(review): request_desc_pool is not NULL-checked below, only
 * mfi_cmd_pool_tbolt is — confirm against the full source. */
456 sc->request_desc_pool = kmalloc(sizeof(
457 union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
458 M_MFIBUF, M_NOWAIT|M_ZERO);
459 sc->mfi_cmd_pool_tbolt = kmalloc(sizeof(struct mfi_cmd_tbolt*)
460 * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
462 if (!sc->mfi_cmd_pool_tbolt) {
463 device_printf(sc->mfi_dev, "out of memory. Could not alloc "
464 "memory for cmd_list_fusion\n");
468 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
469 sc->mfi_cmd_pool_tbolt[i] = kmalloc(sizeof(
470 struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
/* On partial failure, unwind everything allocated so far. */
472 if (!sc->mfi_cmd_pool_tbolt[i]) {
473 device_printf(sc->mfi_dev, "Could not alloc cmd list "
476 for (j = 0; j < i; j++)
477 kfree(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
479 kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
480 sc->mfi_cmd_pool_tbolt = NULL;
485 * The first 256 bytes (SMID 0) is not used. Don't add to the cmd
488 io_req_base = sc->request_message_pool_align
489 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
490 io_req_base_phys = sc->request_msg_busaddr
491 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
494 * Add all the commands to command pool (instance->cmd_pool)
496 /* SMID 0 is reserved. Set SMID/index from 1 */
498 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
499 cmd = sc->mfi_cmd_pool_tbolt[i];
500 offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
/* Each command gets one descriptor, one IO frame and one SG frame. */
502 cmd->request_desc = (union mfi_mpi2_request_descriptor *)
503 (sc->request_desc_pool + i);
504 cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
505 (io_req_base + offset);
506 cmd->io_request_phys_addr = io_req_base_phys + offset;
507 cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
508 + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
509 cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
510 * MEGASAS_MAX_SZ_CHAIN_FRAME;
512 TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
/*
 * mfi_tbolt_reset - initiate an adapter reset if the firmware reports a
 * FAULT state and online controller reset is permitted.
 */
517 int mfi_tbolt_reset(struct mfi_softc *sc)
521 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Bail out if PCI access is gone or the HW hit a critical error. */
522 if (atomic_read(&sc->fw_reset_no_pci_access)) {
523 device_printf(sc->mfi_dev, "NO PCI ACCESS\n");
524 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
528 if (sc->hw_crit_error) {
529 device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
530 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
534 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
535 fw_state = sc->mfi_read_fw_status(sc);
536 if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT) {
/* Reset is only attempted when OCR is enabled and none in progress. */
537 if ((sc->disableOnlineCtrlReset == 0)
538 && (sc->adpreset == 0)) {
539 device_printf(sc->mfi_dev, "Adapter RESET "
540 "condition is detected\n");
542 sc->issuepend_done = 0;
544 sc->last_reply_idx = 0;
545 mfi_process_fw_state_chg_isr(sc);
547 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
551 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
556 * mfi_intr_tbolt - isr entry point
/* Acknowledges the interrupt, drains completed commands, and restarts
 * queued I/O if the queue had been frozen. */
558 void mfi_intr_tbolt(void *arg)
560 struct mfi_softc *sc = (struct mfi_softc *)arg;
/* A return of 1 means the interrupt was not ours — ignore it. */
562 if (sc->mfi_check_clear_intr(sc) == 1) {
565 if (sc->mfi_detaching)
567 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
568 mfi_tbolt_complete_cmd(sc);
569 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
570 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
572 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
577 * map_cmd_status - Maps FW cmd status to OS cmd status
578 * @cmd : Pointer to cmd
579 * @status : status of cmd returned by FW
580 * @ext_status : ext status of cmd returned by FW
584 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
/* Success path: clear both MFI and DCMD header status fields. */
591 mfi_cmd->cm_frame->header.cmd_status = 0;
592 mfi_cmd->cm_frame->dcmd.header.cmd_status = 0;
595 case MFI_STAT_SCSI_IO_FAILED:
596 case MFI_STAT_LD_INIT_IN_PROGRESS:
/* Propagate both the MFI status and the SCSI ext status. */
597 mfi_cmd->cm_frame->header.cmd_status = status;
598 mfi_cmd->cm_frame->header.scsi_status = ext_status;
599 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
600 mfi_cmd->cm_frame->dcmd.header.scsi_status
604 case MFI_STAT_SCSI_DONE_WITH_ERROR:
/* For check-condition completions the ext status is the real status. */
605 mfi_cmd->cm_frame->header.cmd_status = ext_status;
606 mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
609 case MFI_STAT_LD_OFFLINE:
610 case MFI_STAT_DEVICE_NOT_FOUND:
611 mfi_cmd->cm_frame->header.cmd_status = status;
612 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
/* Default: pass the firmware status through unchanged. */
616 mfi_cmd->cm_frame->header.cmd_status = status;
617 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
623 * mfi_tbolt_return_cmd - Return a cmd to free command pool
624 * @instance: Adapter soft state
625 * @cmd: Command packet to be returned to free command pool
628 mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *cmd)
/* Caller must hold the io lock; command goes back on the free tailq. */
630 mfi_lockassert(&sc->mfi_io_lock);
632 TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, cmd, next);
/*
 * mfi_tbolt_complete_cmd - walk the reply descriptor ring, completing every
 * finished command, then write the updated reply index back to the firmware.
 */
635 void mfi_tbolt_complete_cmd(struct mfi_softc *sc)
637 struct mfi_mpi2_reply_header *desc, *reply_desc;
638 struct mfi_command *cmd_mfi; /* For MFA Cmds */
639 struct mfi_cmd_tbolt *cmd_tbolt;
641 uint8_t reply_descript_type;
642 struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
643 uint32_t status, extStatus;
644 uint16_t num_completed;
645 union desc_value val;
/* Start at the descriptor following the last one we processed. */
647 desc = (struct mfi_mpi2_reply_header *)
648 ((uintptr_t)sc->reply_frame_pool_align
649 + sc->last_reply_idx * sc->reply_size);
653 device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
655 reply_descript_type = reply_desc->ReplyFlags
656 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
657 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
661 val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
663 /* Read Reply descriptor */
/* An all-ones descriptor marks an empty (not yet posted) slot. */
664 while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
666 smid = reply_desc->SMID;
/* NOTE(review): upper bound uses mfi_max_fw_cmds + 1 while the pool is
 * indexed [smid - 1] below — confirm the off-by-one against full source. */
667 if (!smid || smid > sc->mfi_max_fw_cmds + 1) {
668 device_printf(sc->mfi_dev, "smid is %x. Cannot "
669 "proceed. Returning \n", smid);
/* SMID is 1-based; map it back to the pool entry and owning MFI cmd. */
673 cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
674 cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
675 scsi_io_req = cmd_tbolt->io_request;
677 /* Check if internal commands */
678 status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
679 extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
681 switch (scsi_io_req->Function) {
682 case MPI2_FUNCTION_LD_IO_REQUEST:
683 /* Regular Path IO. */
684 /* Map the Fw Error Status. */
685 map_tbolt_cmd_status(cmd_mfi, status,
/* Special-case LD map sync DCMDs: just log and release. */
687 if ((cmd_mfi->cm_frame->dcmd.opcode
688 == MFI_DCMD_LD_MAP_GET_INFO)
689 && (cmd_mfi->cm_frame->dcmd.mbox[1] == 1)) {
690 if (cmd_mfi->cm_frame->header.cmd_status
692 device_printf(sc->mfi_dev,
693 "map sync failed\n");
696 device_printf(sc->mfi_dev,
697 "map sync completed\n");
698 mfi_release_command(cmd_mfi);
701 if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY)
703 && (cmd_mfi->cm_flags & MFI_CMD_POLLED) == 0) {
704 /* BHARAT poll workaround */
705 mfi_remove_busy(cmd_mfi);
706 cmd_mfi->cm_error = 0;
707 mfi_complete(sc, cmd_mfi);
709 mfi_tbolt_return_cmd(sc, cmd_tbolt);
711 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:
712 map_tbolt_cmd_status(cmd_mfi, status, extStatus);
713 if ((cmd_mfi->cm_frame->dcmd.opcode
714 == MFI_DCMD_LD_MAP_GET_INFO)
715 && (cmd_mfi->cm_frame->dcmd.mbox[1] == 1)) {
716 if (cmd_mfi->cm_frame->header.cmd_status != 0)
717 device_printf(sc->mfi_dev,
718 "map sync failed\n");
721 device_printf(sc->mfi_dev,
722 "map sync completed\n");
723 mfi_release_command(cmd_mfi);
726 if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY)
728 && (cmd_mfi->cm_flags & MFI_CMD_POLLED) == 0) {
729 /* BHARAT poll workaround */
730 mfi_remove_busy(cmd_mfi);
731 cmd_mfi->cm_error = 0;
732 mfi_complete(sc, cmd_mfi);
734 mfi_tbolt_return_cmd(sc, cmd_tbolt);
/* Advance the ring index, wrapping and notifying the FW at the end. */
738 sc->last_reply_idx++;
739 if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
740 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
741 sc->last_reply_idx = 0;
743 /*set it back to all 0xfff.*/
744 ((union mfi_mpi2_reply_descriptor*)desc)->words =
749 /* Get the next reply descriptor */
750 desc = (struct mfi_mpi2_reply_header *)
751 ((uintptr_t)sc->reply_frame_pool_align
752 + sc->last_reply_idx * sc->reply_size);
754 val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
755 reply_descript_type = reply_desc->ReplyFlags
756 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
757 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
764 /* update replyIndex to FW */
765 if (sc->last_reply_idx)
766 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
772 * mfi_get_cmd - Get a command from the free pool
773 * @instance: Adapter soft state
775 * Returns a free command from the pool
778 struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc
781 struct mfi_cmd_tbolt *cmd = NULL;
783 mfi_lockassert(&sc->mfi_io_lock);
/* NOTE(review): no visible NULL check before TAILQ_REMOVE — the empty-pool
 * handling may be on lines missing from this dump; confirm. */
785 cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh);
786 TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
/* Scrub the SG chain frame and IO frame before reuse. */
787 memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
788 memset((uint8_t *)cmd->io_request, 0,
789 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
/*
 * mfi_tbolt_get_request_descriptor - return the zeroed request descriptor
 * slot for the given (0-based) index, or log and fail on a bad index.
 */
793 union mfi_mpi2_request_descriptor *
794 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
798 if (index >= sc->mfi_max_fw_cmds) {
799 device_printf(sc->mfi_dev, "Invalid SMID (0x%x)request "
800 "for descriptor\n", index);
803 p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
805 memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
806 return (union mfi_mpi2_request_descriptor *)p;
810 /* Used to build IOCTL cmd */
/* Wraps a legacy MFI frame in an MPT pass-through request: the chain SGE
 * points straight at the MFI frame in host memory. */
812 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
814 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
815 struct mfi_mpi2_request_raid_scsi_io *io_req;
816 struct mfi_cmd_tbolt *cmd;
818 cmd = mfi_tbolt_get_cmd(sc);
821 mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
822 cmd->sync_cmd_idx = mfi_cmd->cm_index;
823 io_req = cmd->io_request;
824 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
826 io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
827 io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
829 io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;
/* Chain element addresses the original MFI frame. */
831 mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;
834 In MFI pass thru, nextChainOffset will always be zero to
835 indicate the end of the chain.
837 mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
838 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
840 /* setting the length to the maximum length */
841 mpi25_ieee_chain->Length = 1024;
/*
 * mfi_tbolt_build_ldio - fill in an MPT LD read/write request from the
 * legacy MFI I/O frame (target, LBA, block count, direction).
 */
847 mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
848 struct mfi_cmd_tbolt *cmd)
850 uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
851 struct mfi_mpi2_request_raid_scsi_io *io_request;
852 struct IO_REQUEST_INFO io_info;
854 device_id = mfi_cmd->cm_frame->io.header.target_id;
855 io_request = cmd->io_request;
856 io_request->RaidContext.TargetID = device_id;
857 io_request->RaidContext.Status = 0;
858 io_request->RaidContext.exStatus =0;
860 start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
861 start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;
863 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
864 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
865 io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
866 io_info.ldTgtId = device_id;
867 if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
871 io_request->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT;
872 io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
873 io_request->DevHandle = device_id;
874 cmd->request_desc->header.RequestFlags =
875 (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
/* IoFlags == 6 means a 6-byte CDB; zero-block 6-byte I/O gets a fixed
 * region lock length of 0x100 blocks. */
876 if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
877 io_request->RaidContext.RegLockLength = 0x100;
878 io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
/* mfi_tbolt_is_ldio - true when the MFI command is an LD read or write. */
882 int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd)
884 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
885 || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
/*
 * mfi_tbolt_build_io - build the complete MPT SCSI IO request for an MFI
 * command: CDB, LD-specific fields, SGL, direction, and sense buffer.
 */
892 mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd, struct mfi_cmd_tbolt *cmd)
895 uint8_t cdb[32], cdb_len;
898 struct mfi_mpi2_request_raid_scsi_io *io_request = cmd->io_request;
900 /* Have to build CDB here for TB as BSD don't have a scsi layer */
/* A return of 1 from mfi_tbolt_build_cdb signals a bad/NULL argument. */
901 if ((cdb_len = mfi_tbolt_build_cdb(sc, mfi_cmd, cdb)) == 1)
904 /* Just the CDB length,rest of the Flags are zero */
905 io_request->IoFlags = cdb_len;
906 memcpy(io_request->CDB.CDB32, cdb, 32);
908 if (mfi_tbolt_is_ldio(mfi_cmd))
909 mfi_tbolt_build_ldio(sc, mfi_cmd , cmd);
916 sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
917 (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
918 if (sge_count > sc->mfi_max_sge) {
919 device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
920 "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
923 io_request->RaidContext.numSGE = sge_count;
924 io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
/* Direction flag comes from the original MFI opcode. */
926 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
927 io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
929 io_request->Control = MPI2_SCSIIO_CONTROL_READ;
/* SGLOffset0 is in 32-bit words per the MPI2 spec. */
931 io_request->SGLOffset0 = offsetof(
932 struct mfi_mpi2_request_raid_scsi_io, SGL)/4;
934 io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
935 io_request->SenseBufferLength = MFI_SENSE_LEN;
/*
 * mfi_tbolt_build_cdb - synthesize a READ/WRITE (6/10/12/16) CDB from the
 * MFI I/O frame, choosing the smallest CDB that fits the LBA and count.
 * NOTE(review): the returned cdb_len assignments are on lines missing from
 * this dump.
 */
940 mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
943 uint32_t lba_lo, lba_hi, num_lba;
946 if (mfi_cmd == NULL || cdb == NULL)
948 num_lba = mfi_cmd->cm_frame->io.header.data_len;
949 lba_lo = mfi_cmd->cm_frame->io.lba_lo;
950 lba_hi = mfi_cmd->cm_frame->io.lba_hi;
/* READ(6)/WRITE(6): 21-bit LBA, 8-bit count. */
952 if ((num_lba <= 0xFF) && (lba_lo <= 0x1FFFFF)) {
953 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
954 /* Read 6 or Write 6 */
955 cdb[0] = (uint8_t) (0x0A);
957 cdb[0] = (uint8_t) (0x08);
959 cdb[4] = (uint8_t) num_lba;
960 cdb[3] = (uint8_t) (lba_lo & 0xFF);
961 cdb[2] = (uint8_t) (lba_lo >> 8);
962 cdb[1] = (uint8_t) ((lba_lo >> 16) & 0x1F);
/* READ(10)/WRITE(10): 32-bit LBA, 16-bit count. */
964 } else if ((num_lba <= 0xFFFF) && (lba_lo <= 0xFFFFFFFF)) {
965 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
966 /* Read 10 or Write 10 */
967 cdb[0] = (uint8_t) (0x2A);
969 cdb[0] = (uint8_t) (0x28);
970 cdb[8] = (uint8_t) (num_lba & 0xFF);
971 cdb[7] = (uint8_t) (num_lba >> 8);
972 cdb[5] = (uint8_t) (lba_lo & 0xFF);
973 cdb[4] = (uint8_t) (lba_lo >> 8);
974 cdb[3] = (uint8_t) (lba_lo >> 16);
975 cdb[2] = (uint8_t) (lba_lo >> 24);
/* READ(12)/WRITE(12): 32-bit LBA, 32-bit count. */
977 } else if ((num_lba > 0xFFFF) && (lba_hi == 0)) {
978 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
979 /* Read 12 or Write 12 */
980 cdb[0] = (uint8_t) (0xAA);
982 cdb[0] = (uint8_t) (0xA8);
983 cdb[9] = (uint8_t) (num_lba & 0xFF);
984 cdb[8] = (uint8_t) (num_lba >> 8);
985 cdb[7] = (uint8_t) (num_lba >> 16);
986 cdb[6] = (uint8_t) (num_lba >> 24);
987 cdb[5] = (uint8_t) (lba_lo & 0xFF);
988 cdb[4] = (uint8_t) (lba_lo >> 8);
989 cdb[3] = (uint8_t) (lba_lo >> 16);
990 cdb[2] = (uint8_t) (lba_lo >> 24);
/* READ(16)/WRITE(16): full 64-bit LBA. */
993 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
994 cdb[0] = (uint8_t) (0x8A);
996 cdb[0] = (uint8_t) (0x88);
997 cdb[13] = (uint8_t) (num_lba & 0xFF);
998 cdb[12] = (uint8_t) (num_lba >> 8);
999 cdb[11] = (uint8_t) (num_lba >> 16);
1000 cdb[10] = (uint8_t) (num_lba >> 24);
1001 cdb[9] = (uint8_t) (lba_lo & 0xFF);
1002 cdb[8] = (uint8_t) (lba_lo >> 8);
1003 cdb[7] = (uint8_t) (lba_lo >> 16);
1004 cdb[6] = (uint8_t) (lba_lo >> 24);
1005 cdb[5] = (uint8_t) (lba_hi & 0xFF);
1006 cdb[4] = (uint8_t) (lba_hi >> 8);
1007 cdb[3] = (uint8_t) (lba_hi >> 16);
1008 cdb[2] = (uint8_t) (lba_hi >> 24);
/*
 * mfi_tbolt_make_sgl - copy the OS 32-bit (or skinny 64-bit) SGL into the
 * MPT IEEE 64-bit SGL, spilling into the per-command chain frame when the
 * entries exceed what fits in the main message.
 */
1015 mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
1016 pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
1018 uint8_t i, sg_processed;
1019 uint8_t sge_count, sge_idx;
1020 union mfi_sgl *os_sgl;
1023 * Return 0 if there is no data transfer
1025 if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
1026 device_printf(sc->mfi_dev, "Buffer empty \n");
1029 os_sgl = mfi_cmd->cm_sg;
1030 sge_count = mfi_cmd->cm_frame->header.sg_count;
1032 if (sge_count > sc->mfi_max_sge) {
1033 device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
/* Reserve the last main-message slot for the chain element if needed. */
1038 if (sge_count > sc->max_SGEs_in_main_message)
1039 /* One element to store the chain info */
1040 sge_idx = sc->max_SGEs_in_main_message - 1;
1042 sge_idx = sge_count;
1044 for (i = 0; i < sge_idx; i++) {
1046 * For 32bit BSD we are getting 32 bit SGL's from OS
1047 * but FW only take 64 bit SGL's so copying from 32 bit
1050 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1051 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1052 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1054 sgl_ptr->Length = os_sgl->sg32[i].len;
1055 sgl_ptr->Address = os_sgl->sg32[i].addr;
1059 cmd->io_request->ChainOffset = 0;
/* Remaining entries go into the chain frame. */
1064 if (sg_processed < sge_count) {
1065 pMpi25IeeeSgeChain64_t sg_chain;
1066 cmd->io_request->ChainOffset =
1067 sc->chain_offset_value_for_main_message;
1069 /* Prepare chain element */
/* NOTE(review): sg_chain is used here but its assignment (presumably to
 * the reserved main-message slot) is on a line missing from this dump. */
1070 sg_chain->NextChainOffset = 0;
1071 sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1072 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1073 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
1074 (sge_count - sg_processed));
1075 sg_chain->Address = cmd->sg_frame_phys_addr;
1076 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
1077 for (; i < sge_count; i++) {
1078 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1079 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1080 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1082 sgl_ptr->Length = os_sgl->sg32[i].len;
1083 sgl_ptr->Address = os_sgl->sg32[i].addr;
/*
 * mfi_build_and_issue_cmd - allocate a Thunderbolt command, build the MPT
 * I/O request for it, and return its request descriptor (NULL on failure).
 */
1092 union mfi_mpi2_request_descriptor *
1093 mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
1095 struct mfi_cmd_tbolt *cmd;
1096 union mfi_mpi2_request_descriptor *req_desc = NULL;
1098 cmd = mfi_tbolt_get_cmd(sc);
1101 mfi_cmd->cm_extra_frames = cmd->index;
1102 cmd->sync_cmd_idx = mfi_cmd->cm_index;
/* cmd->index is the 1-based SMID; the descriptor pool is 0-based. */
1105 req_desc = mfi_tbolt_get_request_descriptor(sc, index-1);
1106 if (mfi_tbolt_build_io(sc, mfi_cmd, cmd))
1108 req_desc->header.SMID = index;
/*
 * mfi_tbolt_build_mpt_cmd - wrap a legacy MFI command in an MPT pass-through
 * and return the ready-to-issue request descriptor (NULL on failure).
 */
1112 union mfi_mpi2_request_descriptor *
1113 mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
1115 union mfi_mpi2_request_descriptor *req_desc = NULL;
1117 if (mfi_build_mpt_pass_thru(sc, cmd)) {
1118 device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
1122 /* For fusion the frame_count variable is used for SMID */
1123 index = cmd->cm_extra_frames;
1125 req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
1129 bzero(req_desc, sizeof(*req_desc));
1130 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1131 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1132 req_desc->header.SMID = index;
/*
 * Issue an MFI command to a Thunderbolt controller.  The MFI frame is
 * mapped to an MPT request (directly for LD I/O, via pass-through for
 * everything else), the request descriptor is written to the inbound
 * low/high queue ports, and, for MFI_CMD_POLLED commands, the routine
 * busy-waits for completion up to MFI_POLL_TIMEOUT_SECS.
 */
1137 mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1139 struct mfi_frame_header *hdr;
1141 union mfi_mpi2_request_descriptor *req_desc = NULL;
1142 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1144 hdr = &cm->cm_frame->header;
1145 cdb = cm->cm_frame->pass.cdb;
/* Non-polled commands are timestamped and tracked on the busy queue. */
1148 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1149 cm->cm_timestamp = time_uptime;
1150 mfi_enqueue_busy(cm);
/* 0xff marks "not yet completed"; the FW overwrites it on completion. */
1153 hdr->cmd_status = 0xff;
1154 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1157 if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
1158 /* check for inquiry commands coming from CLI */
/*
 * BUGFIX: was `||`, which is true for every opcode and made the
 * else (SYSPDIO) branch unreachable.  The intent is "neither
 * READ(10) (0x28) nor WRITE(10) (0x2A)".
 */
1159 if (cdb[0] != 0x28 && cdb[0] != 0x2A) {
1160 if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
1162 device_printf(sc->mfi_dev, "Mapping from MFI "
1163 "to MPT Failed \n");
1168 device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
1170 else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
1171 hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
/* LD I/O gets a native MPT request rather than a pass-through. */
1172 if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
1173 device_printf(sc->mfi_dev, "LDIO Failed \n");
1177 if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
1178 device_printf(sc->mfi_dev, "Mapping from MFI to MPT "
/* Post the 64-bit descriptor: low word first, then high word. */
1182 MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
1183 MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >>0x20));
1185 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1188 /* This is a polled command, so busy-wait for it to complete. */
1189 while (hdr->cmd_status == 0xff) {
/* Still 0xff after the timeout window: report and fail the frame. */
1196 if (hdr->cmd_status == 0xff) {
1197 device_printf(sc->mfi_dev, "Frame %p timed out "
1198 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * After a firmware reset, walk the busy queue (newest first) and
 * re-issue every command that was outstanding when the adapter reset.
 * A command seen across three resets is assumed to be the trigger and
 * the HBA is marked dead instead.  Caller must hold mfi_io_lock.
 */
1204 static void mfi_issue_pending_cmds_again (struct mfi_softc *sc)
1206 struct mfi_command *cm, *tmp;
1208 mfi_lockassert(&sc->mfi_io_lock);
1209 TAILQ_FOREACH_REVERSE_MUTABLE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
1211 cm->retry_for_fw_reset++;
1214 * If a command has continuously been tried multiple times
1215 * and causing a FW reset condition, no further recoveries
1216 * should be performed on the controller
1218 if (cm->retry_for_fw_reset == 3) {
1219 device_printf(sc->mfi_dev, "megaraid_sas: command %d "
1220 "was tried multiple times during adapter reset"
1221 "Shutting down the HBA\n", cm->cm_index);
/* Third strike: take the controller offline rather than loop forever. */
1223 sc->hw_crit_error = 1;
/* Pull the command off the busy queue and return its tbolt frame. */
1227 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) {
1228 struct mfi_cmd_tbolt *cmd;
1229 mfi_remove_busy(cm);
1230 cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames -
1232 mfi_tbolt_return_cmd(sc, cmd);
/*
 * Commands on no queue get requeued for re-issue — except the
 * long-lived AEN wait, which is restarted elsewhere after reset.
 */
1233 if ((cm->cm_flags & MFI_ON_MFIQ_MASK) == 0) {
1234 if (cm->cm_frame->dcmd.opcode !=
1235 MFI_DCMD_CTRL_EVENT_WAIT) {
1236 device_printf(sc->mfi_dev,
1237 "APJ ****requeue command %d \n",
1239 mfi_requeue_ready(cm);
1243 mfi_release_command(cm);
/*
 * Stop the adapter by writing MFI_STOP_ADP to the doorbell register.
 * Thunderbolt controllers take the write at register offset 0x00;
 * otherwise the legacy MFI_IDB inbound doorbell is used (the `else`
 * keyword sits on a line elided from this view).
 */
1249 static void mfi_kill_hba (struct mfi_softc *sc)
1251 if (sc->mfi_flags & MFI_FLAGS_TBOLT)
1252 MFI_WRITE4 (sc, 0x00,MFI_STOP_ADP);
1254 MFI_WRITE4 (sc, MFI_IDB,MFI_STOP_ADP);
/*
 * Handle a firmware state-change interrupt (adapter reset recovery).
 * When sc->adpreset == 1 this runs the two-stage reset: stage one
 * resets the adapter, stage two waits for the reset status register to
 * clear, re-initializes the MFI queue, tears down the outstanding AEN
 * and map-update commands, re-issues everything that was in flight,
 * and finally restarts AEN delivery if the HBA survived.
 */
1257 static void mfi_process_fw_state_chg_isr(void *arg)
1259 struct mfi_softc *sc= (struct mfi_softc *)arg;
1260 struct mfi_cmd_tbolt *cmd;
1263 if (sc->adpreset == 1) {
1264 device_printf(sc->mfi_dev, "First stage of FW reset "
/* Stage one: hardware reset, then re-enable interrupts. */
1267 sc->mfi_adp_reset(sc);
1268 sc->mfi_enable_intr(sc);
1270 device_printf(sc->mfi_dev, "First stage of reset complete, "
1271 "second stage initiated...\n");
1275 /* waiting for about 20 second before start the second init */
1276 for (int wait = 0; wait < 20000; wait++)
1278 device_printf(sc->mfi_dev, "Second stage of FW reset "
/* Spin until the reset-in-progress bit (0x04) clears in MFI_RSR. */
1280 while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);
1282 sc->mfi_disable_intr(sc);
1284 /* We expect the FW state to be READY */
1285 if (mfi_transition_firmware(sc)) {
1286 device_printf(sc->mfi_dev, "controller is not in "
/* FW never reached READY: mark the HBA dead. */
1289 sc->hw_crit_error= 1;
1292 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0)
1295 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1297 sc->mfi_enable_intr(sc);
/*
 * NOTE(review): sc->mfi_aen_cm is dereferenced here (cm_data,
 * cm_extra_frames) but only NULL-checked below at line 1304 — if it can
 * ever be NULL at this point, this is a use-before-check; confirm.
 */
1299 kfree(sc->mfi_aen_cm->cm_data, M_MFIBUF);
1300 mfi_remove_busy(sc->mfi_aen_cm);
1301 cmd = sc->mfi_cmd_pool_tbolt[sc->mfi_aen_cm->cm_extra_frames
1303 mfi_tbolt_return_cmd(sc, cmd);
1304 if (sc->mfi_aen_cm) {
1305 mfi_release_command(sc->mfi_aen_cm);
1306 sc->mfi_aen_cm = NULL;
1308 if (sc->map_update_cmd) {
1309 mfi_release_command(sc->map_update_cmd);
1310 sc->map_update_cmd = NULL;
/* Replay every command that was outstanding across the reset. */
1312 mfi_issue_pending_cmds_again(sc);
1315 * Issue pending command can result in adapter being marked
1316 * dead because of too many re-tries. Check for that
1317 * condition before clearing the reset condition on the FW
1319 if (!sc->hw_crit_error) {
1321 * Initiate AEN (Asynchronous Event Notification)
1323 mfi_aen_setup(sc, sc->last_seq_num);
1324 sc->issuepend_done = 1;
1325 device_printf(sc->mfi_dev, "second stage of reset "
1326 "complete, FW is ready now.\n");
1328 device_printf(sc->mfi_dev, "second stage of reset "
1329 "never completed, hba was marked offline.\n");
/* Any adpreset value other than 1 is unexpected; log and fall through. */
1332 device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
1333 "called with unhandled value:%d\n", sc->adpreset);
1335 lockmgr(&sc->mfi_io_lock, LK_RELEASE);