kernel: Remove some unused variables in RAID and disk drivers.
[dragonfly.git] / sys / dev / raid / mfi / mfi_tbolt.c
CommitLineData
590ba11d
SW
1/*-
2 * Redistribution and use in source and binary forms, with or without
3 * modification, are permitted provided that the following conditions
4 * are met:
5 *
6 * Copyright 1994-2009 The FreeBSD Project.
7 * All rights reserved.
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * The views and conclusions contained in the software and documentation
28 * are those of the authors and should not be interpreted as representing
29 * official policies, either expressed or implied, of the FreeBSD Project.
30 *
31 * $FreeBSD: src/sys/dev/mfi/mfi_tbolt.c,v 1.00 2010/06/30 16:00:00 Bharat Gusain Exp $
32 * FreeBSD projects/head_mfi/ r232949
33 */
34
35#include "opt_mfi.h"
36
37#include <sys/param.h>
38#include <sys/types.h>
39#include <sys/kernel.h>
40#include <sys/bus.h>
41#include <sys/conf.h>
42#include <sys/bio.h>
43#include <sys/buf2.h>
44#include <sys/ioccom.h>
45#include <sys/eventhandler.h>
46#include <sys/callout.h>
47#include <sys/uio.h>
48#include <sys/sysctl.h>
49#include <sys/systm.h>
50#include <sys/malloc.h>
51
52#include <dev/raid/mfi/mfireg.h>
53#include <dev/raid/mfi/mfi_ioctl.h>
54#include <dev/raid/mfi/mfivar.h>
55
56struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc);
57union mfi_mpi2_request_descriptor *
58mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
59void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
60int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
61 struct mfi_cmd_tbolt *cmd);
62static inline void mfi_tbolt_return_cmd(struct mfi_softc *sc,
63 struct mfi_cmd_tbolt *cmd);
64union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
65 *sc, struct mfi_command *cmd);
66uint8_t
67mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
68union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
69 *sc, struct mfi_command *mfi_cmd);
70int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd);
71void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
72 struct mfi_cmd_tbolt *cmd);
73static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
74 *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
75static int mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command
76 *mfi_cmd, uint8_t *cdb);
77void
78map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
79 uint8_t ext_status);
80static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
81static void mfi_kill_hba (struct mfi_softc *sc);
82static void mfi_process_fw_state_chg_isr(void *arg);
83uint8_t mfi_tbolt_get_map_info(struct mfi_softc *sc);
84
85#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
86
87void
88mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
89{
90 MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
91 MFI_READ4(sc, MFI_OMSK);
92}
93
94void
95mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
96{
97 MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
98 MFI_READ4(sc, MFI_OMSK);
99}
100
101int32_t
102mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
103{
104 return MFI_READ4(sc, MFI_OSP0);
105}
106
107int32_t
108mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
109{
110 int32_t status, mfi_status = 0;
111
112 status = MFI_READ4(sc, MFI_OSTS);
113
114 if (status & 1) {
115 MFI_WRITE4(sc, MFI_OSTS, status);
116 MFI_READ4(sc, MFI_OSTS);
117 if (status & MFI_STATE_CHANGE_INTERRUPT) {
118 mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
119 }
120
121 return mfi_status;
122 }
123 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
124 return 1;
125
126 MFI_READ4(sc, MFI_OSTS);
127 return 0;
128}
129
130
/*
 * Post an MFA-type request descriptor to the controller's inbound queue
 * ports (low 32 bits first, then high 32 bits).
 *
 * NOTE(review): frame_cnt is currently unused on this path — confirm
 * whether it should be encoded into the descriptor as on other chips.
 */
void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
    uint32_t frame_cnt)
{
	/* Tag the address with the MFA descriptor type bits. */
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}
140
141/**
142 * mfi_tbolt_adp_reset - For controller reset
143 * @regs: MFI register set
144 */
145int mfi_tbolt_adp_reset(struct mfi_softc *sc)
146{
147 int retry = 0, i = 0;
148 int HostDiag;
149
150 MFI_WRITE4(sc, MFI_WSR, 0xF);
151 MFI_WRITE4(sc, MFI_WSR, 4);
152 MFI_WRITE4(sc, MFI_WSR, 0xB);
153 MFI_WRITE4(sc, MFI_WSR, 2);
154 MFI_WRITE4(sc, MFI_WSR, 7);
155 MFI_WRITE4(sc, MFI_WSR, 0xD);
156
157 for (i = 0; i < 10000; i++) ;
158
159 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
160
161 while (!( HostDiag & DIAG_WRITE_ENABLE)) {
162 for (i = 0; i < 1000; i++);
163 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
164 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
165 "hostdiag=%x\n", retry, HostDiag);
166
167 if (retry++ >= 100)
168 return 1;
169 }
170
171 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%x\n", HostDiag);
172
173 MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
174
175 for (i=0; i < 10; i++) {
176 for (i = 0; i < 10000; i++);
177 }
178
179 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
180 while (HostDiag & DIAG_RESET_ADAPTER) {
181 for (i = 0; i < 1000; i++) ;
182 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
183 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
184 "hostdiag=%x\n", retry, HostDiag);
185
186 if (retry++ >= 1000)
187 return 1;
188 }
189 return 0;
190}
191
/*
 * Initialize Thunderbolt-specific sizing parameters in the softc:
 * message/reply frame sizes, SGE capacities, and chain offsets.
 * Must run before any Thunderbolt memory is allocated.
 */
void mfi_tbolt_init_globals(struct mfi_softc *sc)
{
	/* Initialize single reply size and Message size */
	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Calculating how many SGEs allowed in a allocated main message
	 * (size of the Message - Raid SCSI IO message size(except SGE))
	 * / size of SGE
	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
	 */
	sc->max_SGEs_in_main_message =
	    (uint8_t)((sc->raid_io_msg_size
	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
	/*
	 * (Command frame size allocaed in SRB ext - Raid SCSI IO message size)
	 * / size of SGL ;
	 * (1280 - 256) / 16 = 64
	 */
	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
	/*
	 * (0x08-1) + 0x40 = 0x47 - 0x01 = 0x46; one SGE is left for the
	 * chain element in the main message, one for command coalescing.
	 */
	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
	    + sc->max_SGEs_in_chain_message - 1;
	/*
	 * This is the offset in number of 4 * 32bit words to the next chain
	 * (0x100 - 0x10)/0x10 = 0xF(15)
	 */
	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
	    - sizeof(MPI2_SGE_IO_UNION))/16;
	sc->chain_offset_value_for_mpt_ptmsg
	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
	/* Pools are allocated later in mfi_tbolt_alloc_cmd(). */
	sc->mfi_cmd_pool_tbolt = NULL;
	sc->request_desc_pool = NULL;
}
238
239/*
240 ****************************************************************************
241 * Description:
242 * This function calculates the memory requirement for Thunderbolt
243 * controller
244 * Return Value:
245 * Total required memory in bytes
246 ****************************************************************************
247 */
248
249uint32_t mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
250{
251 uint32_t size;
252
253 size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT; /* for Alignment */
254 size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
255 size += sc->reply_size * sc->mfi_max_fw_cmds;
256 /* this is for SGL's */
257 size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
258 return size;
259}
260
/*
 * Carve the preallocated contiguous DMA region (mem_location,
 * tbolt_contg_length bytes, bus address sc->mfi_tb_busaddr) into the
 * aligned request message pool, the reply frame pool and the SG frame
 * area, recording both virtual and bus addresses in the softc.
 *
 * Returns 0; an overrun of the region is only reported, not failed.
 */
int mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
    uint32_t tbolt_contg_length)
{
	uint32_t offset = 0;
	uint8_t *addr = mem_location;

	/* Request Descriptor Base physical Address */

	/* For Request Decriptors Virtual Memory */
	/*
	 * Align the IO frame pool to a 256-byte boundary by rounding up
	 * (advance one message, then mask the low 8 bits off).
	 */
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->raid_io_msg_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
		sc->request_message_pool_align = addr;
	} else
		sc->request_message_pool_align = addr;

	/* Bus address of the aligned pool = base bus addr + alignment skew. */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

	/* DJA XXX should this be bus dma ??? */
	/* Skip request message pool (SMID 0 slot included, hence +1). */
	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
	/* Reply Frame Pool is initialized */
	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->reply_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
	}
	sc->reply_frame_pool_align
	    = (struct mfi_mpi2_reply_header *)addr;

	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

	/* Skip Reply Frame Pool; what follows is the SG frame area. */
	addr += sc->reply_size * sc->mfi_max_fw_cmds;
	sc->reply_pool_limit = addr;

	/* Fill reply frames with 0xFF: the "unused descriptor" marker. */
	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
	    (sc->reply_size * sc->mfi_max_fw_cmds));

	offset = sc->reply_size * sc->mfi_max_fw_cmds;
	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
	/* initialize the last_reply_idx to 0 */
	sc->last_reply_idx = 0;
	/* Sanity check: total carved size must fit the allocation. */
	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
	if (offset > tbolt_contg_length)
		device_printf(sc->mfi_dev, "Error:Initialized more than "
		    "allocated\n");
	return 0;
}
328
/*
 * Build and issue the MPI2 IOC INIT request to the firmware, telling it
 * where the reply-descriptor post queue and request message pool live.
 * On success sets sc->MFA_enabled.
 *
 * Returns 0 on success, 1 if already initialized or the firmware
 * rejected the command, EBUSY if no free command, or the mfi_mapcmd()
 * error.
 */
int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
	struct mfi_init_frame *mfi_init;
	uintptr_t offset = 0;
	bus_addr_t phyAddress;
	MFI_ADDRESS *mfiAddressTemp;
	struct mfi_command *cm;
	int error;

	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
	/* Check if initialization is already completed */
	if (sc->MFA_enabled) {
		return 1;
	}

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (EBUSY);
	}
	/* Repoint the command at the dedicated Thunderbolt init frame. */
	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
	cm->cm_frame->header.context = 0;
	cm->cm_sc = sc;
	cm->cm_index = 0;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object;
	 */
	mfi_init = &cm->cm_frame->init;

	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
	mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	/* Frame size is expressed in 32-bit-word units. */
	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth
	    = (uint16_t)sc->mfi_max_fw_cmds;
	mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */

	/* Get physical address of reply frame pool */
	offset = (uintptr_t) sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp =
	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
	mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_second;

	/* Hand the firmware the driver version string, if allocated. */
	if (sc->verbuf) {
		ksnprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
		    MEGASAS_VERSION);
		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
		mfi_init->driver_ver_hi =
		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
	}
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress = sc->mfi_tb_ioc_init_busaddr;
	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_init->header.cmd = MFI_CMD_INIT;
	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
	/* Sentinel status; firmware overwrites it on completion. */
	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

	cm->cm_data = NULL;
	/* Polled: completion is detected by cmd_status, not interrupt. */
	cm->cm_flags |= MFI_CMD_POLLED;
	cm->cm_timestamp = time_second;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send IOC init2 "
		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
		mfi_release_command(cm);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (error);
	}
	mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	if (mfi_init->header.cmd_status == 0) {
		sc->MFA_enabled = 1;
	}
	else {
		device_printf(sc->mfi_dev, "Init command Failed %x\n",
		    mfi_init->header.cmd_status);
		return 1;
	}

	return 0;

}
443
444int mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
445{
446 struct mfi_cmd_tbolt *cmd;
447 bus_addr_t io_req_base_phys;
448 uint8_t *io_req_base;
449 int i = 0, j = 0, offset = 0;
450
451 /*
452 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
453 * Allocate the dynamic array first and then allocate individual
454 * commands.
455 */
456 sc->request_desc_pool = kmalloc(sizeof(
457 union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
458 M_MFIBUF, M_NOWAIT|M_ZERO);
459 sc->mfi_cmd_pool_tbolt = kmalloc(sizeof(struct mfi_cmd_tbolt*)
460 * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
461
462 if (!sc->mfi_cmd_pool_tbolt) {
463 device_printf(sc->mfi_dev, "out of memory. Could not alloc "
464 "memory for cmd_list_fusion\n");
465 return 1;
466 }
467
468 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
469 sc->mfi_cmd_pool_tbolt[i] = kmalloc(sizeof(
470 struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
471
472 if (!sc->mfi_cmd_pool_tbolt[i]) {
473 device_printf(sc->mfi_dev, "Could not alloc cmd list "
474 "fusion\n");
475
476 for (j = 0; j < i; j++)
477 kfree(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
478
479 kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
480 sc->mfi_cmd_pool_tbolt = NULL;
481 }
482 }
483
484 /*
485 * The first 256 bytes (SMID 0) is not used. Don't add to the cmd
486 *list
487 */
488 io_req_base = sc->request_message_pool_align
489 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
490 io_req_base_phys = sc->request_msg_busaddr
491 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
492
493 /*
494 * Add all the commands to command pool (instance->cmd_pool)
495 */
496 /* SMID 0 is reserved. Set SMID/index from 1 */
497
498 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
499 cmd = sc->mfi_cmd_pool_tbolt[i];
500 offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
501 cmd->index = i + 1;
502 cmd->request_desc = (union mfi_mpi2_request_descriptor *)
503 (sc->request_desc_pool + i);
504 cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
505 (io_req_base + offset);
506 cmd->io_request_phys_addr = io_req_base_phys + offset;
507 cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
508 + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
509 cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
510 * MEGASAS_MAX_SZ_CHAIN_FRAME;
511
512 TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
513 }
514 return 0;
515}
516
/*
 * Check whether the Thunderbolt firmware has faulted and, if so, kick
 * off the adapter reset path (mfi_process_fw_state_chg_isr).
 *
 * Returns 0 when a firmware fault was detected (reset path taken or
 * already in progress), 1 otherwise (no PCI access, HW critical error,
 * or no fault).
 */
int mfi_tbolt_reset(struct mfi_softc *sc)
{
	uint32_t fw_state;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	if (atomic_read(&sc->fw_reset_no_pci_access)) {
		device_printf(sc->mfi_dev, "NO PCI ACCESS\n");
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return 1;
	}

	if (sc->hw_crit_error) {
		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return 1;
	}

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		fw_state = sc->mfi_read_fw_status(sc);
		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT) {
			/* Only start a reset if none is already underway. */
			if ((sc->disableOnlineCtrlReset == 0)
			    && (sc->adpreset == 0)) {
				device_printf(sc->mfi_dev, "Adapter RESET "
				    "condition is detected\n");
				sc->adpreset = 1;
				sc->issuepend_done = 0;
				sc->MFA_enabled = 0;
				sc->last_reply_idx = 0;
				mfi_process_fw_state_chg_isr(sc);
			}
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return 0;
		}
	}
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	return 1;
}
554
555/*
556 * mfi_intr_tbolt - isr entry point
557 */
558void mfi_intr_tbolt(void *arg)
559{
560 struct mfi_softc *sc = (struct mfi_softc *)arg;
561
562 if (sc->mfi_check_clear_intr(sc) == 1) {
563 return;
564 }
565 if (sc->mfi_detaching)
566 return;
567 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
568 mfi_tbolt_complete_cmd(sc);
569 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
570 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
571 mfi_startio(sc);
572 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
573 return;
574}
575
576/**
577 * map_cmd_status - Maps FW cmd status to OS cmd status
578 * @cmd : Pointer to cmd
579 * @status : status of cmd returned by FW
580 * @ext_status : ext status of cmd returned by FW
581 */
582
583void
584map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
585 uint8_t ext_status)
586{
587
588 switch (status) {
589
590 case MFI_STAT_OK:
591 mfi_cmd->cm_frame->header.cmd_status = 0;
592 mfi_cmd->cm_frame->dcmd.header.cmd_status = 0;
593 break;
594
595 case MFI_STAT_SCSI_IO_FAILED:
596 case MFI_STAT_LD_INIT_IN_PROGRESS:
597 mfi_cmd->cm_frame->header.cmd_status = status;
598 mfi_cmd->cm_frame->header.scsi_status = ext_status;
599 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
600 mfi_cmd->cm_frame->dcmd.header.scsi_status
601 = ext_status;
602 break;
603
604 case MFI_STAT_SCSI_DONE_WITH_ERROR:
605 mfi_cmd->cm_frame->header.cmd_status = ext_status;
606 mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
607 break;
608
609 case MFI_STAT_LD_OFFLINE:
610 case MFI_STAT_DEVICE_NOT_FOUND:
611 mfi_cmd->cm_frame->header.cmd_status = status;
612 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
613 break;
614
615 default:
616 mfi_cmd->cm_frame->header.cmd_status = status;
617 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
618 break;
619 }
620}
621
/**
 * mfi_tbolt_return_cmd - Return a cmd to free command pool
 * @sc: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 *
 * Caller must hold sc->mfi_io_lock; the free list is protected by it.
 */
static inline void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *cmd)
{
	mfi_lockassert(&sc->mfi_io_lock);

	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, cmd, next);
}
634
/*
 * Drain the reply descriptor ring starting at sc->last_reply_idx and
 * complete the associated MFI commands.  Consumed descriptors are
 * reset to the all-ones "unused" marker and the firmware's reply post
 * index register is updated.  Called with sc->mfi_io_lock held.
 */
void mfi_tbolt_complete_cmd(struct mfi_softc *sc)
{
	struct mfi_mpi2_reply_header *desc, *reply_desc;
	struct mfi_command *cmd_mfi; /* For MFA Cmds */
	struct mfi_cmd_tbolt *cmd_tbolt;
	uint16_t smid;
	uint8_t reply_descript_type;
	struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
	uint32_t status, extStatus;
	uint16_t num_completed;
	union desc_value val;

	desc = (struct mfi_mpi2_reply_header *)
	    ((uintptr_t)sc->reply_frame_pool_align
	    + sc->last_reply_idx * sc->reply_size);
	reply_desc = desc;

	if (!reply_desc)
		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");

	reply_descript_type = reply_desc->ReplyFlags
	    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	/* Nothing posted at the current index: ring is empty. */
	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return;

	num_completed = 0;
	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;

	/* Read Reply descriptor */
	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {

		smid = reply_desc->SMID;
		/*
		 * NOTE(review): mfi_tbolt_alloc_cmd assigns SMIDs
		 * 1..mfi_max_fw_cmds, yet this check also admits
		 * smid == mfi_max_fw_cmds + 1, which would index one past
		 * the pool below — confirm the intended upper bound.
		 */
		if (!smid || smid > sc->mfi_max_fw_cmds + 1) {
			device_printf(sc->mfi_dev, "smid is %x. Cannot "
			    "proceed. Returning \n", smid);
			return;
		}

		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
		scsi_io_req = cmd_tbolt->io_request;

		/* Check if internal commands */
		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_LD_IO_REQUEST:
			/* Regular Path IO. */
			/* Map the Fw Error Status. */
			map_tbolt_cmd_status(cmd_mfi, status,
			    extStatus);
			/* Completion of an LD map sync DCMD. */
			if ((cmd_mfi->cm_frame->dcmd.opcode
			    == MFI_DCMD_LD_MAP_GET_INFO)
			    && (cmd_mfi->cm_frame->dcmd.mbox[1] == 1)) {
				if (cmd_mfi->cm_frame->header.cmd_status
				    != 0)
					device_printf(sc->mfi_dev,
					    "map sync failed\n");
				else {
					sc->map_id++;
					device_printf(sc->mfi_dev,
					    "map sync completed\n");
					mfi_release_command(cmd_mfi);
				}
			}
			/* Complete non-polled commands still on busy queue. */
			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY)
			    == MFI_ON_MFIQ_BUSY
			    && (cmd_mfi->cm_flags & MFI_CMD_POLLED) == 0) {
				/* BHARAT poll workaround */
				mfi_remove_busy(cmd_mfi);
				cmd_mfi->cm_error = 0;
				mfi_complete(sc, cmd_mfi);
			}
			mfi_tbolt_return_cmd(sc, cmd_tbolt);
			break;
		case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:
			/* Same handling as the LD I/O case above. */
			map_tbolt_cmd_status(cmd_mfi, status, extStatus);
			if ((cmd_mfi->cm_frame->dcmd.opcode
			    == MFI_DCMD_LD_MAP_GET_INFO)
			    && (cmd_mfi->cm_frame->dcmd.mbox[1] == 1)) {
				if (cmd_mfi->cm_frame->header.cmd_status != 0)
					device_printf(sc->mfi_dev,
					    "map sync failed\n");
				else {
					sc->map_id++;
					device_printf(sc->mfi_dev,
					    "map sync completed\n");
					mfi_release_command(cmd_mfi);
				}
			}
			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY)
			    == MFI_ON_MFIQ_BUSY
			    && (cmd_mfi->cm_flags & MFI_CMD_POLLED) == 0) {
				/* BHARAT poll workaround */
				mfi_remove_busy(cmd_mfi);
				cmd_mfi->cm_error = 0;
				mfi_complete(sc, cmd_mfi);
			}
			mfi_tbolt_return_cmd(sc, cmd_tbolt);
			break;
		}

		/* Advance the ring index, wrapping at the queue depth. */
		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
			sc->last_reply_idx = 0;
		}
		/* set it back to all 0xff (the "unused" marker). */
		((union mfi_mpi2_reply_descriptor*)desc)->words =
		    ~((uint64_t)0x00);

		num_completed++;

		/* Get the next reply descriptor */
		desc = (struct mfi_mpi2_reply_header *)
		    ((uintptr_t)sc->reply_frame_pool_align
		    + sc->last_reply_idx * sc->reply_size);
		reply_desc = desc;
		val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
		reply_descript_type = reply_desc->ReplyFlags
		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return;

	/* update replyIndex to FW */
	if (sc->last_reply_idx)
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

	return;
}
770
771/**
772 * mfi_get_cmd - Get a command from the free pool
773 * @instance: Adapter soft state
774 *
775 * Returns a free command from the pool
776 */
777
778struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc
779 *sc)
780{
781 struct mfi_cmd_tbolt *cmd = NULL;
782
148e9c0b 783 mfi_lockassert(&sc->mfi_io_lock);
590ba11d
SW
784
785 cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh);
786 TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
787 memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
788 memset((uint8_t *)cmd->io_request, 0,
789 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
790 return cmd;
791}
792
793union mfi_mpi2_request_descriptor *
794mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
795{
796 uint8_t *p;
797
798 if (index >= sc->mfi_max_fw_cmds) {
799 device_printf(sc->mfi_dev, "Invalid SMID (0x%x)request "
800 "for descriptor\n", index);
801 return NULL;
802 }
803 p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
804 * index;
805 memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
806 return (union mfi_mpi2_request_descriptor *)p;
807}
808
809
/*
 * Build the MPT pass-through wrapper for an MFI (IOCTL-style) command:
 * grab a Thunderbolt command, link it to the MFI command via the SMID,
 * and point a single IEEE SGE chain element at the MFI frame.
 *
 * Returns 0 on success, EBUSY if no Thunderbolt command is free.
 */
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	struct mfi_cmd_tbolt *cmd;

	cmd = mfi_tbolt_get_cmd(sc);
	if (!cmd)
		return EBUSY;
	mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
	cmd->sync_cmd_idx = mfi_cmd->cm_index;
	io_req = cmd->io_request;
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	/* SGL offset is expressed in 32-bit-word units. */
	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
	    SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;

	/* The chain element points at the original MFI frame. */
	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;

	/*
	 * In MFI pass thru, nextChainOffset will always be zero to
	 * indicate the end of the chain.
	 */
	mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
	    | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	return 0;
}
845
/*
 * Fill in the Thunderbolt RAID SCSI I/O request for a logical-disk
 * read/write: target/DevHandle, RAID context, request descriptor type
 * and data length, derived from the MFI I/O frame in mfi_cmd.
 */
void
mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	struct IO_REQUEST_INFO io_info;

	device_id = mfi_cmd->cm_frame->io.header.target_id;
	io_request = cmd->io_request;
	io_request->RaidContext.TargetID = device_id;
	io_request->RaidContext.Status = 0;
	io_request->RaidContext.exStatus =0;

	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;

	/* io_info is only consulted locally (direction / block count). */
	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
	io_info.ldTgtId = device_id;
	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
	    MFI_FRAME_DIR_READ)
		io_info.isRead = 1;

	io_request->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT;
	io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->header.RequestFlags =
	    (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	/*
	 * NOTE(review): IoFlags == 6 means a 6-byte CDB; with a 6-byte CDB
	 * a block count of 0 encodes 256 blocks — presumably why the
	 * region lock length is forced to 0x100 here.  Confirm.
	 */
	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
		io_request->RaidContext.RegLockLength = 0x100;
	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
	    * MFI_SECTOR_LEN;
}
881
882int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd)
883{
884 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
885 || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
886 return 1;
887 else
888 return 0;
889}
890
891int
892mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd, struct mfi_cmd_tbolt *cmd)
893{
590ba11d
SW
894 uint32_t sge_count;
895 uint8_t cdb[32], cdb_len;
896
897 memset(cdb, 0, 32);
898 struct mfi_mpi2_request_raid_scsi_io *io_request = cmd->io_request;
899
590ba11d
SW
900 /* Have to build CDB here for TB as BSD don't have a scsi layer */
901 if ((cdb_len = mfi_tbolt_build_cdb(sc, mfi_cmd, cdb)) == 1)
902 return 1;
903
904 /* Just the CDB length,rest of the Flags are zero */
905 io_request->IoFlags = cdb_len;
906 memcpy(io_request->CDB.CDB32, cdb, 32);
907
908 if (mfi_tbolt_is_ldio(mfi_cmd))
909 mfi_tbolt_build_ldio(sc, mfi_cmd , cmd);
910 else
911 return 1;
912
913 /*
914 * Construct SGL
915 */
916 sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
917 (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
918 if (sge_count > sc->mfi_max_sge) {
919 device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
920 "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
921 return 1;
922 }
923 io_request->RaidContext.numSGE = sge_count;
924 io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
925
926 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
927 io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
928 else
929 io_request->Control = MPI2_SCSIIO_CONTROL_READ;
930
931 io_request->SGLOffset0 = offsetof(
932 struct mfi_mpi2_request_raid_scsi_io, SGL)/4;
933
934 io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
935 io_request->SenseBufferLength = MFI_SENSE_LEN;
936 return 0;
937}
938
939static int
940mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
941 uint8_t *cdb)
942{
943 uint32_t lba_lo, lba_hi, num_lba;
944 uint8_t cdb_len;
945
946 if (mfi_cmd == NULL || cdb == NULL)
947 return 1;
948 num_lba = mfi_cmd->cm_frame->io.header.data_len;
949 lba_lo = mfi_cmd->cm_frame->io.lba_lo;
950 lba_hi = mfi_cmd->cm_frame->io.lba_hi;
951
952 if ((num_lba <= 0xFF) && (lba_lo <= 0x1FFFFF)) {
953 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
954 /* Read 6 or Write 6 */
955 cdb[0] = (uint8_t) (0x0A);
956 else
957 cdb[0] = (uint8_t) (0x08);
958
959 cdb[4] = (uint8_t) num_lba;
960 cdb[3] = (uint8_t) (lba_lo & 0xFF);
961 cdb[2] = (uint8_t) (lba_lo >> 8);
962 cdb[1] = (uint8_t) ((lba_lo >> 16) & 0x1F);
963 cdb_len = 6;
964 } else if ((num_lba <= 0xFFFF) && (lba_lo <= 0xFFFFFFFF)) {
965 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
966 /* Read 10 or Write 10 */
967 cdb[0] = (uint8_t) (0x2A);
968 else
969 cdb[0] = (uint8_t) (0x28);
970 cdb[8] = (uint8_t) (num_lba & 0xFF);
971 cdb[7] = (uint8_t) (num_lba >> 8);
972 cdb[5] = (uint8_t) (lba_lo & 0xFF);
973 cdb[4] = (uint8_t) (lba_lo >> 8);
974 cdb[3] = (uint8_t) (lba_lo >> 16);
975 cdb[2] = (uint8_t) (lba_lo >> 24);
976 cdb_len = 10;
977 } else if ((num_lba > 0xFFFF) && (lba_hi == 0)) {
978 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
979 /* Read 12 or Write 12 */
980 cdb[0] = (uint8_t) (0xAA);
981 else
982 cdb[0] = (uint8_t) (0xA8);
983 cdb[9] = (uint8_t) (num_lba & 0xFF);
984 cdb[8] = (uint8_t) (num_lba >> 8);
985 cdb[7] = (uint8_t) (num_lba >> 16);
986 cdb[6] = (uint8_t) (num_lba >> 24);
987 cdb[5] = (uint8_t) (lba_lo & 0xFF);
988 cdb[4] = (uint8_t) (lba_lo >> 8);
989 cdb[3] = (uint8_t) (lba_lo >> 16);
990 cdb[2] = (uint8_t) (lba_lo >> 24);
991 cdb_len = 12;
992 } else {
993 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
994 cdb[0] = (uint8_t) (0x8A);
995 else
996 cdb[0] = (uint8_t) (0x88);
997 cdb[13] = (uint8_t) (num_lba & 0xFF);
998 cdb[12] = (uint8_t) (num_lba >> 8);
999 cdb[11] = (uint8_t) (num_lba >> 16);
1000 cdb[10] = (uint8_t) (num_lba >> 24);
1001 cdb[9] = (uint8_t) (lba_lo & 0xFF);
1002 cdb[8] = (uint8_t) (lba_lo >> 8);
1003 cdb[7] = (uint8_t) (lba_lo >> 16);
1004 cdb[6] = (uint8_t) (lba_lo >> 24);
1005 cdb[5] = (uint8_t) (lba_hi & 0xFF);
1006 cdb[4] = (uint8_t) (lba_hi >> 8);
1007 cdb[3] = (uint8_t) (lba_hi >> 16);
1008 cdb[2] = (uint8_t) (lba_hi >> 24);
1009 cdb_len = 16;
1010 }
1011 return cdb_len;
1012}
1013
1014static int
1015mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
1016 pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
1017{
489fe090 1018 uint8_t i, sg_processed;
590ba11d
SW
1019 uint8_t sge_count, sge_idx;
1020 union mfi_sgl *os_sgl;
1021
1022 /*
1023 * Return 0 if there is no data transfer
1024 */
1025 if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
1026 device_printf(sc->mfi_dev, "Buffer empty \n");
1027 return 0;
1028 }
1029 os_sgl = mfi_cmd->cm_sg;
1030 sge_count = mfi_cmd->cm_frame->header.sg_count;
1031
1032 if (sge_count > sc->mfi_max_sge) {
1033 device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
1034 os_sgl, sge_count);
1035 return sge_count;
1036 }
1037
1038 if (sge_count > sc->max_SGEs_in_main_message)
1039 /* One element to store the chain info */
1040 sge_idx = sc->max_SGEs_in_main_message - 1;
1041 else
1042 sge_idx = sge_count;
1043
1044 for (i = 0; i < sge_idx; i++) {
1045 /*
1046 * For 32bit BSD we are getting 32 bit SGL's from OS
1047 * but FW only take 64 bit SGL's so copying from 32 bit
1048 * SGL's to 64.
1049 */
1050 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1051 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1052 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1053 } else {
1054 sgl_ptr->Length = os_sgl->sg32[i].len;
1055 sgl_ptr->Address = os_sgl->sg32[i].addr;
1056 }
1057 sgl_ptr->Flags = 0;
1058 sgl_ptr++;
1059 cmd->io_request->ChainOffset = 0;
1060 }
1061
1062 sg_processed = i;
1063
1064 if (sg_processed < sge_count) {
1065 pMpi25IeeeSgeChain64_t sg_chain;
590ba11d
SW
1066 cmd->io_request->ChainOffset =
1067 sc->chain_offset_value_for_main_message;
1068 sg_chain = sgl_ptr;
1069 /* Prepare chain element */
1070 sg_chain->NextChainOffset = 0;
1071 sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1072 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1073 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
1074 (sge_count - sg_processed));
1075 sg_chain->Address = cmd->sg_frame_phys_addr;
1076 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
1077 for (; i < sge_count; i++) {
1078 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1079 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1080 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1081 } else {
1082 sgl_ptr->Length = os_sgl->sg32[i].len;
1083 sgl_ptr->Address = os_sgl->sg32[i].addr;
1084 }
1085 sgl_ptr->Flags = 0;
1086 sgl_ptr++;
1087 }
1088 }
1089 return sge_count;
1090}
1091
1092union mfi_mpi2_request_descriptor *
1093mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
1094{
1095 struct mfi_cmd_tbolt *cmd;
1096 union mfi_mpi2_request_descriptor *req_desc = NULL;
1097 uint16_t index;
1098 cmd = mfi_tbolt_get_cmd(sc);
1099 if (!cmd)
1100 return NULL;
1101 mfi_cmd->cm_extra_frames = cmd->index;
1102 cmd->sync_cmd_idx = mfi_cmd->cm_index;
1103
1104 index = cmd->index;
1105 req_desc = mfi_tbolt_get_request_descriptor(sc, index-1);
1106 if (mfi_tbolt_build_io(sc, mfi_cmd, cmd))
1107 return NULL;
1108 req_desc->header.SMID = index;
1109 return req_desc;
1110}
1111
1112union mfi_mpi2_request_descriptor *
1113mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
1114{
1115 union mfi_mpi2_request_descriptor *req_desc = NULL;
1116 uint16_t index;
1117 if (mfi_build_mpt_pass_thru(sc, cmd)) {
1118 device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
1119 "cmd\n");
1120 return NULL;
1121 }
1122 /* For fusion the frame_count variable is used for SMID */
1123 index = cmd->cm_extra_frames;
1124
1125 req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
1126 if (!req_desc)
1127 return NULL;
1128
277dbd16 1129 bzero(req_desc, sizeof(*req_desc));
590ba11d
SW
1130 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1131 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1132 req_desc->header.SMID = index;
1133 return req_desc;
1134}
1135
/*
 * Hand an MFI command to a Thunderbolt controller: translate it to an
 * MPT request, post the descriptor to the inbound queue and, for
 * polled commands, busy-wait for completion.
 *
 * Returns 0 on success, 1 when the command cannot be translated (or an
 * adapter reset is in progress), or ETIMEDOUT when a polled command
 * does not complete within MFI_POLL_TIMEOUT_SECS.
 */
int
mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint8_t *cdb;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	int tm = MFI_POLL_TIMEOUT_SECS * 1000;	/* poll budget in 1ms steps */

	hdr = &cm->cm_frame->header;
	cdb = cm->cm_frame->pass.cdb;
	/* Refuse new work while an adapter reset is in progress. */
	if (sc->adpreset)
		return 1;
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_second;
		mfi_enqueue_busy(cm);
	}
	else {
		/* Polled: mark "pending" and keep it off the reply queue. */
		hdr->cmd_status = 0xff;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
		/* check for inquiry commands coming from CLI */
		/*
		 * NOTE(review): this condition is a tautology -- cdb[0]
		 * cannot differ from both 0x28 and 0x2A at once being
		 * false, so the test is always true and the else branch
		 * is dead code.  The intent was presumably
		 * (cdb[0] != 0x28 && cdb[0] != 0x2A); left unchanged
		 * because "fixing" it would leave req_desc NULL on the
		 * else path and crash at the MFI_WRITE4 below -- needs a
		 * coordinated fix, not a one-character edit.
		 */
		if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
			if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
			    NULL) {
				device_printf(sc->mfi_dev, "Mapping from MFI "
				    "to MPT Failed \n");
				return 1;
			}
		}
		else
			device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
	}
	else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
		/* LD I/O goes through the fast-path builder. */
		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "LDIO Failed \n");
			return 1;
		}
	} else
		/* Everything else is wrapped as an MPT pass-through. */
		if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "Mapping from MFI to MPT "
			    "Failed\n");
			return 1;
		}
	/* Post the 64-bit descriptor: low word first, then high word. */
	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >>0x20));

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return 0;

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == 0xff) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	/* 0xff still set after the timeout window: firmware never answered. */
	if (hdr->cmd_status == 0xff) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}
	return 0;
}
1203
1204static void mfi_issue_pending_cmds_again (struct mfi_softc *sc)
1205{
1206 struct mfi_command *cm, *tmp;
1207
148e9c0b 1208 mfi_lockassert(&sc->mfi_io_lock);
590ba11d
SW
1209 TAILQ_FOREACH_REVERSE_MUTABLE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
1210
1211 cm->retry_for_fw_reset++;
1212
1213 /*
1214 * If a command has continuously been tried multiple times
1215 * and causing a FW reset condition, no further recoveries
1216 * should be performed on the controller
1217 */
1218 if (cm->retry_for_fw_reset == 3) {
1219 device_printf(sc->mfi_dev, "megaraid_sas: command %d "
1220 "was tried multiple times during adapter reset"
1221 "Shutting down the HBA\n", cm->cm_index);
1222 mfi_kill_hba(sc);
1223 sc->hw_crit_error = 1;
1224 return;
1225 }
1226
1227 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) {
1228 struct mfi_cmd_tbolt *cmd;
1229 mfi_remove_busy(cm);
1230 cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames -
1231 1 ];
1232 mfi_tbolt_return_cmd(sc, cmd);
1233 if ((cm->cm_flags & MFI_ON_MFIQ_MASK) == 0) {
1234 if (cm->cm_frame->dcmd.opcode !=
1235 MFI_DCMD_CTRL_EVENT_WAIT) {
1236 device_printf(sc->mfi_dev,
1237 "APJ ****requeue command %d \n",
1238 cm->cm_index);
1239 mfi_requeue_ready(cm);
1240 }
1241 }
1242 else
1243 mfi_release_command(cm);
1244 }
1245 }
1246 mfi_startio(sc);
1247}
1248
1249static void mfi_kill_hba (struct mfi_softc *sc)
1250{
1251 if (sc->mfi_flags & MFI_FLAGS_TBOLT)
1252 MFI_WRITE4 (sc, 0x00,MFI_STOP_ADP);
1253 else
1254 MFI_WRITE4 (sc, MFI_IDB,MFI_STOP_ADP);
1255}
1256
1257static void mfi_process_fw_state_chg_isr(void *arg)
1258{
1259 struct mfi_softc *sc= (struct mfi_softc *)arg;
1260 struct mfi_cmd_tbolt *cmd;
1261 int error, status;
1262
1263 if (sc->adpreset == 1) {
1264 device_printf(sc->mfi_dev, "First stage of FW reset "
1265 "initiated...\n");
1266
1267 sc->mfi_adp_reset(sc);
1268 sc->mfi_enable_intr(sc);
1269
1270 device_printf(sc->mfi_dev, "First stage of reset complete, "
1271 "second stage initiated...\n");
1272
1273 sc->adpreset = 2;
1274
1275 /* waiting for about 20 second before start the second init */
1276 for (int wait = 0; wait < 20000; wait++)
1277 DELAY(1000);
1278 device_printf(sc->mfi_dev, "Second stage of FW reset "
1279 "initiated...\n");
1280 while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);
1281
1282 sc->mfi_disable_intr(sc);
1283
1284 /* We expect the FW state to be READY */
1285 if (mfi_transition_firmware(sc)) {
1286 device_printf(sc->mfi_dev, "controller is not in "
1287 "ready state\n");
1288 mfi_kill_hba(sc);
1289 sc->hw_crit_error= 1;
1290 return ;
1291 }
1292 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0)
1293 return;
1294
1295 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1296
1297 sc->mfi_enable_intr(sc);
1298 sc->adpreset = 0;
1299 kfree(sc->mfi_aen_cm->cm_data, M_MFIBUF);
1300 mfi_remove_busy(sc->mfi_aen_cm);
1301 cmd = sc->mfi_cmd_pool_tbolt[sc->mfi_aen_cm->cm_extra_frames
1302 - 1];
1303 mfi_tbolt_return_cmd(sc, cmd);
1304 if (sc->mfi_aen_cm) {
1305 mfi_release_command(sc->mfi_aen_cm);
1306 sc->mfi_aen_cm = NULL;
1307 }
1308 if (sc->map_update_cmd) {
1309 mfi_release_command(sc->map_update_cmd);
1310 sc->map_update_cmd = NULL;
1311 }
1312 mfi_issue_pending_cmds_again(sc);
1313
1314 /*
1315 * Issue pending command can result in adapter being marked
1316 * dead because of too many re-tries. Check for that
1317 * condition before clearing the reset condition on the FW
1318 */
1319 if (!sc->hw_crit_error) {
1320 /*
1321 * Initiate AEN (Asynchronous Event Notification)
1322 */
1323 mfi_aen_setup(sc, sc->last_seq_num);
1324 sc->issuepend_done = 1;
1325 device_printf(sc->mfi_dev, "second stage of reset "
1326 "complete, FW is ready now.\n");
1327 } else {
1328 device_printf(sc->mfi_dev, "second stage of reset "
1329 "never completed, hba was marked offline.\n");
1330 }
1331 } else {
1332 device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
1333 "called with unhandled value:%d\n", sc->adpreset);
1334 }
1335 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1336}