2 * Copyright (c) 2014, LSI Corp.
5 * Support: freebsdraid@lsi.com
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies, either expressed or implied, of the FreeBSD Project.
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 * ATTN: MegaRaid FreeBSD
42 * $FreeBSD: head/sys/dev/mrsas/mrsas.c 265555 2014-05-07 16:16:49Z ambrisko $
45 #include <dev/raid/mrsas/mrsas.h>
46 #include <dev/raid/mrsas/mrsas_ioctl.h>
48 #include <bus/cam/cam.h>
49 #include <bus/cam/cam_ccb.h>
51 #include <sys/sysctl.h>
52 #include <sys/types.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/device.h>
56 #include <sys/spinlock2.h>
62 static d_open_t mrsas_open;
63 static d_close_t mrsas_close;
64 static d_read_t mrsas_read;
65 static d_write_t mrsas_write;
66 static d_ioctl_t mrsas_ioctl;
68 static struct mrsas_ident *mrsas_find_ident(device_t);
69 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
70 static void mrsas_flush_cache(struct mrsas_softc *sc);
71 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
72 static void mrsas_ocr_thread(void *arg);
73 static int mrsas_get_map_info(struct mrsas_softc *sc);
74 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
75 static int mrsas_sync_map_info(struct mrsas_softc *sc);
76 static int mrsas_get_pd_list(struct mrsas_softc *sc);
77 static int mrsas_get_ld_list(struct mrsas_softc *sc);
78 static int mrsas_setup_irq(struct mrsas_softc *sc);
79 static int mrsas_alloc_mem(struct mrsas_softc *sc);
80 static int mrsas_init_fw(struct mrsas_softc *sc);
81 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
82 static int mrsas_complete_cmd(struct mrsas_softc *sc);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
85 struct mrsas_ctrl_info *ctrl_info);
86 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
87 struct mrsas_mfi_cmd *cmd_to_abort);
88 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
89 u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
90 struct mrsas_mfi_cmd *mfi_cmd);
91 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
92 int mrsas_init_adapter(struct mrsas_softc *sc);
93 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
94 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
95 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
96 int mrsas_ioc_init(struct mrsas_softc *sc);
97 int mrsas_bus_scan(struct mrsas_softc *sc);
98 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
99 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
100 int mrsas_reset_ctrl(struct mrsas_softc *sc);
101 int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
102 int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
103 struct mrsas_mfi_cmd *cmd);
104 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
106 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
107 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
108 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 void mrsas_disable_intr(struct mrsas_softc *sc);
111 void mrsas_enable_intr(struct mrsas_softc *sc);
112 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
113 void mrsas_free_mem(struct mrsas_softc *sc);
114 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
115 void mrsas_isr(void *arg);
116 void mrsas_teardown_intr(struct mrsas_softc *sc);
117 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
118 void mrsas_kill_hba (struct mrsas_softc *sc);
119 void mrsas_aen_handler(struct mrsas_softc *sc);
120 void mrsas_write_reg(struct mrsas_softc *sc, int offset,
122 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
123 u_int32_t req_desc_hi);
124 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
125 void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
126 struct mrsas_mfi_cmd *cmd, u_int8_t status);
127 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
129 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
130 MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
131 struct mrsas_mfi_cmd *cmd);
133 extern int mrsas_cam_attach(struct mrsas_softc *sc);
134 extern void mrsas_cam_detach(struct mrsas_softc *sc);
135 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
136 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
137 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
138 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
139 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
140 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
141 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
142 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
143 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
144 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
145 extern void mrsas_xpt_release(struct mrsas_softc *sc);
146 extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
148 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
149 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
150 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
151 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
155 * PCI device struct and table
/*
 * PCI ID table of supported LSI MegaRAID SAS controllers
 * (Thunderbolt, Invader, Fury); 0xffff acts as a wildcard for
 * subvendor/subdevice in mrsas_find_ident().
 * NOTE(review): the struct fields and the table terminator are not
 * visible in this extraction — confirm against the full source.
 */
158 typedef struct mrsas_ident {
166 MRSAS_CTLR_ID device_table[] = {
167 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
168 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
169 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
174 * Character device entry points
/*
 * Character-device entry point table (DragonFly dev_ops), marked
 * D_MPSAFE. Backs the /dev/mrsas%u node created in mrsas_attach().
 * NOTE(review): closing brace not visible in this extraction.
 */
177 static struct dev_ops mrsas_ops = {
178 { "mrsas", 0, D_MPSAFE },
179 .d_open = mrsas_open,
180 .d_close = mrsas_close,
181 .d_read = mrsas_read,
182 .d_write = mrsas_write,
183 .d_ioctl = mrsas_ioctl,
/* Driver-private malloc type used for all mrsas allocations. */
186 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
/* When set, Thunderbolt boards are deferred to the mfi(4) driver (see mrsas_probe). */
188 static int mrsas_mfi_enable = 0;
189 TUNABLE_INT("hw.mrsas.mfi_enable", &mrsas_mfi_enable);
/* MSI interrupt allocation enabled by default (see mrsas_setup_irq). */
191 static int mrsas_msi_enable = 1;
192 TUNABLE_INT("hw.mrsas.msi.enable", &mrsas_msi_enable);
195 * In the cdevsw routines, we find our softc by using the si_drv1 member
196 * of struct cdev. We set this variable to point to our softc in our
197 * attach routine when we create the /dev entry.
/*
 * d_open entry point for the /dev/mrsas%u character device; the
 * softc is reachable via dev->si_drv1 (set in mrsas_attach()).
 * NOTE(review): body appears truncated in this extraction.
 */
200 mrsas_open(struct dev_open_args *ap)
202 cdev_t dev = ap->a_head.a_dev;
203 struct mrsas_softc *sc;
/*
 * d_close entry point for the mrsas character device.
 * NOTE(review): body appears truncated in this extraction.
 */
210 mrsas_close(struct dev_close_args *ap)
212 cdev_t dev = ap->a_head.a_dev;
213 struct mrsas_softc *sc;
/*
 * d_read entry point for the mrsas character device.
 * NOTE(review): body appears truncated in this extraction.
 */
220 mrsas_read(struct dev_read_args *ap)
222 cdev_t dev = ap->a_head.a_dev;
223 struct mrsas_softc *sc;
/*
 * d_write entry point for the mrsas character device.
 * NOTE(review): body appears truncated in this extraction.
 */
229 mrsas_write(struct dev_write_args *ap)
231 cdev_t dev = ap->a_head.a_dev;
232 struct mrsas_softc *sc;
239 * Register Read/Write Functions
/*
 * Write a 32-bit value to the controller register at 'offset'
 * through the bus-space tag/handle cached in the softc.
 */
243 mrsas_write_reg(struct mrsas_softc *sc, int offset,
246 bus_space_tag_t bus_tag = sc->bus_tag;
247 bus_space_handle_t bus_handle = sc->bus_handle;
249 bus_space_write_4(bus_tag, bus_handle, offset, value);
/*
 * Read and return the 32-bit controller register at 'offset'
 * through the bus-space tag/handle cached in the softc.
 */
253 mrsas_read_reg(struct mrsas_softc *sc, int offset)
255 bus_space_tag_t bus_tag = sc->bus_tag;
256 bus_space_handle_t bus_handle = sc->bus_handle;
258 return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
263 * Interrupt Disable/Enable/Clear Functions
/*
 * Mask all bits of the outbound interrupt mask register; the
 * readback that follows forces the posted write out over PCI.
 */
266 void mrsas_disable_intr(struct mrsas_softc *sc)
268 u_int32_t mask = 0xFFFFFFFF;
271 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
272 /* Dummy read to force pci flush */
273 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * Clear any pending outbound interrupt status, then unmask the
 * Fusion interrupt bits; each write is followed by a flushing
 * readback of the same register.
 */
276 void mrsas_enable_intr(struct mrsas_softc *sc)
278 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
281 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
282 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
284 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
285 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * Read and acknowledge the outbound interrupt status register.
 * On an FW state-change interrupt the scratch pad is inspected; if
 * the firmware is in FAULT state the OCR thread is woken, and the
 * status register is written back to clear the interrupt. When no
 * Fusion reply bit is set the interrupt is not ours.
 * NOTE(review): return statements appear missing from this extraction.
 */
288 static int mrsas_clear_intr(struct mrsas_softc *sc)
290 u_int32_t status, fw_status, fw_state;
292 /* Read received interrupt */
293 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
295 /* If FW state change interrupt is received, write to it again to clear */
296 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
297 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
298 outbound_scratch_pad));
299 fw_state = fw_status & MFI_STATE_MASK;
300 if (fw_state == MFI_STATE_FAULT) {
301 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
302 if(sc->ocr_thread_active)
303 wakeup(&sc->ocr_chan);
305 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
306 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
310 /* Not our interrupt, so just return */
311 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
314 /* We got a reply interrupt */
319 * PCI Support Functions
/*
 * Scan device_table for an entry matching this device's PCI
 * vendor/device IDs; subvendor/subdevice must match exactly or be
 * the 0xffff wildcard. Returns the matching entry (return statements
 * appear truncated in this extraction).
 */
322 static struct mrsas_ident * mrsas_find_ident(device_t dev)
324 struct mrsas_ident *pci_device;
326 for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
328 if ((pci_device->vendor == pci_get_vendor(dev)) &&
329 (pci_device->device == pci_get_device(dev)) &&
330 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
331 (pci_device->subvendor == 0xffff)) &&
332 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
333 (pci_device->subdevice == 0xffff)))
/*
 * PCI probe entry point: match the device against device_table.
 * When the hw.mrsas.mfi_enable tunable is set, Thunderbolt boards
 * are left for the mfi(4) driver. A version banner is printed once
 * (gated by first_ctrl; the reset of that flag is truncated here).
 */
339 static int mrsas_probe(device_t dev)
341 static u_int8_t first_ctrl = 1;
342 struct mrsas_ident *id;
344 if ((id = mrsas_find_ident(dev)) != NULL) {
345 /* give priority to mfi(4) if tunable set */
346 TUNABLE_INT_FETCH("hw.mrsas.mfi_enable", &mrsas_mfi_enable);
347 if ((id->device == MRSAS_TBOLT) && mrsas_mfi_enable) {
351 kprintf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n",
355 device_set_desc(dev, id->desc);
356 return (BUS_PROBE_DEFAULT);
363 * mrsas_setup_sysctl: setup sysctl values for mrsas
364 * input: Adapter instance soft state
366 * Setup sysctl entries for mrsas driver.
/*
 * Build the per-controller sysctl tree (preferring the device's own
 * sysctl context, falling back to a driver-created hw.mrsas%d node)
 * and attach the driver's tunable/status leaves: OCR controls,
 * driver version, FW command counters, debug level, IO timeout and
 * fault-check delay.
 * NOTE(review): some interior lines appear missing from this extraction.
 */
369 mrsas_setup_sysctl(struct mrsas_softc *sc)
371 struct sysctl_ctx_list *sysctl_ctx = NULL;
372 struct sysctl_oid *sysctl_tree = NULL;
373 char tmpstr[80], tmpstr2[80];
376 * Setup the sysctl variable so the user can change the debug level
379 ksnprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
380 device_get_unit(sc->mrsas_dev));
381 ksnprintf(tmpstr2, sizeof(tmpstr2), "mrsas%d", device_get_unit(sc->mrsas_dev));
/* Prefer the device's own sysctl context/tree when available. */
384 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
385 if (sysctl_ctx != NULL)
386 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
388 if (sysctl_tree == NULL) {
390 sysctl_ctx_init(&sc->sysctl_ctx);
391 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
392 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, tmpstr2,
393 CTLFLAG_RD, 0, tmpstr);
394 if (sc->sysctl_tree == NULL)
396 sysctl_ctx = &sc->sysctl_ctx;
397 sysctl_tree = sc->sysctl_tree;
401 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
402 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
403 "Disable the use of OCR");
405 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
406 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
407 strlen(MRSAS_VERSION), "driver version");
409 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
410 OID_AUTO, "reset_count", CTLFLAG_RD,
411 &sc->reset_count, 0, "number of ocr from start of the day");
413 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
414 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
415 &sc->fw_outstanding, 0, "FW outstanding commands");
417 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
418 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
419 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
421 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
422 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
423 "Driver debug level");
425 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
426 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
427 0, "Driver IO timeout value in mili-second.");
429 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
430 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
431 &sc->mrsas_fw_fault_check_delay,
432 0, "FW fault check thread delay in seconds. <default is 1 sec>");
434 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
435 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
436 &sc->reset_in_progress, 0, "ocr in progress status");
441 * mrsas_get_tunables: get tunable parameters.
442 * input: Adapter instance soft state
444 * Get tunable parameters. This will help to debug driver at boot time.
/*
 * Seed softc defaults (debug level, IO timeout, FW fault-check
 * delay, reset flag), then override the debug level from loader
 * tunables: global hw.mrsas.debug_level first, then the per-unit
 * dev.mrsas.%d.debug_level.
 */
447 mrsas_get_tunables(struct mrsas_softc *sc)
451 /* XXX default to some debugging for now */
452 sc->mrsas_debug = MRSAS_FAULT;
453 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
454 sc->mrsas_fw_fault_check_delay = 1;
456 sc->reset_in_progress = 0;
459 * Grab the global variables.
461 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
463 /* Grab the unit-instance variables */
464 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
465 device_get_unit(sc->mrsas_dev));
466 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
470 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
471 * Used to get sequence number at driver load time.
472 * input: Adapter soft state
474 * Allocates DMAable memory for the event log info internal command.
/*
 * Allocate a DMA tag, buffer and mapping (32-bit addressable,
 * zeroed) for the MR_DCMD_CTRL_EVENT_GET_INFO command, saving
 * tag/mem/physaddr in the softc. Freed by
 * mrsas_free_evt_log_info_cmd().
 * NOTE(review): error-return lines appear missing from this extraction.
 */
476 int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
480 /* Allocate get event log info command */
481 el_info_size = sizeof(struct mrsas_evt_log_info);
482 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
483 1, 0, // algnmnt, boundary
484 BUS_SPACE_MAXADDR_32BIT,// lowaddr
485 BUS_SPACE_MAXADDR, // highaddr
486 NULL, NULL, // filter, filterarg
487 el_info_size, // maxsize
489 el_info_size, // maxsegsize
490 BUS_DMA_ALLOCNOW, // flags
492 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
495 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
496 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
497 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
500 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
501 sc->el_info_mem, el_info_size, mrsas_addr_cb,
502 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
503 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
507 memset(sc->el_info_mem, 0, el_info_size);
512 * mrsas_free_evt_info_cmd: Free memory for Event log info command
513 * input: Adapter soft state
515 * Deallocates memory for the event log info internal command.
/*
 * Tear down the event-log-info DMA resources in reverse order of
 * allocation: unload the map, free the memory, destroy the tag.
 */
517 void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
519 if (sc->el_info_phys_addr)
520 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
521 if (sc->el_info_mem != NULL)
522 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
523 if (sc->el_info_tag != NULL)
524 bus_dma_tag_destroy(sc->el_info_tag);
528 * mrsas_get_seq_num: Get latest event sequence number
529 * @sc: Adapter soft state
530 * @eli: Firmware event log sequence number information.
531 * Firmware maintains a log of all events in a non-volatile area.
532 * Driver get the sequence number using DCMD
533 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
/*
 * Fetch the firmware event-log sequence info via a blocking
 * MR_DCMD_CTRL_EVENT_GET_INFO DCMD: grab an MFI command, allocate
 * the DMA buffer, issue, copy the result into *eli, then release
 * the DMA buffer and the command.
 * NOTE(review): return paths appear truncated in this extraction.
 */
537 mrsas_get_seq_num(struct mrsas_softc *sc,
538 struct mrsas_evt_log_info *eli)
540 struct mrsas_mfi_cmd *cmd;
541 struct mrsas_dcmd_frame *dcmd;
543 cmd = mrsas_get_mfi_cmd(sc);
546 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
550 dcmd = &cmd->frame->dcmd;
552 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
553 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
554 mrsas_release_mfi_cmd(cmd);
558 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
560 dcmd->cmd = MFI_CMD_DCMD;
561 dcmd->cmd_status = 0x0;
563 dcmd->flags = MFI_FRAME_DIR_READ;
566 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
567 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
568 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
569 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
571 mrsas_issue_blocked_cmd(sc, cmd);
574 * Copy the data back into callers buffer
576 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
577 mrsas_free_evt_log_info_cmd(sc);
578 mrsas_release_mfi_cmd(cmd);
585 * mrsas_register_aen: Register for asynchronous event notification
586 * @sc: Adapter soft state
587 * @seq_num: Starting sequence number
588 * @class_locale: Class of the event
589 * This function subscribes for events beyond the @seq_num
590 * and type @class_locale.
/*
 * Register with firmware for asynchronous event notification
 * starting at seq_num with the given class/locale word. If an AEN
 * command is already pending and covers the new request, nothing is
 * done; otherwise the pending command is aborted and a superset
 * class/locale registration (MR_DCMD_CTRL_EVENT_WAIT) is issued.
 * NOTE(review): several interior lines appear missing from this extraction.
 */
594 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
595 u_int32_t class_locale_word)
598 struct mrsas_mfi_cmd *cmd;
599 struct mrsas_dcmd_frame *dcmd;
600 union mrsas_evt_class_locale curr_aen;
601 union mrsas_evt_class_locale prev_aen;
604 * If there is an AEN pending already (aen_cmd), check if the
605 * class_locale of that pending AEN is inclusive of the new
606 * AEN request we currently have. If it is, then we don't have
607 * to do anything. In other words, whichever events the current
608 * AEN request is subscribing to, have already been subscribed
610 * If the old_cmd is _not_ inclusive, then we have to abort
611 * that command, form a class_locale that is superset of both
612 * old and current and re-issue to the FW
615 curr_aen.word = class_locale_word;
619 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
622 * A class whose enum value is smaller is inclusive of all
623 * higher values. If a PROGRESS (= -1) was previously
624 * registered, then a new registration requests for higher
625 * classes need not be sent to FW. They are automatically
627 * Locale numbers don't have such hierarchy. They are bitmap values
629 if ((prev_aen.members.class <= curr_aen.members.class) &&
630 !((prev_aen.members.locale & curr_aen.members.locale) ^
631 curr_aen.members.locale)) {
633 * Previously issued event registration includes
634 * current request. Nothing to do.
638 curr_aen.members.locale |= prev_aen.members.locale;
640 if (prev_aen.members.class < curr_aen.members.class)
641 curr_aen.members.class = prev_aen.members.class;
643 sc->aen_cmd->abort_aen = 1;
644 ret_val = mrsas_issue_blocked_abort_cmd(sc,
648 kprintf("mrsas: Failed to abort "
649 "previous AEN command\n");
655 cmd = mrsas_get_mfi_cmd(sc);
660 dcmd = &cmd->frame->dcmd;
662 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
665 * Prepare DCMD for aen registration
667 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
669 dcmd->cmd = MFI_CMD_DCMD;
670 dcmd->cmd_status = 0x0;
672 dcmd->flags = MFI_FRAME_DIR_READ;
675 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
676 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
677 dcmd->mbox.w[0] = seq_num;
678 sc->last_seq_num = seq_num;
679 dcmd->mbox.w[1] = curr_aen.word;
680 dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
681 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
683 if (sc->aen_cmd != NULL) {
684 mrsas_release_mfi_cmd(cmd);
689 * Store reference to the cmd used to register for AEN. When an
690 * application wants us to register for AEN, we have to abort this
691 * cmd and re-register with a new EVENT LOCALE supplied by that app
696 Issue the aen registration frame
698 if (mrsas_issue_dcmd(sc, cmd)){
699 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
706 * mrsas_start_aen - Subscribes to AEN during driver load time
707 * @instance: Adapter soft state
/*
 * Subscribe to AEN at driver load: fetch the latest FW sequence
 * number, then register for all locales at MR_EVT_CLASS_DEBUG
 * starting from newest_seq_num + 1.
 */
709 static int mrsas_start_aen(struct mrsas_softc *sc)
711 struct mrsas_evt_log_info eli;
712 union mrsas_evt_class_locale class_locale;
715 /* Get the latest sequence number from FW*/
717 memset(&eli, 0, sizeof(eli));
719 if (mrsas_get_seq_num(sc, &eli))
722 /* Register AEN with FW for latest sequence number plus 1*/
723 class_locale.members.reserved = 0;
724 class_locale.members.locale = MR_EVT_LOCALE_ALL;
725 class_locale.members.class = MR_EVT_CLASS_DEBUG;
727 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
732 * mrsas_attach: PCI entry point
733 * input: device struct pointer
735 * Performs setup of PCI and registers, initializes mutexes and
736 * linked lists, registers interrupts and CAM, and initializes
737 * the adapter/controller to its proper state.
/*
 * PCI attach entry point: enable busmastering, map BAR1 registers,
 * initialize locks and command lists, create the /dev node, bring
 * up firmware, CAM, interrupts, the OCR thread, sysctl nodes and
 * AEN. The tail is the error-unwind ladder releasing everything in
 * reverse order.
 * NOTE(review): some interior lines (returns, labels, closing braces)
 * appear missing from this extraction.
 */
739 static int mrsas_attach(device_t dev)
741 struct mrsas_softc *sc = device_get_softc(dev);
742 uint32_t cmd, bar, error;
744 /* Look up our softc and initialize its fields. */
746 sc->device_id = pci_get_device(dev);
748 mrsas_get_tunables(sc);
751 * Set up PCI and registers
753 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
754 if ( (cmd & PCIM_CMD_PORTEN) == 0) {
757 /* Force the busmaster enable bit on. */
758 cmd |= PCIM_CMD_BUSMASTEREN;
759 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
761 //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
762 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
764 sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
765 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
766 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
768 device_printf(dev, "Cannot allocate PCI registers\n");
771 sc->bus_tag = rman_get_bustag(sc->reg_res);
772 sc->bus_handle = rman_get_bushandle(sc->reg_res);
774 /* Initialize mutexes */
775 lockinit(&sc->sim_lock, "mrsas_sim_lock", 0, LK_CANRECURSE);
776 lockinit(&sc->pci_lock, "mrsas_pci_lock", 0, LK_CANRECURSE);
777 lockinit(&sc->io_lock, "mrsas_io_lock", 0, LK_CANRECURSE);
778 lockinit(&sc->aen_lock, "mrsas_aen_lock", 0, LK_CANRECURSE);
779 spin_init(&sc->ioctl_lock, "mrsasioctl");
780 lockinit(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", 0,
782 lockinit(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", 0,
784 lockinit(&sc->raidmap_lock, "mrsas_raidmap_lock", 0, LK_CANRECURSE);
786 /* Initialize linked lists */
787 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
788 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
790 atomic_set(&sc->fw_outstanding,0);
792 sc->io_cmds_highwater = 0;
794 /* Create a /dev entry for this device. */
795 sc->mrsas_cdev = make_dev(&mrsas_ops, device_get_unit(dev), UID_ROOT,
796 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
797 device_get_unit(dev));
799 sc->mrsas_cdev->si_drv1 = sc;
801 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
802 sc->UnevenSpanSupport = 0;
804 /* Initialize Firmware */
805 if (mrsas_init_fw(sc) != SUCCESS) {
809 /* Register SCSI mid-layer */
810 if ((mrsas_cam_attach(sc) != SUCCESS)) {
811 goto attach_fail_cam;
815 if (mrsas_setup_irq(sc) != SUCCESS) {
816 goto attach_fail_irq;
819 /* Enable Interrupts */
820 mrsas_enable_intr(sc);
822 error = kthread_create(mrsas_ocr_thread, sc, &sc->ocr_thread, "mrsas_ocr%d",
823 device_get_unit(sc->mrsas_dev));
825 kprintf("Error %d starting rescan thread\n", error);
826 goto attach_fail_irq;
829 mrsas_setup_sysctl(sc);
831 /* Initiate AEN (Asynchronous Event Notification)*/
833 if (mrsas_start_aen(sc)) {
834 kprintf("Error: start aen failed\n");
/* Error-unwind ladder: release in reverse order of acquisition. */
842 mrsas_teardown_intr(sc);
844 mrsas_cam_detach(sc);
846 //attach_fail_raidmap:
848 lockuninit(&sc->sim_lock);
849 lockuninit(&sc->aen_lock);
850 lockuninit(&sc->pci_lock);
851 lockuninit(&sc->io_lock);
852 spin_uninit(&sc->ioctl_lock);
853 lockuninit(&sc->mpt_cmd_pool_lock);
854 lockuninit(&sc->mfi_cmd_pool_lock);
855 lockuninit(&sc->raidmap_lock);
857 destroy_dev(sc->mrsas_cdev);
859 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
860 sc->reg_res_id, sc->reg_res);
866 * mrsas_detach: De-allocates and teardown resources
867 * input: device struct pointer
869 * This function is the entry point for device disconnect and detach. It
870 * performs memory de-allocations, shutdown of the controller and various
871 * teardown and destroy resource functions.
/*
 * Device detach: flag removal, wake and wait out any in-progress
 * OCR and the OCR thread, flush/shutdown the controller, then tear
 * down interrupts, CAM, locks, the register mapping, the /dev node
 * and the sysctl tree.
 * NOTE(review): loop bodies appear partially truncated in this extraction.
 */
873 static int mrsas_detach(device_t dev)
875 struct mrsas_softc *sc;
878 sc = device_get_softc(dev);
879 sc->remove_in_progress = 1;
880 if(sc->ocr_thread_active)
881 wakeup(&sc->ocr_chan);
882 while(sc->reset_in_progress){
884 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
885 mrsas_dprint(sc, MRSAS_INFO,
886 "[%2d]waiting for ocr to be finished\n",i);
888 tsleep(mrsas_detach, 0, "mr_shutdown", hz);
891 while(sc->ocr_thread_active){
893 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
894 mrsas_dprint(sc, MRSAS_INFO,
896 "mrsas_ocr thread to quit ocr %d\n",i,
897 sc->ocr_thread_active);
899 tsleep(mrsas_detach, 0, "mr_shutdown", hz);
901 mrsas_flush_cache(sc);
902 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
903 mrsas_disable_intr(sc);
904 mrsas_cam_detach(sc);
905 mrsas_teardown_intr(sc);
907 lockuninit(&sc->sim_lock);
908 lockuninit(&sc->aen_lock);
909 lockuninit(&sc->pci_lock);
910 lockuninit(&sc->io_lock);
911 spin_uninit(&sc->ioctl_lock);
912 lockuninit(&sc->mpt_cmd_pool_lock);
913 lockuninit(&sc->mfi_cmd_pool_lock);
914 lockuninit(&sc->raidmap_lock);
916 bus_release_resource(sc->mrsas_dev,
917 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
919 destroy_dev(sc->mrsas_cdev);
920 if (sc->sysctl_tree != NULL)
921 sysctl_ctx_free(&sc->sysctl_ctx);
926 * mrsas_free_mem: Frees allocated memory
927 * input: Adapter instance soft state
929 * This function is called from mrsas_detach() to free previously allocated
/*
 * Free all driver-allocated memory: each DMA region (RAID maps,
 * version buffer, sense buffers, chain frames, IO requests, reply
 * descriptors, event detail) is released unload -> free -> destroy;
 * then the MFI frames, the MPT/MFI command lists, the request
 * descriptor array, and finally the parent DMA tag.
 * NOTE(review): some closing braces appear missing from this extraction.
 */
932 void mrsas_free_mem(struct mrsas_softc *sc)
936 struct mrsas_mfi_cmd *mfi_cmd;
937 struct mrsas_mpt_cmd *mpt_cmd;
940 * Free RAID map memory
942 for (i=0; i < 2; i++)
944 if (sc->raidmap_phys_addr[i])
945 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
946 if (sc->raidmap_mem[i] != NULL)
947 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
948 if (sc->raidmap_tag[i] != NULL)
949 bus_dma_tag_destroy(sc->raidmap_tag[i]);
953 * Free version buffer memory
955 if (sc->verbuf_phys_addr)
956 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
957 if (sc->verbuf_mem != NULL)
958 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
959 if (sc->verbuf_tag != NULL)
960 bus_dma_tag_destroy(sc->verbuf_tag);
964 * Free sense buffer memory
966 if (sc->sense_phys_addr)
967 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
968 if (sc->sense_mem != NULL)
969 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
970 if (sc->sense_tag != NULL)
971 bus_dma_tag_destroy(sc->sense_tag);
974 * Free chain frame memory
976 if (sc->chain_frame_phys_addr)
977 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
978 if (sc->chain_frame_mem != NULL)
979 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
980 if (sc->chain_frame_tag != NULL)
981 bus_dma_tag_destroy(sc->chain_frame_tag);
984 * Free IO Request memory
986 if (sc->io_request_phys_addr)
987 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
988 if (sc->io_request_mem != NULL)
989 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
990 if (sc->io_request_tag != NULL)
991 bus_dma_tag_destroy(sc->io_request_tag);
994 * Free Reply Descriptor memory
996 if (sc->reply_desc_phys_addr)
997 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
998 if (sc->reply_desc_mem != NULL)
999 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1000 if (sc->reply_desc_tag != NULL)
1001 bus_dma_tag_destroy(sc->reply_desc_tag);
1004 * Free event detail memory
1006 if (sc->evt_detail_phys_addr)
1007 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1008 if (sc->evt_detail_mem != NULL)
1009 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1010 if (sc->evt_detail_tag != NULL)
1011 bus_dma_tag_destroy(sc->evt_detail_tag);
/* Free the per-command MFI frames before freeing the list itself below. */
1016 if (sc->mfi_cmd_list) {
1017 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1018 mfi_cmd = sc->mfi_cmd_list[i];
1019 mrsas_free_frame(sc, mfi_cmd);
1022 if (sc->mficmd_frame_tag != NULL)
1023 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1026 * Free MPT internal command list
1028 max_cmd = sc->max_fw_cmds;
1029 if (sc->mpt_cmd_list) {
1030 for (i = 0; i < max_cmd; i++) {
1031 mpt_cmd = sc->mpt_cmd_list[i];
1032 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1033 kfree(sc->mpt_cmd_list[i], M_MRSAS);
1035 kfree(sc->mpt_cmd_list, M_MRSAS);
1036 sc->mpt_cmd_list = NULL;
1040 * Free MFI internal command list
1043 if (sc->mfi_cmd_list) {
1044 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1045 kfree(sc->mfi_cmd_list[i], M_MRSAS);
1047 kfree(sc->mfi_cmd_list, M_MRSAS);
1048 sc->mfi_cmd_list = NULL;
1052 * Free request descriptor memory
1054 kfree(sc->req_desc, M_MRSAS);
1055 sc->req_desc = NULL;
1058 * Destroy parent tag
1060 if (sc->mrsas_parent_tag != NULL)
1061 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1065 * mrsas_teardown_intr: Teardown interrupt
1066 * input: Adapter instance soft state
1068 * This function is called from mrsas_detach() to teardown and release
1069 * bus interrupt resourse.
/*
 * Tear down the interrupt handler, release the IRQ resource, and
 * release the MSI message if one was allocated; clears intr_handle.
 */
1071 void mrsas_teardown_intr(struct mrsas_softc *sc)
1073 if (sc->intr_handle)
1074 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
1075 if (sc->mrsas_irq != NULL)
1076 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
1077 if (sc->irq_type == PCI_INTR_TYPE_MSI)
1078 pci_release_msi(sc->mrsas_dev);
1079 sc->intr_handle = NULL;
1083 * mrsas_suspend: Suspend entry point
1084 * input: Device struct pointer
1086 * This function is the entry point for system suspend from the OS.
/*
 * System suspend entry point.
 * NOTE(review): body appears truncated in this extraction.
 */
1088 static int mrsas_suspend(device_t dev)
1090 struct mrsas_softc *sc;
1092 sc = device_get_softc(dev);
1097 * mrsas_resume: Resume entry point
1098 * input: Device struct pointer
1100 * This function is the entry point for system resume from the OS.
/*
 * System resume entry point.
 * NOTE(review): body appears truncated in this extraction.
 */
1102 static int mrsas_resume(device_t dev)
1104 struct mrsas_softc *sc;
1106 sc = device_get_softc(dev);
1111 * mrsas_ioctl: IOCtl commands entry point.
1113 * This function is the entry point for IOCtls from the OS. It calls the
1114 * appropriate function for processing depending on the command received.
/*
 * d_ioctl entry point: rejects requests while removal is in
 * progress, waits out any in-flight OCR (guarded by ioctl_lock),
 * then dispatches firmware passthrough and bus-scan commands.
 * NOTE(review): the switch head and return paths appear truncated
 * in this extraction.
 */
1117 mrsas_ioctl(struct dev_ioctl_args *ap)
1119 cdev_t dev = ap->a_head.a_dev;
1120 u_long cmd = ap->a_cmd;
1121 caddr_t arg = ap->a_data;
1122 struct mrsas_softc *sc;
1125 sc = (struct mrsas_softc *)(dev->si_drv1);
1127 if (sc->remove_in_progress) {
1128 mrsas_dprint(sc, MRSAS_INFO,
1129 "Driver remove or shutdown called.\n");
1133 spin_lock(&sc->ioctl_lock);
1134 if (!sc->reset_in_progress) {
1135 spin_unlock(&sc->ioctl_lock);
1139 /* Release ioctl_lock, and wait for OCR
1141 spin_unlock(&sc->ioctl_lock);
1142 while(sc->reset_in_progress){
1144 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1145 mrsas_dprint(sc, MRSAS_INFO,
1147 "OCR to be finished %d\n",i,
1148 sc->ocr_thread_active);
1150 tsleep(mrsas_ioctl, 0, "mr_ioctl", hz);
1155 case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
1156 ret = mrsas_passthru(sc, (void *)arg);
1158 case MRSAS_IOC_SCAN_BUS:
1159 ret = mrsas_bus_scan(sc);
1167 * mrsas_setup_irq: Set up interrupt.
1168 * input: Adapter instance soft state
1170 * This function sets up interrupts as a bus resource, with flags indicating
1171 * resource permitting contemporaneous sharing and for resource to activate
/*
 * Allocate and hook up the device interrupt: try MSI (falling back per
 * pci_alloc_1intr), allocate the IRQ bus resource, then install mrsas_isr
 * as the MPSAFE interrupt handler.
 */
1174 static int mrsas_setup_irq(struct mrsas_softc *sc)
/* pci_alloc_1intr() picks MSI or legacy INTx and returns the type chosen. */
1179 sc->irq_type = pci_alloc_1intr(sc->mrsas_dev, mrsas_msi_enable,
1180 &sc->irq_id, &irq_flags);
1182 sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
1183 &sc->irq_id, irq_flags);
1184 if (sc->mrsas_irq == NULL){
1185 device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
1188 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE,
1189 mrsas_isr, sc, &sc->intr_handle, NULL)) {
1190 device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
1198 * mrsas_isr: ISR entry point
1199 * input: argument pointer
1201 * This function is the interrupt service routine entry point. There
1202 * are two types of interrupts, state change interrupt and response
1203 * interrupt. If an interrupt is not ours, we just return.
/*
 * Interrupt service routine.  Clears the FW state-change interrupt; if the
 * interrupt is not ours we bail, and during an OCR we only ack the
 * interrupt.  Otherwise reply descriptors are processed via
 * mrsas_complete_cmd(), which acks the response interrupt on success.
 */
1205 void mrsas_isr(void *arg)
1207 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1210 /* Clear FW state change interrupt */
1211 status = mrsas_clear_intr(sc);
1213 /* Not our interrupt */
1217 /* If we are resetting, bail */
1218 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1219 kprintf(" Entered into ISR when OCR is going active. \n");
1220 mrsas_clear_intr(sc);
1223 /* Process for reply request and clear response interrupt */
/* If no replies were completed, still ack the interrupt here. */
1224 if (mrsas_complete_cmd(sc) != SUCCESS)
1225 mrsas_clear_intr(sc);
1231 * mrsas_complete_cmd: Process reply request
1232 * input: Adapter instance soft state
1234 * This function is called from mrsas_isr() to process reply request and
1235 * clear response interrupt. Processing of the reply request entails
1236 * walking through the reply descriptor array for the command request
1237 * pended from Firmware. We look at the Function field to determine
1238 * the command type and perform the appropriate action. Before we
1239 * return, we clear the response interrupt.
/*
 * Walk the reply descriptor ring, completing each pended command.  The
 * request Function field selects the completion path: fast-path SCSI IO
 * (with load-balance bookkeeping), LD IO, or MFI pass-through.  Consumed
 * descriptors are reset to all-ones (the "unused" pattern) and the reply
 * post host index register is updated, which also clears the response
 * interrupt.
 */
1241 static int mrsas_complete_cmd(struct mrsas_softc *sc)
1243 Mpi2ReplyDescriptorsUnion_t *desc;
1244 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1245 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1246 struct mrsas_mpt_cmd *cmd_mpt;
1247 struct mrsas_mfi_cmd *cmd_mfi;
1248 u_int8_t arm, reply_descript_type;
1249 u_int16_t smid, num_completed;
1250 u_int8_t status, extStatus;
1251 union desc_value desc_val;
1252 PLD_LOAD_BALANCE_INFO lbinfo;
1253 u_int32_t device_id;
1254 int threshold_reply_count = 0;
1257 /* If we have a hardware error, not need to continue */
1258 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Resume scanning where the previous invocation left off. */
1261 desc = sc->reply_desc_mem;
1262 desc += sc->last_reply_idx;
1264 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1266 desc_val.word = desc->Words;
1269 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1271 /* Find our reply descriptor for the command and process */
/* All-0xFF words mark an unused (already consumed) descriptor slot. */
1272 while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
/* SMIDs are 1-based; index 0 of mpt_cmd_list corresponds to SMID 1. */
1274 smid = reply_desc->SMID;
1275 cmd_mpt = sc->mpt_cmd_list[smid -1];
1276 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
1278 status = scsi_io_req->RaidContext.status;
1279 extStatus = scsi_io_req->RaidContext.exStatus;
1281 switch (scsi_io_req->Function)
1283 case MPI2_FUNCTION_SCSI_IO_REQUEST : /*Fast Path IO.*/
1284 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1285 lbinfo = &sc->load_balance_info[device_id];
/* Undo the RAID1 load-balance accounting taken at submit time. */
1286 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1287 arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
1288 atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
1289 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1291 //Fall thru and complete IO
1292 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1293 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1294 mrsas_cmd_done(sc, cmd_mpt);
1295 scsi_io_req->RaidContext.status = 0;
1296 scsi_io_req->RaidContext.exStatus = 0;
1297 atomic_dec(&sc->fw_outstanding);
1299 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
1300 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1301 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1303 mrsas_release_mpt_cmd(cmd_mpt);
/* Advance the consumer index, wrapping at the queue depth. */
1307 sc->last_reply_idx++;
1308 if (sc->last_reply_idx >= sc->reply_q_depth)
1309 sc->last_reply_idx = 0;
1311 desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
1313 threshold_reply_count++;
1315 /* Get the next reply descriptor */
1316 if (!sc->last_reply_idx)
1317 desc = sc->reply_desc_mem;
1321 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1322 desc_val.word = desc->Words;
1324 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1326 if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1330 * Write to reply post index after completing threshold reply count
1331 * and still there are more replies in reply queue pending to be
/* Periodic index update keeps FW producing while we keep consuming. */
1334 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1335 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
1336 sc->last_reply_idx);
1337 threshold_reply_count = 0;
1341 /* No match, just return */
1342 if (num_completed == 0)
1345 /* Clear response interrupt */
1346 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
1352 * mrsas_map_mpt_cmd_status: Map firmware command status to CAM CCB status.
1353 * input: Adapter instance soft state
1355 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1356 * It checks the command status and maps the appropriate CAM status for the CCB.
/*
 * Translate the firmware (MFI) completion status of an LD/fast-path IO into
 * the appropriate CAM status on the command's CCB, copying back sense data
 * when the device reported a check condition.
 */
1358 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1360 struct mrsas_softc *sc = cmd->sc;
1361 u_int8_t *sense_data;
1365 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1367 case MFI_STAT_SCSI_IO_FAILED:
1368 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1369 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1370 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1372 /* For now just copy 18 bytes back */
/* 18 bytes = fixed-format sense header; larger sense is truncated here. */
1373 memcpy(sense_data, cmd->sense, 18);
1374 cmd->ccb_ptr->csio.sense_len = 18;
1375 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1378 case MFI_STAT_LD_OFFLINE:
1379 case MFI_STAT_DEVICE_NOT_FOUND:
/* Non-zero LUN on a missing device reports invalid LUN instead. */
1380 if (cmd->ccb_ptr->ccb_h.target_lun)
1381 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1383 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1385 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1386 /*send status to CAM layer to retry sending command without
1387 * decrementing retry counter*/
1388 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
/* Any other FW status is logged and surfaced as a generic CCB error. */
1391 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1392 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1393 cmd->ccb_ptr->csio.scsi_status = status;
1399 * mrsas_alloc_mem: Allocate DMAable memory.
1400 * input: Adapter instance soft state
1402 * This function creates the parent DMA tag and allocates DMAable memory.
1403 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
1404 * into Kernel virtual address. Callback argument is physical memory address.
/*
 * Create the parent DMA tag and allocate/map every DMA region the driver
 * needs: version buffer, IO request frames, chain frames, reply descriptor
 * ring, sense buffers, event-detail buffer, and finally the data-buffer tag
 * used per-IO.  Each region follows the same tag -> alloc -> bzero -> load
 * pattern, with the physical address captured via mrsas_addr_cb().
 * Most regions are restricted to 32-bit addresses (FW requirement per the
 * lowaddr settings); the parent and data tags allow full addressing.
 */
1406 static int mrsas_alloc_mem(struct mrsas_softc *sc)
1408 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1409 chain_frame_size, evt_detail_size;
1412 * Allocate parent DMA tag
1414 if (bus_dma_tag_create(NULL, /* parent */
1417 BUS_SPACE_MAXADDR, /* lowaddr */
1418 BUS_SPACE_MAXADDR, /* highaddr */
1419 NULL, NULL, /* filter, filterarg */
1420 MRSAS_MAX_IO_SIZE,/* maxsize */
1421 MRSAS_MAX_SGL, /* nsegments */
1422 MRSAS_MAX_IO_SIZE,/* maxsegsize */
1424 &sc->mrsas_parent_tag /* tag */
1426 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1431 * Allocate for version buffer
1433 verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
1434 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1435 1, 0, // algnmnt, boundary
1436 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1437 BUS_SPACE_MAXADDR, // highaddr
1438 NULL, NULL, // filter, filterarg
1439 verbuf_size, // maxsize
1441 verbuf_size, // maxsegsize
1442 BUS_DMA_ALLOCNOW, // flags
1444 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1447 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1448 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1449 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1452 bzero(sc->verbuf_mem, verbuf_size);
1453 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1454 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
1455 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1460 * Allocate IO Request Frames
1462 io_req_size = sc->io_frames_alloc_sz;
/* 16-byte alignment is required for MPI2 request frames. */
1463 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1464 16, 0, // algnmnt, boundary
1465 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1466 BUS_SPACE_MAXADDR, // highaddr
1467 NULL, NULL, // filter, filterarg
1468 io_req_size, // maxsize
1470 io_req_size, // maxsegsize
1471 BUS_DMA_ALLOCNOW, // flags
1472 &sc->io_request_tag)) {
1473 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1476 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1477 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1478 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1481 bzero(sc->io_request_mem, io_req_size);
1482 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1483 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1484 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1485 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1490 * Allocate Chain Frames
1492 chain_frame_size = sc->chain_frames_alloc_sz;
1493 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1494 4, 0, // algnmnt, boundary
1495 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1496 BUS_SPACE_MAXADDR, // highaddr
1497 NULL, NULL, // filter, filterarg
1498 chain_frame_size, // maxsize
1500 chain_frame_size, // maxsegsize
1501 BUS_DMA_ALLOCNOW, // flags
1502 &sc->chain_frame_tag)) {
1503 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1506 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1507 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1508 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1511 bzero(sc->chain_frame_mem, chain_frame_size);
1512 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1513 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1514 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1515 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1520 * Allocate Reply Descriptor Array
1522 reply_desc_size = sc->reply_alloc_sz;
1523 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1524 16, 0, // algnmnt, boundary
1525 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1526 BUS_SPACE_MAXADDR, // highaddr
1527 NULL, NULL, // filter, filterarg
1528 reply_desc_size, // maxsize
1530 reply_desc_size, // maxsegsize
1531 BUS_DMA_ALLOCNOW, // flags
1532 &sc->reply_desc_tag)) {
1533 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1536 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1537 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1538 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
/* Note: no bzero here; the ring is initialized to 0xFF..FF later in
 * mrsas_alloc_mpt_cmds(). */
1541 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1542 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1543 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1544 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1549 * Allocate Sense Buffer Array. Keep in lower 4GB
1551 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1552 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1553 64, 0, // algnmnt, boundary
1554 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1555 BUS_SPACE_MAXADDR, // highaddr
1556 NULL, NULL, // filter, filterarg
1557 sense_size, // maxsize
1559 sense_size, // maxsegsize
1560 BUS_DMA_ALLOCNOW, // flags
1562 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1565 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1566 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1567 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1570 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1571 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1573 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1578 * Allocate for Event detail structure
1580 evt_detail_size = sizeof(struct mrsas_evt_detail);
1581 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1582 1, 0, // algnmnt, boundary
1583 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1584 BUS_SPACE_MAXADDR, // highaddr
1585 NULL, NULL, // filter, filterarg
1586 evt_detail_size, // maxsize
1588 evt_detail_size, // maxsegsize
1589 BUS_DMA_ALLOCNOW, // flags
1590 &sc->evt_detail_tag)) {
1591 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1594 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1595 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1596 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1599 bzero(sc->evt_detail_mem, evt_detail_size);
1600 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1601 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1602 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1603 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1609 * Create a dma tag for data buffers; size will be the maximum
1610 * possible I/O size (280kB).
1612 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1615 BUS_SPACE_MAXADDR, // lowaddr
1616 BUS_SPACE_MAXADDR, // highaddr
1617 NULL, NULL, // filter, filterarg
1618 MRSAS_MAX_IO_SIZE, // maxsize
1619 MRSAS_MAX_SGL, // nsegments
1620 MRSAS_MAX_IO_SIZE, // maxsegsize
1621 BUS_DMA_ALLOCNOW, // flags
1623 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1631 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1632 * input: callback argument,
1633 * machine dependent type that describes DMA segments,
1634 * number of segments,
1637 * This function is for the driver to receive mapping information resultant
1638 * of the bus_dmamap_load(). The information is actually not being used,
1639 * but the address is saved anyway.
/*
 * bus_dmamap_load() callback: store the first segment's bus address into
 * the caller-supplied bus_addr_t pointed to by arg.  All driver loads use
 * a single segment, so only segs[0] is consulted.
 */
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1647 *addr = segs[0].ds_addr;
1651 * mrsas_setup_raidmap: Set up RAID map.
1652 * input: Adapter instance soft state
1654 * Allocate DMA memory for the RAID maps and perform setup.
/*
 * Allocate DMA memory for the two RAID map copies (ping-pong buffers used
 * across map updates), then fetch the initial map from FW and sync it.
 */
1656 static int mrsas_setup_raidmap(struct mrsas_softc *sc)
/* Map size: fixed header plus one span map per additional logical drive. */
1658 sc->map_sz = sizeof(MR_FW_RAID_MAP) +
1659 (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
1661 for (int i=0; i < 2; i++)
1663 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1664 4, 0, // algnmnt, boundary
1665 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1666 BUS_SPACE_MAXADDR, // highaddr
1667 NULL, NULL, // filter, filterarg
1668 sc->map_sz, // maxsize
1670 sc->map_sz, // maxsegsize
1671 BUS_DMA_ALLOCNOW, // flags
1672 &sc->raidmap_tag[i])) {
1673 device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
1676 if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
1677 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1678 device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
1681 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1682 sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1684 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1687 if (!sc->raidmap_mem[i]) {
1688 device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
/* Fetch the current map from FW; on success register for map updates. */
1693 if (!mrsas_get_map_info(sc))
1694 mrsas_sync_map_info(sc);
1700 * mrsas_init_fw: Initialize Firmware
1701 * input: Adapter soft state
1703 * Calls transition_to_ready() to make sure Firmware is in operational
1704 * state and calls mrsas_init_adapter() to send IOC_INIT command to
1705 * Firmware. It issues internal commands to get the controller info
1706 * after the IOC_INIT command response is received by Firmware.
1707 * Note: code relating to get_pdlist, get_ld_list and max_sectors
1708 * are currently not being used, it is left here as placeholder.
/*
 * Bring the firmware to READY state, send IOC_INIT, allocate the internal
 * MFI commands and RAID maps, enumerate PDs/LDs, then query controller info
 * to compute the per-IO max sector limit and feature flags (OCR disable,
 * uneven-span support, fast-path eligibility).
 */
1710 static int mrsas_init_fw(struct mrsas_softc *sc)
1712 u_int32_t max_sectors_1;
1713 u_int32_t max_sectors_2;
1714 u_int32_t tmp_sectors;
1715 struct mrsas_ctrl_info *ctrl_info;
1720 /* Make sure Firmware is ready */
1721 ret = mrsas_transition_to_ready(sc, ocr);
1722 if (ret != SUCCESS) {
1726 /* Get operational params, sge flags, send init cmd to ctlr */
1727 if (mrsas_init_adapter(sc) != SUCCESS){
1728 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
1732 /* Allocate internal commands for pass-thru */
1733 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
1734 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
1738 if (mrsas_setup_raidmap(sc) != SUCCESS) {
1739 device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
1743 /* For pass-thru, get PD/LD list and controller info */
1744 memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
1745 mrsas_get_pd_list(sc);
/* 0xff marks an unconfigured LD slot. */
1747 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
1748 mrsas_get_ld_list(sc);
1750 //memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));
/* NOTE(review): allocation result is only checked below via the
 * "ctrl_info && ..." guard; a NULL here skips the ctrl-info path. */
1752 ctrl_info = kmalloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
1755 * Compute the max allowed sectors per IO: The controller info has two
1756 * limits on max sectors. Driver should use the minimum of these two.
1758 * 1 << stripe_sz_ops.min = max sectors per strip
1760 * Note that older firmwares ( < FW ver 30) didn't report information
1761 * to calculate max_sectors_1. So the number ended up as zero always.
1764 if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
1765 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1766 ctrl_info->max_strips_per_io;
1767 max_sectors_2 = ctrl_info->max_request_size;
1768 tmp_sectors = min(max_sectors_1 , max_sectors_2);
1769 sc->disableOnlineCtrlReset =
1770 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
1771 sc->UnevenSpanSupport =
1772 ctrl_info->adapterOperations2.supportUnevenSpans;
1773 if(sc->UnevenSpanSupport) {
1774 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
1775 sc->UnevenSpanSupport);
/* Fast-path IO is enabled only when the RAID map validates. */
1776 if (MR_ValidateMapInfo(sc))
1777 sc->fast_path_io = 1;
1779 sc->fast_path_io = 0;
/* Default limit from SGE count; tightened by FW-reported limit if set. */
1783 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
1785 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
1786 sc->max_sectors_per_req = tmp_sectors;
1789 kfree(ctrl_info, M_MRSAS);
1795 * mrsas_init_adapter: Initializes the adapter/controller
1796 * input: Adapter soft state
1798 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
1799 * ROC/controller. The FW register is read to determined the number of
1800 * commands that is supported. All memory allocations for IO is based on
1801 * max_cmd. Appropriate calculations are performed in this function.
/*
 * Read the FW scratch-pad register to learn max supported commands, derive
 * all queue/frame allocation sizes and SGE limits from it, allocate DMA and
 * MPT command memory, then send IOC_INIT to the controller.
 */
1803 int mrsas_init_adapter(struct mrsas_softc *sc)
1809 /* Read FW status register */
1810 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
1812 /* Get operational params from status register */
1813 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
1815 /* Decrement the max supported by 1, to correlate with FW */
1816 sc->max_fw_cmds = sc->max_fw_cmds-1;
1817 max_cmd = sc->max_fw_cmds;
1819 /* Determine allocation size of command frames */
/* Reply queue depth: 2 entries per command + 1, rounded up to 16. */
1820 sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
1821 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
1822 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
/* One extra frame reserved at the base (SMID 0 is unused). */
1823 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
1824 sc->chain_frames_alloc_sz = 1024 * max_cmd;
1825 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1826 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
1828 sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
1829 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
1831 /* Used for pass thru MFI frame (DCMD) */
1832 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
1834 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1835 sizeof(MPI2_SGE_IO_UNION))/16;
1837 sc->last_reply_idx = 0;
1839 ret = mrsas_alloc_mem(sc);
1843 ret = mrsas_alloc_mpt_cmds(sc);
1847 ret = mrsas_ioc_init(sc);
1856 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
1857 * input: Adapter soft state
1859 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
/*
 * Allocate, zero and DMA-map the buffer that carries the IOC INIT command:
 * a 1KB MFI init frame followed by the MPI2 IOC_INIT request (the +1024
 * offset is applied by mrsas_ioc_init()).
 */
1861 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
1865 /* Allocate IOC INIT command */
1866 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
1867 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1868 1, 0, // algnmnt, boundary
1869 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1870 BUS_SPACE_MAXADDR, // highaddr
1871 NULL, NULL, // filter, filterarg
1872 ioc_init_size, // maxsize
1874 ioc_init_size, // maxsegsize
1875 BUS_DMA_ALLOCNOW, // flags
1876 &sc->ioc_init_tag)) {
1877 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
1880 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
1881 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
1882 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
1885 bzero(sc->ioc_init_mem, ioc_init_size);
1886 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
1887 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
1888 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
1889 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
1897 * mrsas_free_ioc_cmd: Frees the memory of the IOC Init command
1898 * input: Adapter soft state
1900 * Deallocates memory of the IOC Init cmd.
/*
 * Release the IOC INIT command buffer: unload the DMA map, free the
 * DMA memory, and destroy the tag — each step guarded so a partial
 * allocation from mrsas_alloc_ioc_cmd() can be cleaned up safely.
 */
1902 void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
1904 if (sc->ioc_init_phys_mem)
1905 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
1906 if (sc->ioc_init_mem != NULL)
1907 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
1908 if (sc->ioc_init_tag != NULL)
1909 bus_dma_tag_destroy(sc->ioc_init_tag);
1913 * mrsas_ioc_init: Sends IOC Init command to FW
1914 * input: Adapter soft state
1916 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
/*
 * Build and fire the IOC INIT command: the MPI2 IOC_INIT request sits 1KB
 * into the ioc_init buffer and is wrapped by an MFI init frame at offset 0.
 * The command is submitted as an MFA descriptor with interrupts disabled,
 * and completion is detected by polling cmd_status (0xFF = still pending).
 */
1918 int mrsas_ioc_init(struct mrsas_softc *sc)
1920 struct mrsas_init_frame *init_frame;
1921 pMpi2IOCInitRequest_t IOCInitMsg;
1922 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
1923 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
1924 bus_addr_t phys_addr;
1927 /* Allocate memory for the IOC INIT command */
1928 if (mrsas_alloc_ioc_cmd(sc)) {
1929 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/* MPI2 IOC_INIT request begins 1KB past the MFI init frame. */
1933 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
1934 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
1935 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1936 IOCInitMsg->MsgVersion = MPI2_VERSION;
1937 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
/* Frame size and queue depth are expressed in 4-byte units / entries. */
1938 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
1939 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
1940 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
1941 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
1943 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
1944 init_frame->cmd = MFI_CMD_INIT;
/* 0xFF = "pending" sentinel, overwritten by FW on completion. */
1945 init_frame->cmd_status = 0xFF;
1946 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
/* Advertise the driver version string to FW via the version buffer. */
1948 if (sc->verbuf_mem) {
1949 ksnprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
1951 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
1952 init_frame->driver_ver_hi = 0;
1955 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
1956 init_frame->queue_info_new_phys_addr_lo = phys_addr;
1957 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
1959 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
1960 req_desc.MFAIo.RequestFlags =
1961 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1963 mrsas_disable_intr(sc);
1964 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
1965 //device_printf(sc->mrsas_dev, "Issuing IOC INIT command to FW.\n");del?
1966 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
1969 * Poll response timer to wait for Firmware response. While this
1970 * timer with the DELAY call could block CPU, the time interval for
1971 * this is only 1 millisecond.
1973 if (init_frame->cmd_status == 0xFF) {
1974 for (i=0; i < (max_wait * 1000); i++){
1975 if (init_frame->cmd_status == 0xFF)
1982 if (init_frame->cmd_status == 0)
1983 mrsas_dprint(sc, MRSAS_OCR,
1984 "IOC INIT response received from FW.\n");
1985 //device_printf(sc->mrsas_dev, "IOC INIT response received from FW.\n");del?
1988 if (init_frame->cmd_status == 0xFF)
1989 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
1991 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
/* The init buffer is only needed for this one command; free it now. */
1995 mrsas_free_ioc_cmd(sc);
2000 * mrsas_alloc_mpt_cmds: Allocates the command packets
2001 * input: Adapter instance soft state
2003 * This function allocates the internal commands for IOs. Each command that is
2004 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
2005 * An array is allocated with mrsas_mpt_cmd context. The free commands are
2006 * maintained in a linked list (cmd pool). SMID value range is from 1 to
/*
 * Allocate the request descriptor array and the per-command mrsas_mpt_cmd
 * wrappers, carve each command's IO-request frame, chain frame, and sense
 * buffer out of the shared DMA regions, create per-command data DMA maps,
 * thread the commands onto the free list, and initialize the reply ring
 * to the all-ones "unused" pattern.
 */
2009 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2013 struct mrsas_mpt_cmd *cmd;
2014 pMpi2ReplyDescriptorsUnion_t reply_desc;
2015 u_int32_t offset, chain_offset, sense_offset;
2016 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2017 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2019 max_cmd = sc->max_fw_cmds;
2021 sc->req_desc = kmalloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2022 if (!sc->req_desc) {
2023 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2026 memset(sc->req_desc, 0, sc->request_alloc_sz);
2029 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
2030 * dynamic array first and then allocate individual commands.
2032 sc->mpt_cmd_list = kmalloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
2033 if (!sc->mpt_cmd_list) {
2034 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2037 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
2038 for (i = 0; i < max_cmd; i++) {
2039 sc->mpt_cmd_list[i] = kmalloc(sizeof(struct mrsas_mpt_cmd),
/* On partial failure, unwind all previously allocated commands. */
2041 if (!sc->mpt_cmd_list[i]) {
2042 for (j = 0; j < i; j++)
2043 kfree(sc->mpt_cmd_list[j],M_MRSAS);
2044 kfree(sc->mpt_cmd_list, M_MRSAS);
2045 sc->mpt_cmd_list = NULL;
/* Skip the first IO frame: SMID 0 is reserved by the FW interface. */
2050 io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2051 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2052 chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
2053 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2054 sense_base = (u_int8_t*)sc->sense_mem;
2055 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2056 for (i = 0; i < max_cmd; i++) {
2057 cmd = sc->mpt_cmd_list[i];
2058 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2059 chain_offset = 1024 * i;
2060 sense_offset = MRSAS_SENSE_LEN * i;
2061 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2063 cmd->ccb_ptr = NULL;
2064 callout_init(&cmd->cm_callout);
/* MRSAS_ULONG_MAX marks "no paired MFI command". */
2065 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2067 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2068 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2069 cmd->io_request_phys_addr = io_req_base_phys + offset;
2070 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2071 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2072 cmd->sense = sense_base + sense_offset;
2073 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2074 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2077 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2080 /* Initialize reply descriptor array to 0xFFFFFFFF */
2081 reply_desc = sc->reply_desc_mem;
2082 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2083 reply_desc->Words = MRSAS_ULONG_MAX;
2089 * mrsas_fire_cmd: Sends command to FW
2090 * input: Adapter soft state
2091 * request descriptor address low
2092 * request descriptor address high
2094 * This functions fires the command to Firmware by writing to the
2095 * inbound_low_queue_port and inbound_high_queue_port.
/*
 * Submit a request descriptor to the firmware by writing its low and high
 * 32-bit halves to the inbound queue ports.  The pci_lock serializes the
 * two register writes so concurrent submitters cannot interleave halves.
 */
2097 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2098 u_int32_t req_desc_hi)
2100 lockmgr(&sc->pci_lock, LK_EXCLUSIVE);
2101 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2103 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2105 lockmgr(&sc->pci_lock, LK_RELEASE);
2109 * mrsas_transition_to_ready: Move FW to Ready state
2110 * input: Adapter instance soft state
2112 * During the initialization, FW passes can potentially be in any one of
2113 * several possible states. If the FW in operational, waiting-for-handshake
2114 * states, driver must take steps to bring it to ready state. Otherwise, it
2115 * has to wait for the ready state.
/*
 * Drive the firmware state machine to READY.  Reads the state from the
 * outbound scratch pad, performs the transition action appropriate to the
 * current state (doorbell handshakes, reset, or plain waiting), then polls
 * up to max_wait seconds for the absolute state to change before looping.
 * Fails if the state never changes within the window.
 */
2117 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2121 u_int32_t val, fw_state;
2122 u_int32_t cur_state;
2123 u_int32_t abs_state, curr_abs_state;
2125 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2126 fw_state = val & MFI_STATE_MASK;
2127 max_wait = MRSAS_RESET_WAIT_TIME;
2129 if (fw_state != MFI_STATE_READY)
2130 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2132 while (fw_state != MFI_STATE_READY) {
/* abs_state keeps the full register value for change detection below. */
2133 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2135 case MFI_STATE_FAULT:
2136 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2138 cur_state = MFI_STATE_FAULT;
2143 case MFI_STATE_WAIT_HANDSHAKE:
2144 /* Set the CLR bit in inbound doorbell */
2145 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2146 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
2147 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2149 case MFI_STATE_BOOT_MESSAGE_PENDING:
2150 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2152 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2154 case MFI_STATE_OPERATIONAL:
2155 /* Bring it to READY state; assuming max wait 10 secs */
2156 mrsas_disable_intr(sc);
2157 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
/* Wait for the doorbell reset acknowledgement bit to clear/assert. */
2158 for (i=0; i < max_wait * 1000; i++) {
2159 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2164 cur_state = MFI_STATE_OPERATIONAL;
2166 case MFI_STATE_UNDEFINED:
2167 /* This state should not last for more than 2 seconds */
2168 cur_state = MFI_STATE_UNDEFINED;
2170 case MFI_STATE_BB_INIT:
2171 cur_state = MFI_STATE_BB_INIT;
2173 case MFI_STATE_FW_INIT:
2174 cur_state = MFI_STATE_FW_INIT;
2176 case MFI_STATE_FW_INIT_2:
2177 cur_state = MFI_STATE_FW_INIT_2;
2179 case MFI_STATE_DEVICE_SCAN:
2180 cur_state = MFI_STATE_DEVICE_SCAN;
2182 case MFI_STATE_FLUSH_CACHE:
2183 cur_state = MFI_STATE_FLUSH_CACHE;
2186 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2191 * The cur_state should not last for more than max_wait secs
2193 for (i = 0; i < (max_wait * 1000); i++) {
2194 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2195 outbound_scratch_pad))& MFI_STATE_MASK);
2196 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2197 outbound_scratch_pad));
2198 if (abs_state == curr_abs_state)
2205 * Return error if fw_state hasn't changed after max_wait
2207 if (curr_abs_state == abs_state) {
2208 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2209 "in %d secs\n", fw_state, max_wait);
2213 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2214 //device_printf(sc->mrsas_dev, "FW now in Ready state\n");del?
2219 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2220 * input: Adapter soft state
2222 * This function removes an MFI command from the command list.
/*
 * mrsas_get_mfi_cmd: pop one MFI command from the adapter's free pool.
 * The pool is guarded by mfi_cmd_pool_lock; cmd remains NULL when the
 * free list is empty, so callers must check for NULL.
 * NOTE(review): excerpt appears to be missing brace/return lines here;
 * code left byte-identical.
 */
2224 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2226 struct mrsas_mfi_cmd *cmd = NULL;
2228 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
/* Take the head element off the free list, if any. */
2229 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
2230 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2231 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2233 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);
2239 * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
2240 * input: Adapter Context.
2242 * This function checks the FW status register and the
2243 * do_timedout_reset flag. It performs OCR/kill-adapter if the
2244 * FW is in a fault state or an I/O timeout has triggered a reset.
/*
 * mrsas_ocr_thread: kernel thread that watches for FW faults / timeouts.
 * Sleeps on ocr_chan for mrsas_fw_fault_check_delay * hz ticks per
 * iteration, then samples the outbound scratch-pad register.  When the
 * FW reports MFI_STATE_FAULT, or an I/O timeout set do_timedout_reset,
 * it freezes CAM, runs mrsas_reset_ctrl(), and releases CAM again.
 * Exits when remove_in_progress is set (driver detach/shutdown).
 */
2247 mrsas_ocr_thread(void *arg)
2249 struct mrsas_softc *sc;
2250 u_int32_t fw_status, fw_state;
2252 sc = (struct mrsas_softc *)arg;
2254 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2256 sc->ocr_thread_active = 1;
2257 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
2259 /* Sleep for 1 second and check the queue status*/
2260 lksleep(&sc->ocr_chan, &sc->sim_lock, 0,
2261 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2262 if (sc->remove_in_progress) {
2263 mrsas_dprint(sc, MRSAS_OCR,
2264 "Exit due to shutdown from %s\n", __func__);
/* Read current FW state from the outbound scratch-pad register. */
2267 fw_status = mrsas_read_reg(sc,
2268 offsetof(mrsas_reg_set, outbound_scratch_pad));
2269 fw_state = fw_status & MFI_STATE_MASK;
2270 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2271 device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
2272 sc->do_timedout_reset?"IO Timeout":
2273 "FW fault detected");
/* Flag reset_in_progress under ioctl_lock so new ioctls back off. */
2274 spin_lock(&sc->ioctl_lock);
2275 sc->reset_in_progress = 1;
2277 spin_unlock(&sc->ioctl_lock);
/* Freeze the CAM transport, perform the reset, then thaw it. */
2278 mrsas_xpt_freeze(sc);
2279 mrsas_reset_ctrl(sc);
2280 mrsas_xpt_release(sc);
2281 sc->reset_in_progress = 0;
2282 sc->do_timedout_reset = 0;
2285 lockmgr(&sc->sim_lock, LK_RELEASE);
2286 sc->ocr_thread_active = 0;
2291 * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
2292 * input: Adapter Context.
2294 * This function will clear reply descriptor so that post OCR
2295 * driver and FW will lose old history.
/*
 * mrsas_reset_reply_desc: rewind the reply queue during OCR.
 * Resets last_reply_idx to 0 and marks every reply descriptor as
 * unused (MRSAS_ULONG_MAX) so completions posted before the reset
 * are not mistaken for new ones afterwards.
 */
2297 void mrsas_reset_reply_desc(struct mrsas_softc *sc)
2300 pMpi2ReplyDescriptorsUnion_t reply_desc;
2302 sc->last_reply_idx = 0;
2303 reply_desc = sc->reply_desc_mem;
2304 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2305 reply_desc->Words = MRSAS_ULONG_MAX;
2310 * mrsas_reset_ctrl Core function to OCR/Kill adapter.
2311 * input: Adapter Context.
2313 * This function will run from thread context so that it can sleep.
2314 * 1. Do not handle OCR if FW is in HW critical error.
2315 * 2. Wait for outstanding command to complete for 180 seconds.
2316 * 3. If #2 does not find any outstanding command Controller is in working
2317 * state, so skip OCR.
2318 * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
2319 * 4. Start of the OCR, return all SCSI command back to CAM layer which has
2321 * 5. Post OCR, Re-fire Management commands and move Controller to Operational
/*
 * mrsas_reset_ctrl: core OCR (online controller reset) / kill-adapter path.
 * Runs from the OCR thread so it may sleep.  High-level flow visible here:
 *   1. Bail out immediately if the HBA is already in HW critical error.
 *   2. Mark the fusion reset flag, disable interrupts, and wait for
 *      outstanding commands (mrsas_wait_for_outstanding).
 *   3. If commands remain: fail them back to CAM with CAM_SCSI_BUS_RESET,
 *      then either kill the adapter (OCR disabled / FW fault without the
 *      reset-adapter bit) or perform the diag-reset key sequence.
 *   4. After a successful chip reset: wait for FW ready, re-init the IOC,
 *      re-fire pending management (MFI pass-through) commands, clear the
 *      load-balance info and re-sync the RAID map.
 * Returns SUCCESS or FAIL (retval); several return/brace lines are not
 * visible in this excerpt — code left byte-identical.
 */
2324 int mrsas_reset_ctrl(struct mrsas_softc *sc)
2326 int retval = SUCCESS, i, j, retry = 0;
2327 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2329 struct mrsas_mfi_cmd *mfi_cmd;
2330 struct mrsas_mpt_cmd *mpt_cmd;
2331 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
/* Refuse to OCR a controller already declared dead. */
2333 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2334 device_printf(sc->mrsas_dev,
2335 "mrsas: Hardware critical error, returning FAIL.\n");
2339 set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2340 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2341 mrsas_disable_intr(sc);
2344 /* First try waiting for commands to complete */
2345 if (mrsas_wait_for_outstanding(sc)) {
2346 mrsas_dprint(sc, MRSAS_OCR,
2347 "resetting adapter from %s.\n",
2349 /* Now return commands back to the CAM layer */
2350 for (i = 0 ; i < sc->max_fw_cmds; i++) {
2351 mpt_cmd = sc->mpt_cmd_list[i];
2352 if (mpt_cmd->ccb_ptr) {
2353 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2354 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2355 mrsas_cmd_done(sc, mpt_cmd);
2356 atomic_dec(&sc->fw_outstanding);
/* Decide between kill-adapter and chip reset based on FW state. */
2360 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2361 outbound_scratch_pad));
2362 abs_state = status_reg & MFI_STATE_MASK;
2363 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2364 if (sc->disableOnlineCtrlReset ||
2365 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2366 /* Reset not supported, kill adapter */
2367 mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
2369 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
2374 /* Now try to reset the chip */
2375 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
/* Magic write-sequence unlock (MPI2 WRSEQ keys) for the diag register. */
2376 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2377 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2378 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2379 MPI2_WRSEQ_1ST_KEY_VALUE);
2380 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2381 MPI2_WRSEQ_2ND_KEY_VALUE);
2382 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2383 MPI2_WRSEQ_3RD_KEY_VALUE);
2384 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2385 MPI2_WRSEQ_4TH_KEY_VALUE);
2386 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2387 MPI2_WRSEQ_5TH_KEY_VALUE);
2388 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2389 MPI2_WRSEQ_6TH_KEY_VALUE);
2391 /* Check that the diag write enable (DRWE) bit is on */
2392 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
/* Poll up to 100 retries for DRWE before giving up this attempt. */
2395 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2397 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2399 if (retry++ == 100) {
2400 mrsas_dprint(sc, MRSAS_OCR,
2401 "Host diag unlock failed!\n");
2405 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2408 /* Send chip reset command */
2409 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2410 host_diag | HOST_DIAG_RESET_ADAPTER);
2413 /* Make sure reset adapter bit is cleared */
2414 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
/* Poll up to 1000 retries for the HW to drop the reset bit. */
2417 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2419 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2421 if (retry++ == 1000) {
2422 mrsas_dprint(sc, MRSAS_OCR,
2423 "Diag reset adapter never cleared!\n");
2427 if (host_diag & HOST_DIAG_RESET_ADAPTER)
/* Wait for the FW state machine to advance past FW_INIT. */
2430 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2431 outbound_scratch_pad)) & MFI_STATE_MASK;
2434 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2436 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2437 outbound_scratch_pad)) & MFI_STATE_MASK;
2439 if (abs_state <= MFI_STATE_FW_INIT) {
2440 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2441 " state = 0x%x\n", abs_state);
2445 /* Wait for FW to become ready */
2446 if (mrsas_transition_to_ready(sc, 1)) {
2447 mrsas_dprint(sc, MRSAS_OCR,
2448 "mrsas: Failed to transition controller to ready.\n");
/* Chip is back: rebuild reply queue and re-run IOC init. */
2452 mrsas_reset_reply_desc(sc);
2453 if (mrsas_ioc_init(sc)) {
2454 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2458 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2459 mrsas_enable_intr(sc);
2460 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2462 /* Re-fire management commands */
2463 for (j = 0 ; j < sc->max_fw_cmds; j++) {
2464 mpt_cmd = sc->mpt_cmd_list[j];
2465 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2466 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
/* Map-update DCMDs are released rather than re-fired. */
2467 if (mfi_cmd->frame->dcmd.opcode ==
2468 MR_DCMD_LD_MAP_GET_INFO) {
2469 mrsas_release_mfi_cmd(mfi_cmd);
2470 mrsas_release_mpt_cmd(mpt_cmd);
2472 req_desc = mrsas_get_request_desc(sc,
2473 mfi_cmd->cmd_id.context.smid - 1);
2474 mrsas_dprint(sc, MRSAS_OCR,
2475 "Re-fire command DCMD opcode 0x%x index %d\n ",
2476 mfi_cmd->frame->dcmd.opcode, j);
2478 device_printf(sc->mrsas_dev,
2479 "Cannot build MPT cmd.\n");
2481 mrsas_fire_cmd(sc, req_desc->addr.u.low,
2482 req_desc->addr.u.high);
2487 /* Reset load balance info */
2488 memset(sc->load_balance_info, 0,
2489 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
/* Reload the RAID map; fall back to sync if get fails. */
2491 if (!mrsas_get_map_info(sc))
2492 mrsas_sync_map_info(sc);
2494 /* Adapter reset completed successfully */
2495 device_printf(sc->mrsas_dev, "Reset successful\n");
2499 /* Reset failed, kill the adapter */
2500 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
/* No outstanding commands: controller is fine, skip the OCR. */
2504 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2505 mrsas_enable_intr(sc);
2506 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2509 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2510 mrsas_dprint(sc, MRSAS_OCR,
2511 "Reset Exit with %d.\n", retval);
2516 * mrsas_kill_hba Kill HBA when OCR is not supported.
2517 * input: Adapter Context.
2519 * This function will kill HBA when OCR is not supported.
/*
 * mrsas_kill_hba: stop the controller when OCR is not supported.
 * Writes a stop/kill command to the doorbell register (the value
 * argument is on a line not visible in this excerpt) and reads the
 * doorbell back, presumably to flush the posted write — TODO confirm.
 */
2521 void mrsas_kill_hba (struct mrsas_softc *sc)
2523 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2524 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2527 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2531 * mrsas_wait_for_outstanding Wait for outstanding commands
2532 * input: Adapter Context.
2534 * This function will wait for 180 seconds for outstanding
2535 * commands to be completed.
/*
 * mrsas_wait_for_outstanding: poll for up to MRSAS_RESET_WAIT_TIME
 * iterations for all FW-outstanding commands to drain.
 * Returns 0 (retval) when the queue drains; a non-zero return tells
 * the caller (mrsas_reset_ctrl) that a reset is required.  Exits early
 * if the driver is being removed or the FW reports MFI_STATE_FAULT.
 */
2537 int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2539 int i, outstanding, retval = 0;
2542 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2543 if (sc->remove_in_progress) {
2544 mrsas_dprint(sc, MRSAS_OCR,
2545 "Driver remove or shutdown called.\n");
2549 /* Check if firmware is in fault state */
2550 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2551 outbound_scratch_pad)) & MFI_STATE_MASK;
2552 if (fw_state == MFI_STATE_FAULT) {
2553 mrsas_dprint(sc, MRSAS_OCR,
2554 "Found FW in FAULT state, will reset adapter.\n");
2558 outstanding = atomic_read(&sc->fw_outstanding);
/* Periodic progress notice; also drains the completion queue. */
2562 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2563 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2564 "commands to complete\n",i,outstanding);
2565 mrsas_complete_cmd(sc);
/* Still busy after the full wait: request an adapter reset. */
2570 if (atomic_read(&sc->fw_outstanding)) {
2571 mrsas_dprint(sc, MRSAS_OCR,
2572 " pending commands remain after waiting,"
2573 " will reset adapter.\n");
2581 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2582 * input: Command packet for return to free cmd pool
2584 * This function returns the MFI command to the command list.
/*
 * mrsas_release_mfi_cmd: return an MFI command to the free pool.
 * Clears the CCB pointer and frame count, then appends the command to
 * the tail of the free list under mfi_cmd_pool_lock.  Counterpart of
 * mrsas_get_mfi_cmd().
 */
2586 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2588 struct mrsas_softc *sc = cmd->sc;
2590 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
2591 cmd->ccb_ptr = NULL;
2592 cmd->cmd_id.frame_count = 0;
2593 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2594 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);
2600 * mrsas_get_controller_info - Returns FW's controller structure
2601 * input: Adapter soft state
2602 * Controller information structure
2604 * Issues an internal command (DCMD) to get the FW's controller structure.
2605 * This information is mainly used to find out the maximum IO transfer per
2606 * command supported by the FW.
/*
 * mrsas_get_ctrl_info: fetch the FW's controller-info structure via a
 * polled MR_DCMD_CTRL_GET_INFO DCMD.  On success the DMA buffer
 * (ctlr_info_mem) is copied into the caller-supplied ctrl_info.
 * Always frees the DMA buffer and releases the MFI command on the way
 * out (cleanup lines 2646/2647).  Return statements are on lines not
 * visible in this excerpt.
 */
2608 static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
2609 struct mrsas_ctrl_info *ctrl_info)
2612 struct mrsas_mfi_cmd *cmd;
2613 struct mrsas_dcmd_frame *dcmd;
2615 cmd = mrsas_get_mfi_cmd(sc);
2618 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
2621 dcmd = &cmd->frame->dcmd;
/* Allocate the DMAable buffer that will receive the ctrl info. */
2623 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
2624 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
2625 mrsas_release_mfi_cmd(cmd);
2628 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Build the DCMD frame: polled read of the controller info struct. */
2630 dcmd->cmd = MFI_CMD_DCMD;
2631 dcmd->cmd_status = 0xFF;
2632 dcmd->sge_count = 1;
2633 dcmd->flags = MFI_FRAME_DIR_READ;
2636 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
2637 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2638 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
2639 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
/* mrsas_issue_polled() returns 0 on success. */
2641 if (!mrsas_issue_polled(sc, cmd))
2642 memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
2646 mrsas_free_ctlr_info_cmd(sc);
2647 mrsas_release_mfi_cmd(cmd);
2652 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
2653 * input: Adapter soft state
2655 * Allocates DMAable memory for the controller info internal command.
/*
 * mrsas_alloc_ctlr_info_cmd: allocate the DMAable buffer used by the
 * controller-info DCMD.  Creates a 32-bit-addressable DMA tag sized to
 * struct mrsas_ctrl_info, allocates and loads the map (physical address
 * stored in ctlr_info_phys_addr), and zeroes the buffer.  Freed by
 * mrsas_free_ctlr_info_cmd().  Error-return lines are not visible in
 * this excerpt.
 */
2657 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2661 /* Allocate get controller info command */
2662 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
2663 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
2664 1, 0, // algnmnt, boundary
2665 BUS_SPACE_MAXADDR_32BIT,// lowaddr
2666 BUS_SPACE_MAXADDR, // highaddr
2667 NULL, NULL, // filter, filterarg
2668 ctlr_info_size, // maxsize
2670 ctlr_info_size, // maxsegsize
2671 BUS_DMA_ALLOCNOW, // flags
2672 &sc->ctlr_info_tag)) {
2673 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
2676 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
2677 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
2678 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
/* Load the map; mrsas_addr_cb stores the bus address for the SGE. */
2681 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
2682 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
2683 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
2684 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
2688 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
2693 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
2694 * input: Adapter soft state
2696 * Deallocates memory of the get controller info cmd.
/*
 * mrsas_free_ctlr_info_cmd: tear down the controller-info DMA buffer
 * in reverse order of allocation — unload the map, free the memory,
 * destroy the tag — guarding each step against partial allocation.
 */
2698 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
2700 if (sc->ctlr_info_phys_addr)
2701 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
2702 if (sc->ctlr_info_mem != NULL)
2703 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
2704 if (sc->ctlr_info_tag != NULL)
2705 bus_dma_tag_destroy(sc->ctlr_info_tag);
2709 * mrsas_issue_polled: Issues a polling command
2710 * inputs: Adapter soft state
2711 * Command packet to be issued
2713 * This function is for posting of internal commands to Firmware. MFI
2714 * requires the cmd_status to be set to 0xFF before posting. The maximum
2715 * wait time of the poll response timer is 180 seconds.
/*
 * mrsas_issue_polled: issue an internal MFI command and busy-poll for
 * completion.  cmd_status is preset to 0xFF and the frame flagged
 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; completion is detected by the FW
 * overwriting cmd_status.  Polls up to MRSAS_INTERNAL_CMD_WAIT_TIME
 * seconds in ~1 ms steps (DELAY call on a line not visible here).
 * A status still 0xFF means timeout; any other non-zero status is a
 * FW failure.
 */
2717 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2719 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
2720 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2723 frame_hdr->cmd_status = 0xFF;
2724 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2726 /* Issue the frame using inbound queue port */
2727 if (mrsas_issue_dcmd(sc, cmd)) {
2728 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2733 * Poll response timer to wait for Firmware response. While this
2734 * timer with the DELAY call could block CPU, the time interval for
2735 * this is only 1 millisecond.
2737 if (frame_hdr->cmd_status == 0xFF) {
2738 for (i=0; i < (max_wait * 1000); i++){
2739 if (frame_hdr->cmd_status == 0xFF)
/* Distinguish timeout (still 0xFF) from FW-reported failure. */
2745 if (frame_hdr->cmd_status != 0)
2747 if (frame_hdr->cmd_status == 0xFF)
2748 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
2750 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
2757 * mrsas_issue_dcmd - Issues a MFI Pass thru cmd
2758 * input: Adapter soft state
2761 * This function is called by mrsas_issued_blocked_cmd() and
2762 * mrsas_issued_polled(), to build the MPT command and then fire the
2763 * command to Firmware.
/*
 * mrsas_issue_dcmd: wrap an MFI command into an MPT request descriptor
 * (mrsas_build_mpt_cmd) and fire it at the firmware.  Used by both the
 * polled and the blocking issue paths.  The NULL-descriptor check and
 * return statements sit on lines not visible in this excerpt.
 */
2766 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2768 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2770 req_desc = mrsas_build_mpt_cmd(sc, cmd);
2772 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
2776 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
2782 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
2783 * input: Adapter soft state
2786 * This function is called by mrsas_issue_cmd() to build the MPT-MFI
2787 * passthru command and prepares the MPT command to send to Firmware.
/*
 * mrsas_build_mpt_cmd: build the MPT request descriptor for an MFI
 * pass-through command.  Delegates frame construction to
 * mrsas_build_mptmfi_passthru(), then looks up the request descriptor
 * by SMID (descriptors are indexed from 0, SMIDs from 1 — hence the
 * index-1) and stamps it as a SCSI-IO type request.  Returns the
 * descriptor, or NULL on build failure (return lines not visible).
 */
2789 MRSAS_REQUEST_DESCRIPTOR_UNION *
2790 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2792 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2795 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
2796 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
2800 index = cmd->cmd_id.context.smid;
2802 req_desc = mrsas_get_request_desc(sc, index-1);
2806 req_desc->addr.Words = 0;
2807 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2809 req_desc->SCSIIO.SMID = index;
2815 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
2816 * input: Adapter soft state
2819 * The MPT command and the io_request are setup as a passthru command.
2820 * The SGE chain address is set to frame_phys_addr of the MFI command.
/*
 * mrsas_build_mptmfi_passthru: pair an MFI command with a fresh MPT
 * command and set the MPT io_request up as a pass-through whose SGE
 * chain points at the MFI frame's physical address.  The MPT SMID is
 * saved in the MFI cmd (for descriptor lookup) and the MFI index in
 * the MPT cmd (for completion routing).  NULL check on mpt_cmd sits
 * on a line not visible in this excerpt.
 */
2823 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
2825 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2826 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
2827 struct mrsas_mpt_cmd *mpt_cmd;
2828 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
2830 mpt_cmd = mrsas_get_mpt_cmd(sc);
2834 /* Save the smid. To be used for returning the cmd */
2835 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
2837 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
2840 * For cmds where the flag is set, store the flag and check
2841 * on completion. For cmds with this flag, don't call
2842 * mrsas_complete_cmd.
2845 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
2846 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2848 io_req = mpt_cmd->io_request;
/* Invader/Fury: clear the flags of the last SGE in the main message. */
2850 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
2851 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
2852 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
2853 sgl_ptr_end->Flags = 0;
2856 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2858 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2859 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
2860 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
/* Chain element points at the MFI frame so FW fetches it by DMA. */
2862 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
2864 mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2865 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2867 mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
2873 * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
2874 * input: Adapter soft state
2875 * Command to be issued
2877 * This function waits on an event for the command to be returned
2878 * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
2879 * Used for issuing internal and ioctl commands.
/*
 * mrsas_issue_blocked_cmd: synchronous wrapper — issue an internal MFI
 * command and sleep until the ISR completes it (mrsas_wakeup) or
 * MRSAS_INTERNAL_CMD_WAIT_TIME seconds elapse.  cmd_status is preset
 * to the sentinel ECONNREFUSED; the completion path overwrites it.
 * Sleeps in 1-second (hz) tsleep slices on &sc->chan.
 */
2881 int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2883 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2884 unsigned long total_time = 0;
2887 /* Initialize cmd_status */
2888 cmd->cmd_status = ECONNREFUSED;
2890 /* Build MPT-MFI command for issue to FW */
2891 if (mrsas_issue_dcmd(sc, cmd)){
2892 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
/* Wakeup channel shared with mrsas_wakeup(). */
2896 sc->chan = (void*)&cmd;
2898 /* The following is for debug only... */
2899 //device_printf(sc->mrsas_dev,"DCMD issued to FW, about to sleep-wait...\n");
2900 //device_printf(sc->mrsas_dev,"sc->chan = %p\n", sc->chan);
/* Sleep in 1-second slices until the sentinel is overwritten. */
2903 if (cmd->cmd_status == ECONNREFUSED){
2904 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
2909 if (total_time >= max_wait) {
2910 device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
2919 * mrsas_complete_mptmfi_passthru - Completes a command
2920 * input: sc: Adapter soft state
2921 * cmd: Command to be completed
2922 * status: cmd completion status
2924 * This function is called from mrsas_complete_cmd() after an interrupt
2925 * is received from Firmware, and io_request->Function is
2926 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
/*
 * mrsas_complete_mptmfi_passthru: completion handler for MPT-MFI
 * pass-through commands, dispatched on the MFI header cmd type.
 * Visible branches: invalid command, PD/LD SCSI I/O issued via ioctl
 * (woken via mrsas_wakeup), the DCMD path with special handling for
 * LD-map-update completions (revalidate map, toggle fast_path_io,
 * schedule the next sync), and abort completion.  Several case labels
 * and break statements sit on lines not visible in this excerpt —
 * code left byte-identical.
 */
2929 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
2932 struct mrsas_header *hdr = &cmd->frame->hdr;
2933 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
2935 /* Reset the retry counter for future re-tries */
2936 cmd->retry_for_fw_reset = 0;
2939 cmd->ccb_ptr = NULL;
2942 case MFI_CMD_INVALID:
2943 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
2945 case MFI_CMD_PD_SCSI_IO:
2946 case MFI_CMD_LD_SCSI_IO:
2948 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
2949 * issued either through an IO path or an IOCTL path. If it
2950 * was via IOCTL, we will send it to internal completion.
2952 if (cmd->sync_cmd) {
2954 mrsas_wakeup(sc, cmd);
2960 /* Check for LD map update */
2961 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
2962 (cmd->frame->dcmd.mbox.b[1] == 1)) {
/* Map sync completed: revalidate under raidmap_lock. */
2963 sc->fast_path_io = 0;
2964 lockmgr(&sc->raidmap_lock, LK_EXCLUSIVE);
2965 if (cmd_status != 0) {
2966 if (cmd_status != MFI_STAT_NOT_FOUND)
2967 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
2969 mrsas_release_mfi_cmd(cmd);
2970 lockmgr(&sc->raidmap_lock, LK_RELEASE);
/* Success: release cmd, validate new map, schedule next sync. */
2976 mrsas_release_mfi_cmd(cmd);
2977 if (MR_ValidateMapInfo(sc))
2978 sc->fast_path_io = 0;
2980 sc->fast_path_io = 1;
2981 mrsas_sync_map_info(sc);
2982 lockmgr(&sc->raidmap_lock, LK_RELEASE);
2985 #if 0 //currently not supporting event handling, so commenting out
2986 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
2987 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
2988 mrsas_poll_wait_aen = 0;
2991 /* See if got an event notification */
2992 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
2993 mrsas_complete_aen(sc, cmd);
2995 mrsas_wakeup(sc, cmd);
2998 /* Command issued to abort another cmd return */
2999 mrsas_complete_abort(sc, cmd);
3002 device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
3008 * mrsas_wakeup - Completes an internal command
3009 * input: Adapter soft state
3010 * Command to be completed
3012 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
3013 * a wait timer is started. This function is called from
3014 * mrsas_complete_mptmfi_passthru() as it completes the command,
3015 * to wake up from the command wait.
/*
 * mrsas_wakeup: complete an internal (blocked) command.
 * Copies the FW status into cmd->cmd_status, mapping the sentinel
 * ECONNREFUSED back to 0 so the sleeper in mrsas_issue_blocked_cmd()
 * sees success, then wakes that sleeper on &sc->chan.
 */
3017 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3019 cmd->cmd_status = cmd->frame->io.cmd_status;
3021 if (cmd->cmd_status == ECONNREFUSED)
3022 cmd->cmd_status = 0;
3024 /* For debug only ... */
3025 //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
3027 sc->chan = (void*)&cmd;
3028 wakeup_one((void *)&sc->chan);
3033 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
3034 * input: Adapter soft state
3035 * Shutdown/Hibernate
3037 * This function issues a DCMD internal command to Firmware to initiate
3038 * shutdown of the controller.
/*
 * mrsas_shutdown_ctlr: send a blocking shutdown/hibernate DCMD (opcode
 * chosen by the caller) to the firmware.  Aborts any pending AEN and
 * map-update commands first so nothing is in flight when the FW goes
 * down.  No-op if the HBA is in HW critical error.
 */
3040 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3042 struct mrsas_mfi_cmd *cmd;
3043 struct mrsas_dcmd_frame *dcmd;
3045 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3048 cmd = mrsas_get_mfi_cmd(sc);
3050 device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
/* Abort outstanding internal commands before shutting down. */
3055 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3057 if (sc->map_update_cmd)
3058 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3060 dcmd = &cmd->frame->dcmd;
3061 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Data-less DCMD: no SGE, direction NONE, opcode from caller. */
3063 dcmd->cmd = MFI_CMD_DCMD;
3064 dcmd->cmd_status = 0x0;
3065 dcmd->sge_count = 0;
3066 dcmd->flags = MFI_FRAME_DIR_NONE;
3069 dcmd->data_xfer_len = 0;
3070 dcmd->opcode = opcode;
3072 device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
3074 mrsas_issue_blocked_cmd(sc, cmd);
3075 mrsas_release_mfi_cmd(cmd);
3081 * mrsas_flush_cache: Requests FW to flush all its caches
3082 * input: Adapter soft state
3084 * This function is issues a DCMD internal command to Firmware to initiate
3085 * flushing of all caches.
/*
 * mrsas_flush_cache: issue a blocking MR_DCMD_CTRL_CACHE_FLUSH DCMD
 * asking the FW to flush both controller and disk caches
 * (MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE in mbox.b[0]).
 * No-op if the HBA is in HW critical error.
 */
3087 static void mrsas_flush_cache(struct mrsas_softc *sc)
3089 struct mrsas_mfi_cmd *cmd;
3090 struct mrsas_dcmd_frame *dcmd;
3092 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3095 cmd = mrsas_get_mfi_cmd(sc);
3097 device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
3101 dcmd = &cmd->frame->dcmd;
3102 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Data-less DCMD; flush targets are encoded in the mailbox byte. */
3104 dcmd->cmd = MFI_CMD_DCMD;
3105 dcmd->cmd_status = 0x0;
3106 dcmd->sge_count = 0;
3107 dcmd->flags = MFI_FRAME_DIR_NONE;
3110 dcmd->data_xfer_len = 0;
3111 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3112 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3114 mrsas_issue_blocked_cmd(sc, cmd);
3115 mrsas_release_mfi_cmd(cmd);
3121 * mrsas_get_map_info: Load and validate RAID map
3122 * input: Adapter instance soft state
3124 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
3125 * to load and validate RAID map. It returns 0 if successful, 1 other-
/*
 * mrsas_get_map_info: load the RAID map from FW (mrsas_get_ld_map_info)
 * and validate it (MR_ValidateMapInfo).  fast_path_io is enabled only
 * when both steps succeed.  Returns retcode: 0 on success, non-zero
 * otherwise.
 */
3128 static int mrsas_get_map_info(struct mrsas_softc *sc)
3130 uint8_t retcode = 0;
3132 sc->fast_path_io = 0;
3133 if (!mrsas_get_ld_map_info(sc)) {
3134 retcode = MR_ValidateMapInfo(sc);
3136 sc->fast_path_io = 1;
3144 * mrsas_get_ld_map_info: Get FW's ld_map structure
3145 * input: Adapter instance soft state
3147 * Issues an internal command (DCMD) to get the FW's controller PD
/*
 * mrsas_get_ld_map_info: fetch the FW's LD RAID map via a polled
 * MR_DCMD_LD_MAP_GET_INFO DCMD into the double-buffered raidmap
 * area selected by (map_id & 1).  The map buffer is zeroed before
 * the read; sc->map_sz bytes are transferred.  Return statements sit
 * on lines not visible in this excerpt.
 */
3150 static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
3153 struct mrsas_mfi_cmd *cmd;
3154 struct mrsas_dcmd_frame *dcmd;
3155 MR_FW_RAID_MAP_ALL *map;
3156 bus_addr_t map_phys_addr = 0;
3158 cmd = mrsas_get_mfi_cmd(sc);
3160 device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
3164 dcmd = &cmd->frame->dcmd;
/* Pick the current half of the double-buffered raid map. */
3166 map = sc->raidmap_mem[(sc->map_id & 1)];
3167 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3169 device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
3170 mrsas_release_mfi_cmd(cmd);
3173 memset(map, 0, sizeof(*map));
3174 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Polled read of map_sz bytes into the map buffer. */
3176 dcmd->cmd = MFI_CMD_DCMD;
3177 dcmd->cmd_status = 0xFF;
3178 dcmd->sge_count = 1;
3179 dcmd->flags = MFI_FRAME_DIR_READ;
3182 dcmd->data_xfer_len = sc->map_sz;
3183 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3184 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3185 dcmd->sgl.sge32[0].length = sc->map_sz;
3186 if (!mrsas_issue_polled(sc, cmd))
3190 device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
3193 mrsas_release_mfi_cmd(cmd);
3198 * mrsas_sync_map_info: Get FW's ld_map structure
3199 * input: Adapter instance soft state
3201 * Issues an internal command (DCMD) to get the FW's controller PD
/*
 * mrsas_sync_map_info: push a per-LD target/sequence-number sync table
 * to the FW and arm the next map-update notification.  Builds an array
 * of MR_LD_TARGET_SYNC entries (targetId + seqNum per LD) in the
 * *other* half of the double-buffered raidmap area ((map_id - 1) & 1),
 * then issues MR_DCMD_LD_MAP_GET_INFO as a WRITE with the
 * MRSAS_DCMD_MBOX_PEND_FLAG so the DCMD stays pending in FW until the
 * map changes.  The pending cmd is remembered in sc->map_update_cmd.
 */
3204 static int mrsas_sync_map_info(struct mrsas_softc *sc)
3207 struct mrsas_mfi_cmd *cmd;
3208 struct mrsas_dcmd_frame *dcmd;
3209 uint32_t size_sync_info, num_lds;
3210 MR_LD_TARGET_SYNC *target_map = NULL;
3211 MR_FW_RAID_MAP_ALL *map;
3213 MR_LD_TARGET_SYNC *ld_sync;
3214 bus_addr_t map_phys_addr = 0;
3216 cmd = mrsas_get_mfi_cmd(sc);
3218 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
/* Current map supplies the LD count; spare buffer holds the table. */
3222 map = sc->raidmap_mem[sc->map_id & 1];
3223 num_lds = map->raidMap.ldCount;
3225 dcmd = &cmd->frame->dcmd;
3226 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3227 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3229 target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
3230 memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
3232 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3234 ld_sync = (MR_LD_TARGET_SYNC *)target_map;
/* One entry per LD: target id + current sequence number. */
3236 for (i = 0; i < num_lds; i++, ld_sync++) {
3237 raid = MR_LdRaidGet(i, map);
3238 ld_sync->targetId = MR_GetLDTgtId(i, map);
3239 ld_sync->seqNum = raid->seqNum;
/* WRITE-direction DCMD; PEND flag keeps it outstanding in FW. */
3242 dcmd->cmd = MFI_CMD_DCMD;
3243 dcmd->cmd_status = 0xFF;
3244 dcmd->sge_count = 1;
3245 dcmd->flags = MFI_FRAME_DIR_WRITE;
3248 dcmd->data_xfer_len = sc->map_sz;
3249 dcmd->mbox.b[0] = num_lds;
3250 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3251 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3252 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3253 dcmd->sgl.sge32[0].length = sc->map_sz;
3255 sc->map_update_cmd = cmd;
3256 if (mrsas_issue_dcmd(sc, cmd)) {
3257 device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
3264 * mrsas_get_pd_list: Returns FW's PD list structure
3265 * input: Adapter soft state
3267 * Issues an internal command (DCMD) to get the FW's controller PD
3268 * list structure. This information is mainly used to find out about
3269 * the physical drives supported by the Firmware.
/*
 * mrsas_get_pd_list: fetch the FW's physical-drive list with a polled
 * MR_DCMD_PD_LIST_QUERY DCMD (query type: PDs exposed to host) into a
 * temporary DMA buffer, then rebuild sc->local_pd_list (tid, drive
 * type, state SYSTEM per device id) and copy it into sc->pd_list.
 * Temp DMA resources and the MFI cmd are freed on the way out.
 * NOTE(review): the kmalloc(tcmd) result is used without a NULL check
 * on the visible lines — possibly checked on a dropped line; confirm
 * against the full source.
 */
3271 static int mrsas_get_pd_list(struct mrsas_softc *sc)
3273 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
3274 struct mrsas_mfi_cmd *cmd;
3275 struct mrsas_dcmd_frame *dcmd;
3276 struct MR_PD_LIST *pd_list_mem;
3277 struct MR_PD_ADDRESS *pd_addr;
3278 bus_addr_t pd_list_phys_addr = 0;
3279 struct mrsas_tmp_dcmd *tcmd;
3281 cmd = mrsas_get_mfi_cmd(sc);
3283 device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
3287 dcmd = &cmd->frame->dcmd;
/* Temporary DMA buffer sized for the maximum PD list. */
3289 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3290 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3291 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3292 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
3293 mrsas_release_mfi_cmd(cmd);
3297 pd_list_mem = tcmd->tmp_dcmd_mem;
3298 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3300 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Query only PDs exposed to the host. */
3302 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3303 dcmd->mbox.b[1] = 0;
3304 dcmd->cmd = MFI_CMD_DCMD;
3305 dcmd->cmd_status = 0xFF;
3306 dcmd->sge_count = 1;
3307 dcmd->flags = MFI_FRAME_DIR_READ;
3310 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3311 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3312 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3313 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3315 if (!mrsas_issue_polled(sc, cmd))
3320 /* Get the instance PD list */
3321 pd_count = MRSAS_MAX_PD;
3322 pd_addr = pd_list_mem->addr;
3323 if (retcode == 0 && pd_list_mem->count < pd_count) {
3324 memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3325 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3326 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3327 sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
3328 sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
3333 /* Use mutex/spinlock if pd_list component size increases beyond 32 bit. */
3334 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3335 mrsas_free_tmp_dcmd(tcmd);
3336 mrsas_release_mfi_cmd(cmd);
3337 kfree(tcmd, M_MRSAS);
3342 * mrsas_get_ld_list: Returns FW's LD list structure
3343 * input: Adapter soft state
3345 * Issues an internal command (DCMD) to get the FW's controller PD
3346 * list structure. This information is mainly used to find out about
3347 * the logical drives supported by the FW.
/*
 * mrsas_get_ld_list: fetch the FW's logical-drive list with a polled
 * MR_DCMD_LD_GET_LIST DCMD into a temporary DMA buffer, then record
 * CurLdCount and the target ids of non-zero-state LDs in sc->ld_ids
 * (initialized to 0xff = unused).  Temp DMA resources and the MFI cmd
 * are freed on the way out.
 * NOTE(review): as in mrsas_get_pd_list, the kmalloc(tcmd) result is
 * not NULL-checked on the visible lines — confirm against full source.
 */
3349 static int mrsas_get_ld_list(struct mrsas_softc *sc)
3351 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3352 struct mrsas_mfi_cmd *cmd;
3353 struct mrsas_dcmd_frame *dcmd;
3354 struct MR_LD_LIST *ld_list_mem;
3355 bus_addr_t ld_list_phys_addr = 0;
3356 struct mrsas_tmp_dcmd *tcmd;
3358 cmd = mrsas_get_mfi_cmd(sc);
3360 device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
3364 dcmd = &cmd->frame->dcmd;
/* Temporary DMA buffer sized for one MR_LD_LIST. */
3366 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3367 ld_list_size = sizeof(struct MR_LD_LIST);
3368 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3369 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
3370 mrsas_release_mfi_cmd(cmd);
3374 ld_list_mem = tcmd->tmp_dcmd_mem;
3375 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3377 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Polled read of the LD list. */
3379 dcmd->cmd = MFI_CMD_DCMD;
3380 dcmd->cmd_status = 0xFF;
3381 dcmd->sge_count = 1;
3382 dcmd->flags = MFI_FRAME_DIR_READ;
3384 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3385 dcmd->opcode = MR_DCMD_LD_GET_LIST;
3386 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3387 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3390 if (!mrsas_issue_polled(sc, cmd))
3395 /* Get the instance LD list */
3396 if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
3397 sc->CurLdCount = ld_list_mem->ldCount;
/* 0xff marks unused slots in the target-id table. */
3398 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
3399 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3400 if (ld_list_mem->ldList[ld_index].state != 0) {
3401 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3402 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3407 mrsas_free_tmp_dcmd(tcmd);
3408 mrsas_release_mfi_cmd(cmd);
3409 kfree(tcmd, M_MRSAS);
3414 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
3415 * input: Adapter soft state
3419 * Allocates DMAable memory for a temporary internal command. The allocated
3420 * memory is initialized to all zeros upon successful loading of the dma
3423 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
/* Tag is limited to 32-bit bus addresses: the DCMD frames built by callers
 * carry the address in a 32-bit SGE (sgl.sge32). */
3426 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
3427 1, 0, // algnmnt, boundary
3428 BUS_SPACE_MAXADDR_32BIT,// lowaddr
3429 BUS_SPACE_MAXADDR, // highaddr
3430 NULL, NULL, // filter, filterarg
3434 BUS_DMA_ALLOCNOW, // flags
3435 &tcmd->tmp_dcmd_tag)) {
3436 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3439 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3440 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3441 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
/* mrsas_addr_cb presumably writes the mapped bus address into
 * tmp_dcmd_phys_addr (passed as the callback argument) -- TODO confirm. */
3444 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3445 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3446 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3447 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
/* Zero the buffer so the firmware reply area starts clean. */
3451 memset(tcmd->tmp_dcmd_mem, 0, size);
3456 * mrsas_free_tmp_dcmd: Free memory for temporary command
3457 * input: temporary dcmd pointer
3459 * Deallocates memory of the temporary command for use in the construction
3460 * of the internal DCMD.
3462 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
/* Unload, free, and destroy in reverse order of allocation. Each step is
 * guarded so a partially-constructed tmp dcmd can be torn down safely. */
3464 if (tmp->tmp_dcmd_phys_addr)
3465 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3466 if (tmp->tmp_dcmd_mem != NULL)
3467 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3468 if (tmp->tmp_dcmd_tag != NULL)
3469 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3473 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
3474 * input: Adapter soft state
3475 * Previously issued cmd to be aborted
3477 * This function is used to abort previously issued commands, such as AEN and
3478 * RAID map sync map commands. The abort command is sent as a DCMD internal
3479 * command and subsequently the driver will wait for a return status. The
3480 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3482 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3483 struct mrsas_mfi_cmd *cmd_to_abort)
3485 struct mrsas_mfi_cmd *cmd;
3486 struct mrsas_abort_frame *abort_fr;
3487 u_int8_t retcode = 0;
3488 unsigned long total_time = 0;
3489 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3491 cmd = mrsas_get_mfi_cmd(sc);
3493 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3497 abort_fr = &cmd->frame->abort;
3499 /* Prepare and issue the abort frame */
3500 abort_fr->cmd = MFI_CMD_ABORT;
3501 abort_fr->cmd_status = 0xFF;
3502 abort_fr->flags = 0;
/* The victim is identified by its command index and MFI frame bus address. */
3503 abort_fr->abort_context = cmd_to_abort->index;
3504 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3505 abort_fr->abort_mfi_phys_addr_hi = 0;
/* 0xFF marks "still pending"; mrsas_complete_abort() clears it to 0
 * before waking this thread. */
3508 cmd->cmd_status = 0xFF;
3510 if (mrsas_issue_dcmd(sc, cmd)) {
3511 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3515 /* Wait for this cmd to complete */
/* NOTE(review): the sleep channel is the address of sc->chan itself, which
 * matches the wakeup_one(&sc->chan) in mrsas_complete_abort(). */
3516 sc->chan = (void*)&cmd;
3518 if (cmd->cmd_status == 0xFF){
/* Sleep up to one second (hz ticks) per iteration of the wait loop. */
3519 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3524 if (total_time >= max_wait) {
3525 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3532 mrsas_release_mfi_cmd(cmd);
3537 * mrsas_complete_abort: Completes aborting a command
3538 * input: Adapter soft state
3539 * Cmd that was issued to abort another cmd
3541 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
3542 * to change after sending the command. This function is called from
3543 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3545 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/* Only synchronous (blocked) commands have a waiter to release. */
3547 if (cmd->sync_cmd) {
/* Clearing cmd_status ends the 0xFF poll loop in
 * mrsas_issue_blocked_abort_cmd(); wake it on the &sc->chan channel. */
3549 cmd->cmd_status = 0;
3550 sc->chan = (void*)&cmd;
3551 wakeup_one((void *)&sc->chan);
3557 * mrsas_aen_handler: Callback function for AEN processing from thread context.
3558 * input: Adapter soft state
3561 void mrsas_aen_handler(struct mrsas_softc *sc)
3563 union mrsas_evt_class_locale class_locale;
3569 device_printf(sc->mrsas_dev, "invalid instance!\n");
3573 if (sc->evt_detail_mem) {
/* Dispatch on the firmware event code. In this handler PD events rescan
 * sim_1 and LD events rescan sim_0. */
3574 switch (sc->evt_detail_mem->code) {
3575 case MR_EVT_PD_INSERTED:
3576 mrsas_get_pd_list(sc);
3577 mrsas_bus_scan_sim(sc, sc->sim_1);
3580 case MR_EVT_PD_REMOVED:
3581 mrsas_get_pd_list(sc);
3582 mrsas_bus_scan_sim(sc, sc->sim_1);
3585 case MR_EVT_LD_OFFLINE:
3586 case MR_EVT_CFG_CLEARED:
3587 case MR_EVT_LD_DELETED:
3588 mrsas_bus_scan_sim(sc, sc->sim_0);
3591 case MR_EVT_LD_CREATED:
3592 mrsas_get_ld_list(sc);
3593 mrsas_bus_scan_sim(sc, sc->sim_0);
3596 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3597 case MR_EVT_FOREIGN_CFG_IMPORTED:
3598 case MR_EVT_LD_STATE_CHANGE:
3606 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
/* Broad refresh path: re-read both PD and LD lists and rescan both SIMs. */
3610 mrsas_get_pd_list(sc);
3611 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3612 mrsas_bus_scan_sim(sc, sc->sim_1);
3613 mrsas_get_ld_list(sc);
3614 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3615 mrsas_bus_scan_sim(sc, sc->sim_0);
/* Re-arm the AEN so the next firmware event (seq_num + 1) is delivered. */
3618 seq_num = sc->evt_detail_mem->seq_num + 1;
3620 // Register AEN with FW for latest sequence number plus 1
3621 class_locale.members.reserved = 0;
3622 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3623 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3625 if (sc->aen_cmd != NULL )
/* aen_lock serializes AEN (re)registration against other contexts. */
3628 lockmgr(&sc->aen_lock, LK_EXCLUSIVE);
3629 error = mrsas_register_aen(sc, seq_num,
3631 lockmgr(&sc->aen_lock, LK_RELEASE);
3634 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3640 * mrsas_complete_aen: Completes AEN command
3641 * input: Adapter soft state
3642 * AEN cmd that has completed
3644 * This function will be called from ISR and will continue
3645 * event processing from thread context by enqueuing task
3646 * in ev_tq (callback function "mrsas_aen_handler").
3648 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3651 * Don't signal app if it is just an aborted previously registered aen
3653 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3660 mrsas_release_mfi_cmd(cmd);
/* Hand the event off to mrsas_aen_handler() via the ev_tq taskqueue,
 * unless the driver is being detached. */
3662 if (!sc->remove_in_progress)
3663 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
/* Newbus device method table: PCI probe/attach/detach plus power-management
 * entry points for the mrsas driver. */
3668 static device_method_t mrsas_methods[] = {
3669 DEVMETHOD(device_probe, mrsas_probe),
3670 DEVMETHOD(device_attach, mrsas_attach),
3671 DEVMETHOD(device_detach, mrsas_detach),
3672 DEVMETHOD(device_suspend, mrsas_suspend),
3673 DEVMETHOD(device_resume, mrsas_resume),
3674 DEVMETHOD(bus_print_child, bus_generic_print_child),
3675 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* Driver descriptor: the softc size lets newbus allocate one
 * struct mrsas_softc per attached device instance. */
3679 static driver_t mrsas_driver = {
3682 sizeof(struct mrsas_softc)
3685 static devclass_t mrsas_devclass;
/* Register the driver on the PCI bus and declare the CAM dependency. */
3686 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, NULL, NULL);
3687 MODULE_VERSION(mrsas, 1);
3688 MODULE_DEPEND(mrsas, cam, 1, 1, 1);