2 * Copyright (c) 2014, LSI Corp.
5 * Support: freebsdraid@lsi.com
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies,either expressed or implied, of the FreeBSD Project.
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 * ATTN: MegaRaid FreeBSD
42 * $FreeBSD: head/sys/dev/mrsas/mrsas.c 265555 2014-05-07 16:16:49Z ambrisko $
45 #include <dev/raid/mrsas/mrsas.h>
46 #include <dev/raid/mrsas/mrsas_ioctl.h>
48 #include <bus/cam/cam.h>
49 #include <bus/cam/cam_ccb.h>
51 #include <sys/sysctl.h>
52 #include <sys/types.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/device.h>
56 #include <sys/spinlock2.h>
62 static d_open_t mrsas_open;
63 static d_close_t mrsas_close;
64 static d_read_t mrsas_read;
65 static d_write_t mrsas_write;
66 static d_ioctl_t mrsas_ioctl;
68 static struct mrsas_ident *mrsas_find_ident(device_t);
69 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
70 static void mrsas_flush_cache(struct mrsas_softc *sc);
71 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
72 static void mrsas_ocr_thread(void *arg);
73 static int mrsas_get_map_info(struct mrsas_softc *sc);
74 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
75 static int mrsas_sync_map_info(struct mrsas_softc *sc);
76 static int mrsas_get_pd_list(struct mrsas_softc *sc);
77 static int mrsas_get_ld_list(struct mrsas_softc *sc);
78 static int mrsas_setup_irq(struct mrsas_softc *sc);
79 static int mrsas_alloc_mem(struct mrsas_softc *sc);
80 static int mrsas_init_fw(struct mrsas_softc *sc);
81 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
82 static int mrsas_complete_cmd(struct mrsas_softc *sc);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
85 struct mrsas_ctrl_info *ctrl_info);
86 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
87 struct mrsas_mfi_cmd *cmd_to_abort);
88 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
89 u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
90 struct mrsas_mfi_cmd *mfi_cmd);
91 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
92 int mrsas_init_adapter(struct mrsas_softc *sc);
93 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
94 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
95 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
96 int mrsas_ioc_init(struct mrsas_softc *sc);
97 int mrsas_bus_scan(struct mrsas_softc *sc);
98 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
99 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
100 int mrsas_reset_ctrl(struct mrsas_softc *sc);
101 int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
102 int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
103 struct mrsas_mfi_cmd *cmd);
104 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
106 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
107 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
108 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 void mrsas_disable_intr(struct mrsas_softc *sc);
111 void mrsas_enable_intr(struct mrsas_softc *sc);
112 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
113 void mrsas_free_mem(struct mrsas_softc *sc);
114 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
115 void mrsas_isr(void *arg);
116 void mrsas_teardown_intr(struct mrsas_softc *sc);
117 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
118 void mrsas_kill_hba (struct mrsas_softc *sc);
119 void mrsas_aen_handler(struct mrsas_softc *sc);
120 void mrsas_write_reg(struct mrsas_softc *sc, int offset,
122 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
123 u_int32_t req_desc_hi);
124 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
125 void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
126 struct mrsas_mfi_cmd *cmd, u_int8_t status);
127 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
129 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
130 MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
131 struct mrsas_mfi_cmd *cmd);
133 extern int mrsas_cam_attach(struct mrsas_softc *sc);
134 extern void mrsas_cam_detach(struct mrsas_softc *sc);
135 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
136 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
137 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
138 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
139 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
140 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
141 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
142 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
143 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
144 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
145 extern void mrsas_xpt_release(struct mrsas_softc *sc);
146 extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
148 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
149 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
150 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
151 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
155 * PCI device struct and table
/*
 * PCI identification table: one entry per supported LSI MegaRAID SAS
 * controller generation.  Fields are (vendor, device, subvendor,
 * subdevice, description); 0xffff in a sub-field acts as a wildcard
 * (see the matching logic in mrsas_find_ident()).
 * NOTE(review): the struct members and the table terminator are not
 * visible in this chunk -- text appears truncated; verify against
 * upstream mrsas.c.
 */
158 typedef struct mrsas_ident {
166 MRSAS_CTLR_ID device_table[] = {
167 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
168 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
169 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
174 * Character device entry points
/*
 * Character device entry points registered for the /dev/mrsasN control
 * node (created in mrsas_attach()).  D_MPSAFE: handlers run without the
 * giant lock.
 */
177 static struct dev_ops mrsas_ops = {
178 { "mrsas", 0, D_MPSAFE },
179 .d_open = mrsas_open,
180 .d_close = mrsas_close,
181 .d_read = mrsas_read,
182 .d_write = mrsas_write,
183 .d_ioctl = mrsas_ioctl,
/* Malloc type used for all driver-internal allocations. */
186 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
/*
 * Loader tunable: when set, Thunderbolt-class controllers are left to the
 * mfi(4) driver instead of being claimed here (checked in mrsas_probe()).
 */
188 static int mrsas_mfi_enable = 0;
189 TUNABLE_INT("hw.mrsas.mfi_enable", &mrsas_mfi_enable);
/* Loader tunable: MSI interrupt allocation, enabled by default. */
191 static int mrsas_msi_enable = 1;
192 TUNABLE_INT("hw.mrsas.msi.enable", &mrsas_msi_enable);
195 * In the cdevsw routines, we find our softc by using the si_drv1 member
196 * of struct cdev. We set this variable to point to our softc in our
197 * attach routine when we create the /dev entry.
/*
 * Character-device entry points.  The softc is recovered from the
 * si_drv1 member of the cdev, which mrsas_attach() points at the softc
 * when the /dev entry is created.
 * NOTE(review): only the local declarations are visible in this chunk;
 * the statement bodies and return paths appear truncated.
 */
200 mrsas_open(struct dev_open_args *ap)
202 cdev_t dev = ap->a_head.a_dev;
203 struct mrsas_softc *sc;
/* close entry point */
210 mrsas_close(struct dev_close_args *ap)
212 cdev_t dev = ap->a_head.a_dev;
213 struct mrsas_softc *sc;
/* read entry point */
220 mrsas_read(struct dev_read_args *ap)
222 cdev_t dev = ap->a_head.a_dev;
223 struct mrsas_softc *sc;
/* write entry point */
229 mrsas_write(struct dev_write_args *ap)
231 cdev_t dev = ap->a_head.a_dev;
232 struct mrsas_softc *sc;
239 * Register Read/Write Functions
/*
 * mrsas_write_reg: 32-bit write to a controller register at `offset`
 * within the BAR mapped during attach (sc->bus_tag/sc->bus_handle).
 * NOTE(review): the second parameter line (the `value` argument, per the
 * prototype near the top of the file) is missing from this chunk.
 */
243 mrsas_write_reg(struct mrsas_softc *sc, int offset,
246 bus_space_tag_t bus_tag = sc->bus_tag;
247 bus_space_handle_t bus_handle = sc->bus_handle;
249 bus_space_write_4(bus_tag, bus_handle, offset, value);
253 mrsas_read_reg(struct mrsas_softc *sc, int offset)
255 bus_space_tag_t bus_tag = sc->bus_tag;
256 bus_space_handle_t bus_handle = sc->bus_handle;
258 return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
263 * Interrupt Disable/Enable/Clear Functions
/*
 * mrsas_disable_intr: mask all controller interrupts by writing an
 * all-ones mask to the outbound interrupt mask register, then read the
 * register back to flush the posted PCI write.
 * NOTE(review): the declaration of `status` is not visible in this
 * chunk; the read-back value is intentionally discarded.
 */
266 void mrsas_disable_intr(struct mrsas_softc *sc)
268 u_int32_t mask = 0xFFFFFFFF;
271 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
272 /* Dummy read to force pci flush */
273 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * mrsas_enable_intr: acknowledge any stale status by writing ~0 to the
 * outbound interrupt status register, then unmask only the Fusion reply
 * interrupt (~mask clears that bit in the mask register).  Each write is
 * followed by a read-back to flush the posted PCI write.
 * NOTE(review): the declaration of `status` is not visible in this chunk.
 */
276 void mrsas_enable_intr(struct mrsas_softc *sc)
278 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
281 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
282 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
284 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
285 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * mrsas_clear_intr: read and classify a pending interrupt.
 *
 * On a firmware state-change interrupt, the scratch pad register is
 * sampled; if the firmware reports FAULT the OCR (online controller
 * reset) thread is woken, and the status is written back to acknowledge
 * the interrupt (with a read to flush the posted write).  Interrupts
 * that do not carry the Fusion reply bit are not ours.
 * NOTE(review): the return statements of this function are not visible
 * in this chunk.
 */
288 static int mrsas_clear_intr(struct mrsas_softc *sc)
290 u_int32_t status, fw_status, fw_state;
292 /* Read received interrupt */
293 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
295 /* If FW state change interrupt is received, write to it again to clear */
296 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
297 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
298 outbound_scratch_pad));
299 fw_state = fw_status & MFI_STATE_MASK;
300 if (fw_state == MFI_STATE_FAULT) {
301 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
/* Wake the OCR thread so it can attempt recovery. */
302 if(sc->ocr_thread_active)
303 wakeup(&sc->ocr_chan);
305 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
306 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
310 /* Not our interrupt, so just return */
311 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
314 /* We got a reply interrupt */
319 * PCI Support Functions
/*
 * mrsas_find_ident: walk device_table looking for an entry matching the
 * PCI vendor/device IDs of `dev`; subvendor/subdevice match either
 * exactly or against the 0xffff wildcard.
 * NOTE(review): the return statements (matched entry / NULL on miss)
 * are not visible in this chunk.
 */
322 static struct mrsas_ident * mrsas_find_ident(device_t dev)
324 struct mrsas_ident *pci_device;
326 for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
328 if ((pci_device->vendor == pci_get_vendor(dev)) &&
329 (pci_device->device == pci_get_device(dev)) &&
330 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
331 (pci_device->subvendor == 0xffff)) &&
332 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
333 (pci_device->subdevice == 0xffff)))
/*
 * mrsas_probe: PCI probe entry point.  Matches the device against
 * device_table; Thunderbolt controllers are ceded to mfi(4) when the
 * hw.mrsas.mfi_enable tunable is set.  On a match the device
 * description is installed and BUS_PROBE_DEFAULT returned.
 * NOTE(review): the banner printed for the first controller and the
 * failure return path are partially truncated in this chunk.
 */
339 static int mrsas_probe(device_t dev)
341 static u_int8_t first_ctrl = 1;
342 struct mrsas_ident *id;
344 if ((id = mrsas_find_ident(dev)) != NULL) {
345 /* give priority to mfi(4) if tunable set */
346 TUNABLE_INT_FETCH("hw.mrsas.mfi_enable", &mrsas_mfi_enable);
347 if ((id->device == MRSAS_TBOLT) && mrsas_mfi_enable) {
351 kprintf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n",
355 device_set_desc(dev, id->desc);
356 return (BUS_PROBE_DEFAULT);
363 * mrsas_setup_sysctl: setup sysctl values for mrsas
364 * input: Adapter instance soft state
366 * Setup sysctl entries for mrsas driver.
/*
 * mrsas_setup_sysctl: publish the per-controller sysctl tree
 * (hw.mrsasN.*): OCR control, driver version, reset/outstanding-command
 * counters, debug level, IO timeout and FW fault-check delay.  Uses the
 * device's sysctl context when one exists, otherwise creates a private
 * context/tree on the softc.
 */
369 mrsas_setup_sysctl(struct mrsas_softc *sc)
371 struct sysctl_ctx_list *sysctl_ctx = NULL;
372 struct sysctl_oid *sysctl_tree = NULL;
373 char tmpstr[80], tmpstr2[80];
376 * Setup the sysctl variable so the user can change the debug level
379 ksnprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
380 device_get_unit(sc->mrsas_dev));
381 ksnprintf(tmpstr2, sizeof(tmpstr2), "mrsas%d", device_get_unit(sc->mrsas_dev));
383 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
384 if (sysctl_ctx != NULL)
385 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
/* Fall back to a driver-owned context when the device has no tree. */
387 if (sysctl_tree == NULL) {
388 sysctl_ctx_init(&sc->sysctl_ctx);
389 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
390 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, tmpstr2,
391 CTLFLAG_RD, 0, tmpstr);
392 if (sc->sysctl_tree == NULL)
394 sysctl_ctx = &sc->sysctl_ctx;
395 sysctl_tree = sc->sysctl_tree;
397 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
398 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
399 "Disable the use of OCR");
401 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
402 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
403 strlen(MRSAS_VERSION), "driver version");
405 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
406 OID_AUTO, "reset_count", CTLFLAG_RD,
407 &sc->reset_count, 0, "number of ocr from start of the day");
409 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
410 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
411 &sc->fw_outstanding, 0, "FW outstanding commands");
413 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
414 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
415 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
417 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
418 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
419 "Driver debug level");
421 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
422 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
423 0, "Driver IO timeout value in mili-second.");
425 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
426 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
427 &sc->mrsas_fw_fault_check_delay,
428 0, "FW fault check thread delay in seconds. <default is 1 sec>");
430 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
431 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
432 &sc->reset_in_progress, 0, "ocr in progress status");
437 * mrsas_get_tunables: get tunable parameters.
438 * input: Adapter instance soft state
440 * Get tunable parameters. This will help to debug driver at boot time.
/*
 * mrsas_get_tunables: seed softc defaults (debug level, IO timeout,
 * fault-check delay) and then override them from the global
 * hw.mrsas.debug_level tunable and the per-unit dev.mrsas.N.debug_level
 * tunable.
 * NOTE(review): the declaration of `tmpstr` is not visible in this chunk.
 */
443 mrsas_get_tunables(struct mrsas_softc *sc)
447 /* XXX default to some debugging for now */
448 sc->mrsas_debug = MRSAS_FAULT;
449 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
450 sc->mrsas_fw_fault_check_delay = 1;
452 sc->reset_in_progress = 0;
455 * Grab the global variables.
457 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
459 /* Grab the unit-instance variables */
460 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
461 device_get_unit(sc->mrsas_dev));
462 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
466 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
467 * Used to get sequence number at driver load time.
468 * input: Adapter soft state
470 * Allocates DMAable memory for the event log info internal command.
/*
 * mrsas_alloc_evt_log_info_cmd: allocate DMA-able memory for the
 * event-log-info internal command (used at load time to fetch the
 * firmware event sequence number).  Creates a 32-bit-addressable DMA
 * tag sized for struct mrsas_evt_log_info, allocates and loads the
 * buffer, and zeroes it.  mrsas_addr_cb stores the bus address into
 * sc->el_info_phys_addr.
 * NOTE(review): error-return lines after each failure printf are not
 * visible in this chunk.
 */
472 int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
476 /* Allocate get event log info command */
477 el_info_size = sizeof(struct mrsas_evt_log_info);
478 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
479 1, 0, // algnmnt, boundary
480 BUS_SPACE_MAXADDR_32BIT,// lowaddr
481 BUS_SPACE_MAXADDR, // highaddr
482 NULL, NULL, // filter, filterarg
483 el_info_size, // maxsize
485 el_info_size, // maxsegsize
486 BUS_DMA_ALLOCNOW, // flags
488 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
491 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
492 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
493 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
496 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
497 sc->el_info_mem, el_info_size, mrsas_addr_cb,
498 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
499 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
503 memset(sc->el_info_mem, 0, el_info_size);
508 * mrsas_free_evt_info_cmd: Free memory for Event log info command
509 * input: Adapter soft state
511 * Deallocates memory for the event log info internal command.
513 void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
515 if (sc->el_info_phys_addr)
516 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
517 if (sc->el_info_mem != NULL)
518 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
519 if (sc->el_info_tag != NULL)
520 bus_dma_tag_destroy(sc->el_info_tag);
524 * mrsas_get_seq_num: Get latest event sequence number
525 * @sc: Adapter soft state
526 * @eli: Firmware event log sequence number information.
527 * Firmware maintains a log of all events in a non-volatile area.
528 * Driver get the sequence number using DCMD
529 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
/*
 * mrsas_get_seq_num: fetch the latest firmware event sequence number.
 * @sc:  Adapter soft state
 * @eli: caller-supplied buffer receiving the event log info
 *
 * Builds a blocking MR_DCMD_CTRL_EVENT_GET_INFO DCMD whose single SGE
 * points at the DMA buffer set up by mrsas_alloc_evt_log_info_cmd(),
 * issues it, copies the result into @eli, and releases both the DMA
 * buffer and the MFI command.
 * NOTE(review): the error-return lines and final return are not visible
 * in this chunk.
 */
533 mrsas_get_seq_num(struct mrsas_softc *sc,
534 struct mrsas_evt_log_info *eli)
536 struct mrsas_mfi_cmd *cmd;
537 struct mrsas_dcmd_frame *dcmd;
539 cmd = mrsas_get_mfi_cmd(sc);
542 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
546 dcmd = &cmd->frame->dcmd;
548 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
549 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
550 mrsas_release_mfi_cmd(cmd);
554 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
556 dcmd->cmd = MFI_CMD_DCMD;
557 dcmd->cmd_status = 0x0;
559 dcmd->flags = MFI_FRAME_DIR_READ;
562 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
563 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
564 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
565 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
567 mrsas_issue_blocked_cmd(sc, cmd);
570 * Copy the data back into callers buffer
572 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
573 mrsas_free_evt_log_info_cmd(sc);
574 mrsas_release_mfi_cmd(cmd);
581 * mrsas_register_aen: Register for asynchronous event notification
582 * @sc: Adapter soft state
583 * @seq_num: Starting sequence number
584 * @class_locale: Class of the event
585 * This function subscribes for events beyond the @seq_num
586 * and type @class_locale.
/*
 * mrsas_register_aen: subscribe to asynchronous event notifications.
 * @sc:                Adapter soft state
 * @seq_num:           first event sequence number of interest
 * @class_locale_word: packed class/locale selector
 *
 * If a pending AEN command already covers the requested class/locale it
 * is left alone; otherwise the pending command is aborted and a superset
 * registration (MR_DCMD_CTRL_EVENT_WAIT) is issued, targeting the
 * pre-allocated evt_detail DMA buffer.
 * NOTE(review): several return statements and the sc->aen_cmd guard/
 * assignment lines are not visible in this chunk.
 */
590 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
591 u_int32_t class_locale_word)
594 struct mrsas_mfi_cmd *cmd;
595 struct mrsas_dcmd_frame *dcmd;
596 union mrsas_evt_class_locale curr_aen;
597 union mrsas_evt_class_locale prev_aen;
600 * If there an AEN pending already (aen_cmd), check if the
601 * class_locale of that pending AEN is inclusive of the new
602 * AEN request we currently have. If it is, then we don't have
603 * to do anything. In other words, whichever events the current
604 * AEN request is subscribing to, have already been subscribed
606 * If the old_cmd is _not_ inclusive, then we have to abort
607 * that command, form a class_locale that is superset of both
608 * old and current and re-issue to the FW
611 curr_aen.word = class_locale_word;
615 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
618 * A class whose enum value is smaller is inclusive of all
619 * higher values. If a PROGRESS (= -1) was previously
620 * registered, then a new registration requests for higher
621 * classes need not be sent to FW. They are automatically
623 * Locale numbers don't have such hierarchy. They are bitmap values
625 if ((prev_aen.members.class <= curr_aen.members.class) &&
626 !((prev_aen.members.locale & curr_aen.members.locale) ^
627 curr_aen.members.locale)) {
629 * Previously issued event registration includes
630 * current request. Nothing to do.
/* Build the superset of old and new subscriptions. */
634 curr_aen.members.locale |= prev_aen.members.locale;
636 if (prev_aen.members.class < curr_aen.members.class)
637 curr_aen.members.class = prev_aen.members.class;
639 sc->aen_cmd->abort_aen = 1;
640 ret_val = mrsas_issue_blocked_abort_cmd(sc,
644 kprintf("mrsas: Failed to abort "
645 "previous AEN command\n");
651 cmd = mrsas_get_mfi_cmd(sc);
656 dcmd = &cmd->frame->dcmd;
658 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
661 * Prepare DCMD for aen registration
663 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
665 dcmd->cmd = MFI_CMD_DCMD;
666 dcmd->cmd_status = 0x0;
668 dcmd->flags = MFI_FRAME_DIR_READ;
671 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
672 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
673 dcmd->mbox.w[0] = seq_num;
674 sc->last_seq_num = seq_num;
675 dcmd->mbox.w[1] = curr_aen.word;
676 dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
677 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
679 if (sc->aen_cmd != NULL) {
680 mrsas_release_mfi_cmd(cmd);
685 * Store reference to the cmd used to register for AEN. When an
686 * application wants us to register for AEN, we have to abort this
687 * cmd and re-register with a new EVENT LOCALE supplied by that app
692 Issue the aen registration frame
694 if (mrsas_issue_dcmd(sc, cmd)){
695 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
702 * mrsas_start_aen - Subscribes to AEN during driver load time
703 * @instance: Adapter soft state
/*
 * mrsas_start_aen: driver-load-time AEN bootstrap.  Fetches the newest
 * firmware event sequence number via mrsas_get_seq_num() and registers
 * for all locales at DEBUG class starting from the next sequence number.
 * NOTE(review): the error-return line after mrsas_get_seq_num() and the
 * trailing argument of the mrsas_register_aen() call are not visible in
 * this chunk.
 */
705 static int mrsas_start_aen(struct mrsas_softc *sc)
707 struct mrsas_evt_log_info eli;
708 union mrsas_evt_class_locale class_locale;
711 /* Get the latest sequence number from FW*/
713 memset(&eli, 0, sizeof(eli));
715 if (mrsas_get_seq_num(sc, &eli))
718 /* Register AEN with FW for latest sequence number plus 1*/
719 class_locale.members.reserved = 0;
720 class_locale.members.locale = MR_EVT_LOCALE_ALL;
721 class_locale.members.class = MR_EVT_CLASS_DEBUG;
723 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
728 * mrsas_attach: PCI entry point
729 * input: device struct pointer
731 * Performs setup of PCI and registers, initializes mutexes and
732 * linked lists, registers interrupts and CAM, and initializes
733 * the adapter/controller to its proper state.
/*
 * mrsas_attach: PCI attach entry point.
 *
 * Order of operations: read tunables, enable bus mastering, map BAR1,
 * initialize locks and command lists, create the /dev/mrsasN node,
 * initialize firmware, attach CAM, set up and enable interrupts, start
 * the OCR thread, publish sysctls and start AEN.  The tail of the
 * function is the unwind ladder for attach failures.
 * NOTE(review): several guard/return lines and the failure labels are
 * not visible in this chunk.
 */
735 static int mrsas_attach(device_t dev)
737 struct mrsas_softc *sc = device_get_softc(dev);
738 uint32_t cmd, bar, error;
740 /* Look up our softc and initialize its fields. */
742 sc->device_id = pci_get_device(dev);
744 mrsas_get_tunables(sc);
747 * Set up PCI and registers
749 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
750 if ( (cmd & PCIM_CMD_PORTEN) == 0) {
753 /* Force the busmaster enable bit on. */
754 cmd |= PCIM_CMD_BUSMASTEREN;
755 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
757 //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
758 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
760 sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
761 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
762 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
764 device_printf(dev, "Cannot allocate PCI registers\n");
767 sc->bus_tag = rman_get_bustag(sc->reg_res);
768 sc->bus_handle = rman_get_bushandle(sc->reg_res);
770 /* Initialize mutexes */
771 lockinit(&sc->sim_lock, "mrsas_sim_lock", 0, LK_CANRECURSE);
772 lockinit(&sc->pci_lock, "mrsas_pci_lock", 0, LK_CANRECURSE);
773 lockinit(&sc->io_lock, "mrsas_io_lock", 0, LK_CANRECURSE);
774 lockinit(&sc->aen_lock, "mrsas_aen_lock", 0, LK_CANRECURSE);
775 spin_init(&sc->ioctl_lock, "mrsasioctl");
776 lockinit(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", 0,
778 lockinit(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", 0,
780 lockinit(&sc->raidmap_lock, "mrsas_raidmap_lock", 0, LK_CANRECURSE);
782 /* Initialize linked list */
783 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
784 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
786 atomic_set(&sc->fw_outstanding,0);
788 sc->io_cmds_highwater = 0;
790 /* Create a /dev entry for this device. */
791 sc->mrsas_cdev = make_dev(&mrsas_ops, device_get_unit(dev), UID_ROOT,
792 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
793 device_get_unit(dev));
/* Stash softc so the cdev entry points can find it. */
795 sc->mrsas_cdev->si_drv1 = sc;
797 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
798 sc->UnevenSpanSupport = 0;
800 /* Initialize Firmware */
801 if (mrsas_init_fw(sc) != SUCCESS) {
805 /* Register SCSI mid-layer */
806 if ((mrsas_cam_attach(sc) != SUCCESS)) {
807 goto attach_fail_cam;
811 if (mrsas_setup_irq(sc) != SUCCESS) {
812 goto attach_fail_irq;
815 /* Enable Interrupts */
816 mrsas_enable_intr(sc);
818 error = kthread_create(mrsas_ocr_thread, sc, &sc->ocr_thread, "mrsas_ocr%d",
819 device_get_unit(sc->mrsas_dev));
821 kprintf("Error %d starting rescan thread\n", error);
822 goto attach_fail_irq;
825 mrsas_setup_sysctl(sc);
827 /* Initiate AEN (Asynchronous Event Notification)*/
829 if (mrsas_start_aen(sc)) {
830 kprintf("Error: start aen failed\n");
/* --- attach failure unwind ladder --- */
838 mrsas_teardown_intr(sc);
840 mrsas_cam_detach(sc);
842 //attach_fail_raidmap:
844 lockuninit(&sc->sim_lock);
845 lockuninit(&sc->aen_lock);
846 lockuninit(&sc->pci_lock);
847 lockuninit(&sc->io_lock);
848 spin_uninit(&sc->ioctl_lock);
849 lockuninit(&sc->mpt_cmd_pool_lock);
850 lockuninit(&sc->mfi_cmd_pool_lock);
851 lockuninit(&sc->raidmap_lock);
853 destroy_dev(sc->mrsas_cdev);
855 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
856 sc->reg_res_id, sc->reg_res);
862 * mrsas_detach: De-allocates and teardown resources
863 * input: device struct pointer
865 * This function is the entry point for device disconnect and detach. It
866 * performs memory de-allocations, shutdown of the controller and various
867 * teardown and destroy resource functions.
/*
 * mrsas_detach: device detach entry point.
 *
 * Flags removal, wakes and waits out the OCR thread and any in-flight
 * reset (polling once per second with progress messages), then flushes
 * the controller cache, shuts the controller down, and tears down
 * interrupts, CAM, locks, BAR mapping, the /dev node and the sysctl
 * tree.
 * NOTE(review): the loop-counter initializations/increments and the
 * final return are not visible in this chunk.
 */
869 static int mrsas_detach(device_t dev)
871 struct mrsas_softc *sc;
874 sc = device_get_softc(dev);
875 sc->remove_in_progress = 1;
876 if(sc->ocr_thread_active)
877 wakeup(&sc->ocr_chan);
878 while(sc->reset_in_progress){
880 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
881 mrsas_dprint(sc, MRSAS_INFO,
882 "[%2d]waiting for ocr to be finished\n",i);
884 tsleep(mrsas_detach, 0, "mr_shutdown", hz);
887 while(sc->ocr_thread_active){
889 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
890 mrsas_dprint(sc, MRSAS_INFO,
892 "mrsas_ocr thread to quit ocr %d\n",i,
893 sc->ocr_thread_active);
895 tsleep(mrsas_detach, 0, "mr_shutdown", hz);
897 mrsas_flush_cache(sc);
898 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
899 mrsas_disable_intr(sc);
900 mrsas_cam_detach(sc);
901 mrsas_teardown_intr(sc);
903 lockuninit(&sc->sim_lock);
904 lockuninit(&sc->aen_lock);
905 lockuninit(&sc->pci_lock);
906 lockuninit(&sc->io_lock);
907 spin_uninit(&sc->ioctl_lock);
908 lockuninit(&sc->mpt_cmd_pool_lock);
909 lockuninit(&sc->mfi_cmd_pool_lock);
910 lockuninit(&sc->raidmap_lock);
912 bus_release_resource(sc->mrsas_dev,
913 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
915 destroy_dev(sc->mrsas_cdev);
916 if (sc->sysctl_tree != NULL)
917 sysctl_ctx_free(&sc->sysctl_ctx);
922 * mrsas_free_mem: Frees allocated memory
923 * input: Adapter instance soft state
925 * This function is called from mrsas_detach() to free previously allocated
/*
 * mrsas_free_mem: free every DMA region and command list owned by the
 * driver (called from mrsas_detach() and from attach failure paths).
 *
 * For each DMA region the pattern is: unload the map if loaded, free
 * the memory if allocated, destroy the tag if created -- each step
 * guarded so partially completed allocations tear down cleanly.
 * NOTE(review): loop-variable declarations (i, max_cmd) and some
 * closing braces are not visible in this chunk.
 */
928 void mrsas_free_mem(struct mrsas_softc *sc)
932 struct mrsas_mfi_cmd *mfi_cmd;
933 struct mrsas_mpt_cmd *mpt_cmd;
936 * Free RAID map memory
938 for (i=0; i < 2; i++)
940 if (sc->raidmap_phys_addr[i])
941 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
942 if (sc->raidmap_mem[i] != NULL)
943 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
944 if (sc->raidmap_tag[i] != NULL)
945 bus_dma_tag_destroy(sc->raidmap_tag[i]);
949 * Free version buffer memory
951 if (sc->verbuf_phys_addr)
952 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
953 if (sc->verbuf_mem != NULL)
954 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
955 if (sc->verbuf_tag != NULL)
956 bus_dma_tag_destroy(sc->verbuf_tag);
960 * Free sense buffer memory
962 if (sc->sense_phys_addr)
963 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
964 if (sc->sense_mem != NULL)
965 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
966 if (sc->sense_tag != NULL)
967 bus_dma_tag_destroy(sc->sense_tag);
970 * Free chain frame memory
972 if (sc->chain_frame_phys_addr)
973 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
974 if (sc->chain_frame_mem != NULL)
975 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
976 if (sc->chain_frame_tag != NULL)
977 bus_dma_tag_destroy(sc->chain_frame_tag);
980 * Free IO Request memory
982 if (sc->io_request_phys_addr)
983 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
984 if (sc->io_request_mem != NULL)
985 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
986 if (sc->io_request_tag != NULL)
987 bus_dma_tag_destroy(sc->io_request_tag);
990 * Free Reply Descriptor memory
992 if (sc->reply_desc_phys_addr)
993 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
994 if (sc->reply_desc_mem != NULL)
995 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
996 if (sc->reply_desc_tag != NULL)
997 bus_dma_tag_destroy(sc->reply_desc_tag);
1000 * Free event detail memory
1002 if (sc->evt_detail_phys_addr)
1003 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1004 if (sc->evt_detail_mem != NULL)
1005 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1006 if (sc->evt_detail_tag != NULL)
1007 bus_dma_tag_destroy(sc->evt_detail_tag);
/* Free the per-command MFI frames, then the frame tag. */
1012 if (sc->mfi_cmd_list) {
1013 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1014 mfi_cmd = sc->mfi_cmd_list[i];
1015 mrsas_free_frame(sc, mfi_cmd);
1018 if (sc->mficmd_frame_tag != NULL)
1019 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1022 * Free MPT internal command list
1024 max_cmd = sc->max_fw_cmds;
1025 if (sc->mpt_cmd_list) {
1026 for (i = 0; i < max_cmd; i++) {
1027 mpt_cmd = sc->mpt_cmd_list[i];
1028 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1029 kfree(sc->mpt_cmd_list[i], M_MRSAS);
1031 kfree(sc->mpt_cmd_list, M_MRSAS);
1032 sc->mpt_cmd_list = NULL;
1036 * Free MFI internal command list
1039 if (sc->mfi_cmd_list) {
1040 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1041 kfree(sc->mfi_cmd_list[i], M_MRSAS);
1043 kfree(sc->mfi_cmd_list, M_MRSAS);
1044 sc->mfi_cmd_list = NULL;
1048 * Free request descriptor memory
1050 kfree(sc->req_desc, M_MRSAS);
1051 sc->req_desc = NULL;
1054 * Destroy parent tag
1056 if (sc->mrsas_parent_tag != NULL)
1057 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1061 * mrsas_teardown_intr: Teardown interrupt
1062 * input: Adapter instance soft state
1064 * This function is called from mrsas_detach() to teardown and release
1065 * bus interrupt resourse.
1067 void mrsas_teardown_intr(struct mrsas_softc *sc)
1069 if (sc->intr_handle)
1070 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
1071 if (sc->mrsas_irq != NULL)
1072 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
1073 if (sc->irq_type == PCI_INTR_TYPE_MSI)
1074 pci_release_msi(sc->mrsas_dev);
1075 sc->intr_handle = NULL;
1079 * mrsas_suspend: Suspend entry point
1080 * input: Device struct pointer
1082 * This function is the entry point for system suspend from the OS.
/*
 * mrsas_suspend: system suspend entry point.  Only fetches the softc in
 * the visible portion; the remainder of the body (if any) is not
 * visible in this chunk.
 */
1084 static int mrsas_suspend(device_t dev)
1086 struct mrsas_softc *sc;
1088 sc = device_get_softc(dev);
1093 * mrsas_resume: Resume entry point
1094 * input: Device struct pointer
1096 * This function is the entry point for system resume from the OS.
/*
 * mrsas_resume: system resume entry point.  Only fetches the softc in
 * the visible portion; the remainder of the body (if any) is not
 * visible in this chunk.
 */
1098 static int mrsas_resume(device_t dev)
1100 struct mrsas_softc *sc;
1102 sc = device_get_softc(dev);
1107 * mrsas_ioctl: IOCtl commands entry point.
1109 * This function is the entry point for IOCtls from the OS. It calls the
1110 * appropriate function for processing depending on the command received.
/*
 * mrsas_ioctl: ioctl entry point for /dev/mrsasN.
 *
 * Rejects requests while driver removal is in progress; if an OCR
 * (reset) is running, drops the ioctl spinlock and polls once per
 * second until the reset completes before dispatching the command
 * (firmware passthrough or bus rescan).
 * NOTE(review): the switch statement header, break/default arms and the
 * final return are not visible in this chunk.
 */
1113 mrsas_ioctl(struct dev_ioctl_args *ap)
1115 cdev_t dev = ap->a_head.a_dev;
1116 u_long cmd = ap->a_cmd;
1117 caddr_t arg = ap->a_data;
1118 struct mrsas_softc *sc;
1121 sc = (struct mrsas_softc *)(dev->si_drv1);
1123 if (sc->remove_in_progress) {
1124 mrsas_dprint(sc, MRSAS_INFO,
1125 "Driver remove or shutdown called.\n");
1129 spin_lock(&sc->ioctl_lock);
1130 if (!sc->reset_in_progress) {
1131 spin_unlock(&sc->ioctl_lock);
1135 /* Release ioctl_lock, and wait for OCR
1137 spin_unlock(&sc->ioctl_lock);
1138 while(sc->reset_in_progress){
1140 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1141 mrsas_dprint(sc, MRSAS_INFO,
1143 "OCR to be finished %d\n",i,
1144 sc->ocr_thread_active);
1146 tsleep(mrsas_ioctl, 0, "mr_ioctl", hz);
1151 case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
1152 ret = mrsas_passthru(sc, (void *)arg);
1154 case MRSAS_IOC_SCAN_BUS:
1155 ret = mrsas_bus_scan(sc);
1163 * mrsas_setup_irq: Set up interrupt.
1164 * input: Adapter instance soft state
1166 * This function sets up interrupts as a bus resource, with flags indicating
1167 * resource permitting contemporaneous sharing and for resource to activate
/*
 * pci_alloc_1intr() picks MSI when enabled via the mrsas_msi_enable
 * tunable, otherwise a legacy INTx line; the chosen type is remembered
 * in sc->irq_type so mrsas_teardown_intr() can release it correctly.
 */
1170 static int mrsas_setup_irq(struct mrsas_softc *sc)
1175 sc->irq_type = pci_alloc_1intr(sc->mrsas_dev, mrsas_msi_enable,
1176 &sc->irq_id, &irq_flags);
1178 sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
1179 &sc->irq_id, irq_flags);
1180 if (sc->mrsas_irq == NULL){
1181 device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
/* Hook mrsas_isr() as an MPSAFE handler (no Giant lock). */
1184 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE,
1185 mrsas_isr, sc, &sc->intr_handle, NULL)) {
1186 device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
1194 * mrsas_isr: ISR entry point
1195 * input: argument pointer
1197 * This function is the interrupt service routine entry point. There
1198 * are two types of interrupts, state change interrupt and response
1199 * interrupt. If an interrupt is not ours, we just return.
1201 void mrsas_isr(void *arg)
1203 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1206 /* Clear FW state change interrupt */
1207 status = mrsas_clear_intr(sc);
1209 /* Not our interrupt */
1213 /* If we are resetting, bail */
1214 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1215 kprintf(" Entered into ISR when OCR is going active. \n");
1216 mrsas_clear_intr(sc);
1219 /* Process for reply request and clear response interrupt */
/* Only ack the interrupt here if mrsas_complete_cmd() did not already. */
1220 if (mrsas_complete_cmd(sc) != SUCCESS)
1221 mrsas_clear_intr(sc);
1227 * mrsas_complete_cmd: Process reply request
1228 * input: Adapter instance soft state
1230 * This function is called from mrsas_isr() to process reply request and
1231 * clear response interrupt. Processing of the reply request entails
1232 * walking through the reply descriptor array for the command request
1233 * pended from Firmware. We look at the Function field to determine
1234 * the command type and perform the appropriate action. Before we
1235 * return, we clear the response interrupt.
1237 static int mrsas_complete_cmd(struct mrsas_softc *sc)
1239 Mpi2ReplyDescriptorsUnion_t *desc;
1240 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1241 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1242 struct mrsas_mpt_cmd *cmd_mpt;
1243 struct mrsas_mfi_cmd *cmd_mfi;
1244 u_int8_t arm, reply_descript_type;
1245 u_int16_t smid, num_completed;
1246 u_int8_t status, extStatus;
1247 union desc_value desc_val;
1248 PLD_LOAD_BALANCE_INFO lbinfo;
1249 u_int32_t device_id;
1250 int threshold_reply_count = 0;
1253 /* If we have a hardware error, no need to continue */
1254 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Resume consuming the reply ring where the previous ISR pass stopped. */
1257 desc = sc->reply_desc_mem;
1258 desc += sc->last_reply_idx;
1260 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1262 desc_val.word = desc->Words;
1265 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1267 /* Find our reply descriptor for the command and process */
/* An all-ones descriptor is the "unused" sentinel -- stop at the first one. */
1268 while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
/* SMIDs are 1-based; slot 0 of mpt_cmd_list corresponds to SMID 1. */
1270 smid = reply_desc->SMID;
1271 cmd_mpt = sc->mpt_cmd_list[smid -1];
1272 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
1274 status = scsi_io_req->RaidContext.status;
1275 extStatus = scsi_io_req->RaidContext.exStatus;
1277 switch (scsi_io_req->Function)
1279 case MPI2_FUNCTION_SCSI_IO_REQUEST : /*Fast Path IO.*/
/* Undo the RAID-1 load-balance accounting taken when the IO was issued. */
1280 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1281 lbinfo = &sc->load_balance_info[device_id];
1282 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1283 arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
1284 atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
1285 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1287 //Fall thru and complete IO
1288 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1289 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1290 mrsas_cmd_done(sc, cmd_mpt);
1291 scsi_io_req->RaidContext.status = 0;
1292 scsi_io_req->RaidContext.exStatus = 0;
1293 atomic_dec(&sc->fw_outstanding);
1295 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
1296 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1297 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1299 mrsas_release_mpt_cmd(cmd_mpt);
/* Advance the consumer index, wrapping at the ring depth. */
1303 sc->last_reply_idx++;
1304 if (sc->last_reply_idx >= sc->reply_q_depth)
1305 sc->last_reply_idx = 0;
1307 desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
1309 threshold_reply_count++;
1311 /* Get the next reply descriptor */
1312 if (!sc->last_reply_idx)
1313 desc = sc->reply_desc_mem;
1317 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1318 desc_val.word = desc->Words;
1320 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1322 if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1326 * Write to reply post index after completing threshold reply count
1327 * and still there are more replies in reply queue pending to be
/* Periodic ack keeps FW posting replies during long completion bursts. */
1330 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1331 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
1332 sc->last_reply_idx);
1333 threshold_reply_count = 0;
1337 /* No match, just return */
1338 if (num_completed == 0)
1341 /* Clear response interrupt */
1342 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
1348 * mrsas_map_mpt_cmd_status: Map FW command status to CAM status.
1349 * input: Adapter instance soft state
1351 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1352 * It checks the command status and maps the appropriate CAM status for the CCB.
1354 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1356 struct mrsas_softc *sc = cmd->sc;
1357 u_int8_t *sense_data;
1361 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1363 case MFI_STAT_SCSI_IO_FAILED:
1364 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1365 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1366 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1368 /* For now just copy 18 bytes back */
1369 memcpy(sense_data, cmd->sense, 18);
1370 cmd->ccb_ptr->csio.sense_len = 18;
1371 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
/*
 * A nonzero LUN on a missing/offline device means invalid LUN;
 * LUN 0 means the whole device is gone.
 */
1374 case MFI_STAT_LD_OFFLINE:
1375 case MFI_STAT_DEVICE_NOT_FOUND:
1376 if (cmd->ccb_ptr->ccb_h.target_lun)
1377 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1379 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1381 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1382 /*send status to CAM layer to retry sending command without
1383 * decrementing retry counter*/
1384 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
/* Unrecognized FW status: report a generic completion error to CAM. */
1387 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1388 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1389 cmd->ccb_ptr->csio.scsi_status = status;
1395 * mrsas_alloc_mem: Allocate DMAable memory.
1396 * input: Adapter instance soft state
1398 * This function creates the parent DMA tag and allocates DMAable memory.
1399 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
1400 * into Kernel virtual address. Callback argument is physical memory address.
/*
 * Every buffer below follows the same three-step busdma pattern:
 * create a child tag under mrsas_parent_tag, allocate memory against it,
 * then load the map so mrsas_addr_cb() records the physical address.
 * Sizes (io_frames_alloc_sz, reply_alloc_sz, ...) were computed earlier
 * in mrsas_init_adapter() from max_fw_cmds.
 */
1402 static int mrsas_alloc_mem(struct mrsas_softc *sc)
1404 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1405 chain_frame_size, evt_detail_size;
1408 * Allocate parent DMA tag
1410 if (bus_dma_tag_create(NULL, /* parent */
1413 BUS_SPACE_MAXADDR, /* lowaddr */
1414 BUS_SPACE_MAXADDR, /* highaddr */
1415 NULL, NULL, /* filter, filterarg */
1416 MRSAS_MAX_IO_SIZE,/* maxsize */
1417 MRSAS_MAX_SGL, /* nsegments */
1418 MRSAS_MAX_IO_SIZE,/* maxsegsize */
1420 &sc->mrsas_parent_tag /* tag */
1422 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1427 * Allocate for version buffer
1429 verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
1430 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1431 1, 0, // algnmnt, boundary
1432 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1433 BUS_SPACE_MAXADDR, // highaddr
1434 NULL, NULL, // filter, filterarg
1435 verbuf_size, // maxsize
1437 verbuf_size, // maxsegsize
1438 BUS_DMA_ALLOCNOW, // flags
1440 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1443 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1444 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1445 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1448 bzero(sc->verbuf_mem, verbuf_size);
1449 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1450 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
1451 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1456 * Allocate IO Request Frames
1458 io_req_size = sc->io_frames_alloc_sz;
1459 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1460 16, 0, // algnmnt, boundary
1461 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1462 BUS_SPACE_MAXADDR, // highaddr
1463 NULL, NULL, // filter, filterarg
1464 io_req_size, // maxsize
1466 io_req_size, // maxsegsize
1467 BUS_DMA_ALLOCNOW, // flags
1468 &sc->io_request_tag)) {
1469 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1472 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1473 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1474 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1477 bzero(sc->io_request_mem, io_req_size);
1478 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1479 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1480 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1481 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1486 * Allocate Chain Frames
1488 chain_frame_size = sc->chain_frames_alloc_sz;
1489 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1490 4, 0, // algnmnt, boundary
1491 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1492 BUS_SPACE_MAXADDR, // highaddr
1493 NULL, NULL, // filter, filterarg
1494 chain_frame_size, // maxsize
1496 chain_frame_size, // maxsegsize
1497 BUS_DMA_ALLOCNOW, // flags
1498 &sc->chain_frame_tag)) {
1499 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1502 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1503 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1504 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1507 bzero(sc->chain_frame_mem, chain_frame_size);
1508 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1509 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1510 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1511 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1516 * Allocate Reply Descriptor Array
1518 reply_desc_size = sc->reply_alloc_sz;
1519 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1520 16, 0, // algnmnt, boundary
1521 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1522 BUS_SPACE_MAXADDR, // highaddr
1523 NULL, NULL, // filter, filterarg
1524 reply_desc_size, // maxsize
1526 reply_desc_size, // maxsegsize
1527 BUS_DMA_ALLOCNOW, // flags
1528 &sc->reply_desc_tag)) {
1529 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1532 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1533 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1534 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1537 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1538 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1539 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1540 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1545 * Allocate Sense Buffer Array. Keep in lower 4GB
1547 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1548 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1549 64, 0, // algnmnt, boundary
1550 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1551 BUS_SPACE_MAXADDR, // highaddr
1552 NULL, NULL, // filter, filterarg
1553 sense_size, // maxsize
1555 sense_size, // maxsegsize
1556 BUS_DMA_ALLOCNOW, // flags
1558 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1561 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1562 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1563 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1566 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1567 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1569 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1574 * Allocate for Event detail structure
1576 evt_detail_size = sizeof(struct mrsas_evt_detail);
1577 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1578 1, 0, // algnmnt, boundary
1579 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1580 BUS_SPACE_MAXADDR, // highaddr
1581 NULL, NULL, // filter, filterarg
1582 evt_detail_size, // maxsize
1584 evt_detail_size, // maxsegsize
1585 BUS_DMA_ALLOCNOW, // flags
1586 &sc->evt_detail_tag)) {
1587 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1590 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1591 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1592 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1595 bzero(sc->evt_detail_mem, evt_detail_size);
1596 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1597 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1598 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1599 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1605 * Create a dma tag for data buffers; size will be the maximum
1606 * possible I/O size (280kB).
1608 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1611 BUS_SPACE_MAXADDR, // lowaddr
1612 BUS_SPACE_MAXADDR, // highaddr
1613 NULL, NULL, // filter, filterarg
1614 MRSAS_MAX_IO_SIZE, // maxsize
1615 MRSAS_MAX_SGL, // nsegments
1616 MRSAS_MAX_IO_SIZE, // maxsegsize
1617 BUS_DMA_ALLOCNOW, // flags
1619 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1627 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1628 * input: callback argument,
1629 * machine dependent type that describes DMA segments,
1630 * number of segments,
1633 * This function is for the driver to receive mapping information resultant
1634 * of the bus_dmamap_load(). The information is actually not being used,
1635 * but the address is saved anyway.
1638 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1643 *addr = segs[0].ds_addr;
1647 * mrsas_setup_raidmap: Set up RAID map.
1648 * input: Adapter instance soft state
1650 * Allocate DMA memory for the RAID maps and perform setup.
/*
 * Two map buffers are allocated (index 0 and 1) so the driver can
 * double-buffer RAID map updates from firmware.
 */
1652 static int mrsas_setup_raidmap(struct mrsas_softc *sc)
1654 sc->map_sz = sizeof(MR_FW_RAID_MAP) +
1655 (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
1657 for (int i=0; i < 2; i++)
1659 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1660 4, 0, // algnmnt, boundary
1661 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1662 BUS_SPACE_MAXADDR, // highaddr
1663 NULL, NULL, // filter, filterarg
1664 sc->map_sz, // maxsize
1666 sc->map_sz, // maxsegsize
1667 BUS_DMA_ALLOCNOW, // flags
1668 &sc->raidmap_tag[i])) {
1669 device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
1672 if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
1673 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1674 device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
1677 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1678 sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1680 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1683 if (!sc->raidmap_mem[i]) {
1684 device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
/* Fetch the current map from FW, then sync it back to acknowledge. */
1689 if (!mrsas_get_map_info(sc))
1690 mrsas_sync_map_info(sc);
1696 * mrsas_init_fw: Initialize Firmware
1697 * input: Adapter soft state
1699 * Calls transition_to_ready() to make sure Firmware is in operational
1700 * state and calls mrsas_init_adapter() to send IOC_INIT command to
1701 * Firmware. It issues internal commands to get the controller info
1702 * after the IOC_INIT command response is received by Firmware.
1703 * Note: code relating to get_pdlist, get_ld_list and max_sectors
1704 * are currently not being used, it is left here as placeholder.
1706 static int mrsas_init_fw(struct mrsas_softc *sc)
1708 u_int32_t max_sectors_1;
1709 u_int32_t max_sectors_2;
1710 u_int32_t tmp_sectors;
1711 struct mrsas_ctrl_info *ctrl_info;
1716 /* Make sure Firmware is ready */
1717 ret = mrsas_transition_to_ready(sc, ocr);
1718 if (ret != SUCCESS) {
1722 /* Get operational params, sge flags, send init cmd to ctlr */
1723 if (mrsas_init_adapter(sc) != SUCCESS){
1724 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
1728 /* Allocate internal commands for pass-thru */
1729 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
1730 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
1734 if (mrsas_setup_raidmap(sc) != SUCCESS) {
1735 device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
1739 /* For pass-thru, get PD/LD list and controller info */
1740 memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
1741 mrsas_get_pd_list(sc);
1743 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
1744 mrsas_get_ld_list(sc);
1746 //memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));
/* M_NOWAIT may return NULL; the NULL check is folded into the if below. */
1748 ctrl_info = kmalloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
1751 * Compute the max allowed sectors per IO: The controller info has two
1752 * limits on max sectors. Driver should use the minimum of these two.
1754 * 1 << stripe_sz_ops.min = max sectors per strip
1756 * Note that older firmwares ( < FW ver 30) didn't report information
1757 * to calculate max_sectors_1. So the number ended up as zero always.
1760 if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
1761 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1762 ctrl_info->max_strips_per_io;
1763 max_sectors_2 = ctrl_info->max_request_size;
1764 tmp_sectors = min(max_sectors_1 , max_sectors_2);
1765 sc->disableOnlineCtrlReset =
1766 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
1767 sc->UnevenSpanSupport =
1768 ctrl_info->adapterOperations2.supportUnevenSpans;
/* Fast-path IO requires a RAID map that validates successfully. */
1769 if(sc->UnevenSpanSupport) {
1770 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
1771 sc->UnevenSpanSupport);
1772 if (MR_ValidateMapInfo(sc))
1773 sc->fast_path_io = 1;
1775 sc->fast_path_io = 0;
1779 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
1781 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
1782 sc->max_sectors_per_req = tmp_sectors;
1785 kfree(ctrl_info, M_MRSAS);
1791 * mrsas_init_adapter: Initializes the adapter/controller
1792 * input: Adapter soft state
1794 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
1795 * ROC/controller. The FW register is read to determined the number of
1796 * commands that is supported. All memory allocations for IO is based on
1797 * max_cmd. Appropriate calculations are performed in this function.
1799 int mrsas_init_adapter(struct mrsas_softc *sc)
1805 /* Read FW status register */
1806 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
1808 /* Get operational params from status register */
1809 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
1811 /* Decrement the max supported by 1, to correlate with FW */
1812 sc->max_fw_cmds = sc->max_fw_cmds-1;
1813 max_cmd = sc->max_fw_cmds;
1815 /* Determine allocation size of command frames */
/* Reply queue depth: 2*max_cmd+1, rounded up to a multiple of 16. */
1816 sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
1817 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
1818 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
/* One extra IO frame beyond max_cmd; frame 0 is reserved (see alloc_mpt_cmds). */
1819 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
1820 sc->chain_frames_alloc_sz = 1024 * max_cmd;
/* SGE counts: what fits in the main frame after the SGL offset, plus chain. */
1821 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1822 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
1824 sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
1825 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
1827 /* Used for pass thru MFI frame (DCMD) */
1828 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
1830 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1831 sizeof(MPI2_SGE_IO_UNION))/16;
1833 sc->last_reply_idx = 0;
1835 ret = mrsas_alloc_mem(sc);
1839 ret = mrsas_alloc_mpt_cmds(sc);
1843 ret = mrsas_ioc_init(sc);
1852 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
1853 * input: Adapter soft state
1855 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
/*
 * The buffer holds the MFI init frame in the first 1024 bytes and the
 * MPI2 IOC INIT request after it (see mrsas_ioc_init(), which points
 * IOCInitMsg at ioc_init_mem + 1024).
 */
1857 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
1861 /* Allocate IOC INIT command */
1862 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
1863 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1864 1, 0, // algnmnt, boundary
1865 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1866 BUS_SPACE_MAXADDR, // highaddr
1867 NULL, NULL, // filter, filterarg
1868 ioc_init_size, // maxsize
1870 ioc_init_size, // maxsegsize
1871 BUS_DMA_ALLOCNOW, // flags
1872 &sc->ioc_init_tag)) {
1873 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
1876 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
1877 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
1878 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
1881 bzero(sc->ioc_init_mem, ioc_init_size);
1882 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
1883 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
1884 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
1885 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
1893 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
1894 * input: Adapter soft state
1896 * Deallocates memory of the IOC Init cmd.
/* Teardown mirrors allocation order: unload map, free memory, destroy tag. */
1898 void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
1900 if (sc->ioc_init_phys_mem)
1901 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
1902 if (sc->ioc_init_mem != NULL)
1903 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
1904 if (sc->ioc_init_tag != NULL)
1905 bus_dma_tag_destroy(sc->ioc_init_tag);
1909 * mrsas_ioc_init: Sends IOC Init command to FW
1910 * input: Adapter soft state
1912 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
1914 int mrsas_ioc_init(struct mrsas_softc *sc)
1916 struct mrsas_init_frame *init_frame;
1917 pMpi2IOCInitRequest_t IOCInitMsg;
1918 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
1919 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
1920 bus_addr_t phys_addr;
1923 /* Allocate memory for the IOC INIT command */
1924 if (mrsas_alloc_ioc_cmd(sc)) {
1925 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/* MPI2 IOC INIT request lives 1024 bytes into the buffer (after MFI frame). */
1929 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
1930 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
1931 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1932 IOCInitMsg->MsgVersion = MPI2_VERSION;
1933 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
1934 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
1935 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
1936 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
1937 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
/* The MFI wrapper frame at offset 0 carries the IOC INIT to firmware. */
1939 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
1940 init_frame->cmd = MFI_CMD_INIT;
1941 init_frame->cmd_status = 0xFF;
1942 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1944 if (sc->verbuf_mem) {
1945 ksnprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
1947 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
1948 init_frame->driver_ver_hi = 0;
1951 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
1952 init_frame->queue_info_new_phys_addr_lo = phys_addr;
1953 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
1955 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
1956 req_desc.MFAIo.RequestFlags =
1957 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1959 mrsas_disable_intr(sc);
1960 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
1962 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
1965 * Poll response timer to wait for Firmware response. While this
1966 * timer with the DELAY call could block CPU, the time interval for
1967 * this is only 1 millisecond.
/* cmd_status stays 0xFF until FW writes a result; 0 means success. */
1969 if (init_frame->cmd_status == 0xFF) {
1970 for (i=0; i < (max_wait * 1000); i++){
1971 if (init_frame->cmd_status == 0xFF)
1978 if (init_frame->cmd_status == 0)
1979 mrsas_dprint(sc, MRSAS_OCR,
1980 "IOC INIT response received from FW.\n");
1984 if (init_frame->cmd_status == 0xFF)
1985 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
1987 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
1991 mrsas_free_ioc_cmd(sc);
1996 * mrsas_alloc_mpt_cmds: Allocates the command packets
1997 * input: Adapter instance soft state
1999 * This function allocates the internal commands for IOs. Each command that is
2000 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
2001 * An array is allocated with mrsas_mpt_cmd context. The free commands are
2002 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2005 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2009 struct mrsas_mpt_cmd *cmd;
2010 pMpi2ReplyDescriptorsUnion_t reply_desc;
2011 u_int32_t offset, chain_offset, sense_offset;
2012 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2013 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2015 max_cmd = sc->max_fw_cmds;
2017 sc->req_desc = kmalloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2018 if (!sc->req_desc) {
2019 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2022 memset(sc->req_desc, 0, sc->request_alloc_sz);
2025 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
2026 * dynamic array first and then allocate individual commands.
2028 sc->mpt_cmd_list = kmalloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
2029 if (!sc->mpt_cmd_list) {
2030 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2033 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
2034 for (i = 0; i < max_cmd; i++) {
2035 sc->mpt_cmd_list[i] = kmalloc(sizeof(struct mrsas_mpt_cmd),
2037 if (!sc->mpt_cmd_list[i]) {
/* Unwind every command allocated so far before failing. */
2038 for (j = 0; j < i; j++)
2039 kfree(sc->mpt_cmd_list[j],M_MRSAS);
2040 kfree(sc->mpt_cmd_list, M_MRSAS);
2041 sc->mpt_cmd_list = NULL;
/* Skip IO frame 0: firmware reserves it (SMIDs are 1-based). */
2046 io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2047 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2048 chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
2049 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2050 sense_base = (u_int8_t*)sc->sense_mem;
2051 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
/* Carve per-command slices out of the shared IO/chain/sense regions. */
2052 for (i = 0; i < max_cmd; i++) {
2053 cmd = sc->mpt_cmd_list[i];
2054 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2055 chain_offset = 1024 * i;
2056 sense_offset = MRSAS_SENSE_LEN * i;
2057 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2059 cmd->ccb_ptr = NULL;
2060 callout_init(&cmd->cm_callout);
2061 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2063 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2064 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2065 cmd->io_request_phys_addr = io_req_base_phys + offset;
2066 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2067 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2068 cmd->sense = sense_base + sense_offset;
2069 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2070 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2073 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2076 /* Initialize reply descriptor array to 0xFFFFFFFF */
2077 reply_desc = sc->reply_desc_mem;
2078 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2079 reply_desc->Words = MRSAS_ULONG_MAX;
2085 * mrsas_fire_cmd: Sends command to FW
2086 * input: Adapter soft state
2087 * request descriptor address low
2088 * request descriptor address high
2090 * This functions fires the command to Firmware by writing to the
2091 * inbound_low_queue_port and inbound_high_queue_port.
/*
 * The pci_lock serializes the low/high queue port write pair; the low
 * word is written first, and the high-word write submits the request.
 */
2093 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2094 u_int32_t req_desc_hi)
2096 lockmgr(&sc->pci_lock, LK_EXCLUSIVE);
2097 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2099 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2101 lockmgr(&sc->pci_lock, LK_RELEASE);
2105 * mrsas_transition_to_ready: Move FW to Ready state
2106 * input: Adapter instance soft state
2108 * During the initialization, FW passes can potentially be in any one of
2109 * several possible states. If the FW in operational, waiting-for-handshake
2110 * states, driver must take steps to bring it to ready state. Otherwise, it
2111 * has to wait for the ready state.
2113 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2117 u_int32_t val, fw_state;
2118 u_int32_t cur_state;
2119 u_int32_t abs_state, curr_abs_state;
/* FW state lives in the outbound scratch pad register, masked by MFI_STATE_MASK. */
2121 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2122 fw_state = val & MFI_STATE_MASK;
2123 max_wait = MRSAS_RESET_WAIT_TIME;
2125 if (fw_state != MFI_STATE_READY)
2126 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
/* Drive the state machine: nudge FW where possible, otherwise just wait. */
2128 while (fw_state != MFI_STATE_READY) {
2129 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2131 case MFI_STATE_FAULT:
2132 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2134 cur_state = MFI_STATE_FAULT;
2139 case MFI_STATE_WAIT_HANDSHAKE:
2140 /* Set the CLR bit in inbound doorbell */
2141 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2142 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
2143 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2145 case MFI_STATE_BOOT_MESSAGE_PENDING:
2146 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2148 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2150 case MFI_STATE_OPERATIONAL:
2151 /* Bring it to READY state; assuming max wait 10 secs */
2152 mrsas_disable_intr(sc);
2153 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
/* Poll the doorbell until FW acknowledges the reset request. */
2154 for (i=0; i < max_wait * 1000; i++) {
2155 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2160 cur_state = MFI_STATE_OPERATIONAL;
2162 case MFI_STATE_UNDEFINED:
2163 /* This state should not last for more than 2 seconds */
2164 cur_state = MFI_STATE_UNDEFINED;
2166 case MFI_STATE_BB_INIT:
2167 cur_state = MFI_STATE_BB_INIT;
2169 case MFI_STATE_FW_INIT:
2170 cur_state = MFI_STATE_FW_INIT;
2172 case MFI_STATE_FW_INIT_2:
2173 cur_state = MFI_STATE_FW_INIT_2;
2175 case MFI_STATE_DEVICE_SCAN:
2176 cur_state = MFI_STATE_DEVICE_SCAN;
2178 case MFI_STATE_FLUSH_CACHE:
2179 cur_state = MFI_STATE_FLUSH_CACHE;
2182 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2187 * The cur_state should not last for more than max_wait secs
/* Re-read until the raw (absolute) state value changes or we time out. */
2189 for (i = 0; i < (max_wait * 1000); i++) {
2190 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2191 outbound_scratch_pad))& MFI_STATE_MASK);
2192 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2193 outbound_scratch_pad));
2194 if (abs_state == curr_abs_state)
2201 * Return error if fw_state hasn't changed after max_wait
2203 if (curr_abs_state == abs_state) {
2204 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2205 "in %d secs\n", fw_state, max_wait);
2209 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2210 //device_printf(sc->mrsas_dev, "FW now in Ready state\n");del?
2215 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2216 * input: Adapter soft state
2218 * This function removes an MFI command from the command list.
2220 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2222 struct mrsas_mfi_cmd *cmd = NULL;
/* Dequeue the head of the free MFI command pool under the pool lock.
 * cmd remains NULL when the free list is empty, so callers must check
 * the return value before use. */
2224 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
2225 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
2226 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2227 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2229 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);
2235 * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
2236 * input: Adapter Context.
2238 * This function will check FW status register and flag
2239 * do_timeout_reset flag. It will do OCR/Kill adapter if
2240 * FW is in fault state or IO timed out has trigger reset.
2243 mrsas_ocr_thread(void *arg)
2245 struct mrsas_softc *sc;
2246 u_int32_t fw_status, fw_state;
2248 sc = (struct mrsas_softc *)arg;
2250 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2252 sc->ocr_thread_active = 1;
2253 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
/* Poll loop: lksleep() releases sim_lock while waiting up to
 * mrsas_fw_fault_check_delay seconds, then the FW scratch-pad state
 * and the timeout-reset flag are re-examined. */
2255 /* Sleep for 1 second and check the queue status*/
2256 lksleep(&sc->ocr_chan, &sc->sim_lock, 0,
2257 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2258 if (sc->remove_in_progress) {
2259 mrsas_dprint(sc, MRSAS_OCR,
2260 "Exit due to shutdown from %s\n", __func__);
2263 fw_status = mrsas_read_reg(sc,
2264 offsetof(mrsas_reg_set, outbound_scratch_pad));
2265 fw_state = fw_status & MFI_STATE_MASK;
2266 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2267 device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
2268 sc->do_timedout_reset?"IO Timeout":
2269 "FW fault detected");
/* Mark the reset in progress under ioctl_lock so concurrent ioctl
 * entry observes a consistent reset_in_progress flag. */
2270 spin_lock(&sc->ioctl_lock);
2271 sc->reset_in_progress = 1;
2273 spin_unlock(&sc->ioctl_lock);
/* Freeze the CAM sim around the controller reset, then release it
 * and clear both trigger flags. */
2274 mrsas_xpt_freeze(sc);
2275 mrsas_reset_ctrl(sc);
2276 mrsas_xpt_release(sc);
2277 sc->reset_in_progress = 0;
2278 sc->do_timedout_reset = 0;
2281 lockmgr(&sc->sim_lock, LK_RELEASE);
2282 sc->ocr_thread_active = 0;
2287 * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
2288 * input: Adapter Context.
2290 * This function will clear reply descriptor so that post OCR
2291 * driver and FW will lost old history.
2293 void mrsas_reset_reply_desc(struct mrsas_softc *sc)
2296 pMpi2ReplyDescriptorsUnion_t reply_desc;
/* Rewind the reply-queue consumer index and mark every descriptor in
 * reply_desc_mem as unused (all-ones pattern) so stale pre-reset
 * completions are not re-processed after OCR. */
2298 sc->last_reply_idx = 0;
2299 reply_desc = sc->reply_desc_mem;
2300 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2301 reply_desc->Words = MRSAS_ULONG_MAX;
2306 * mrsas_reset_ctrl Core function to OCR/Kill adapter.
2307 * input: Adapter Context.
2309 * This function will run from thread context so that it can sleep.
2310 * 1. Do not handle OCR if FW is in HW critical error.
2311 * 2. Wait for outstanding command to complete for 180 seconds.
2312 * 3. If #2 does not find any outstanding command Controller is in working
2313 * state, so skip OCR.
2314 * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
2315 * 4. Start of the OCR, return all SCSI command back to CAM layer which has
2317 * 5. Post OCR, Re-fire Managment command and move Controller to Operation
2320 int mrsas_reset_ctrl(struct mrsas_softc *sc)
2322 int retval = SUCCESS, i, j, retry = 0;
2323 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2325 struct mrsas_mfi_cmd *mfi_cmd;
2326 struct mrsas_mpt_cmd *mpt_cmd;
2327 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
/* Refuse to OCR an adapter already declared dead. */
2329 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2330 device_printf(sc->mrsas_dev,
2331 "mrsas: Hardware critical error, returning FAIL.\n");
/* Mark the fusion layer in-reset and quiesce interrupts before
 * touching any hardware registers. */
2335 set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2336 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2337 mrsas_disable_intr(sc);
2340 /* First try waiting for commands to complete */
2341 if (mrsas_wait_for_outstanding(sc)) {
2342 mrsas_dprint(sc, MRSAS_OCR,
2343 "resetting adapter from %s.\n",
2345 /* Now return commands back to the CAM layer */
2346 for (i = 0 ; i < sc->max_fw_cmds; i++) {
2347 mpt_cmd = sc->mpt_cmd_list[i];
2348 if (mpt_cmd->ccb_ptr) {
2349 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2350 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2351 mrsas_cmd_done(sc, mpt_cmd);
2352 atomic_dec(&sc->fw_outstanding);
/* Decide between OCR and kill: either the user disabled online
 * controller reset, or FW faulted without the reset-adapter bit. */
2356 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2357 outbound_scratch_pad));
2358 abs_state = status_reg & MFI_STATE_MASK;
2359 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2360 if (sc->disableOnlineCtrlReset ||
2361 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2362 /* Reset not supported, kill adapter */
2363 mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
2365 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
2370 /* Now try to reset the chip */
2371 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
/* Magic 6-key unlock sequence that enables the diagnostic write
 * interface on the fusion chip. */
2372 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2373 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2374 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2375 MPI2_WRSEQ_1ST_KEY_VALUE);
2376 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2377 MPI2_WRSEQ_2ND_KEY_VALUE);
2378 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2379 MPI2_WRSEQ_3RD_KEY_VALUE);
2380 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2381 MPI2_WRSEQ_4TH_KEY_VALUE);
2382 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2383 MPI2_WRSEQ_5TH_KEY_VALUE);
2384 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2385 MPI2_WRSEQ_6TH_KEY_VALUE);
2387 /* Check that the diag write enable (DRWE) bit is on */
2388 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2391 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2393 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2395 if (retry++ == 100) {
2396 mrsas_dprint(sc, MRSAS_OCR,
2397 "Host diag unlock failed!\n");
2401 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2404 /* Send chip reset command */
2405 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2406 host_diag | HOST_DIAG_RESET_ADAPTER);
2409 /* Make sure reset adapter bit is cleared */
2410 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2413 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2415 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2417 if (retry++ == 1000) {
2418 mrsas_dprint(sc, MRSAS_OCR,
2419 "Diag reset adapter never cleared!\n");
2423 if (host_diag & HOST_DIAG_RESET_ADAPTER)
/* Wait for FW to progress past its early init states before
 * attempting the READY transition. */
2426 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2427 outbound_scratch_pad)) & MFI_STATE_MASK;
2430 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2432 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2433 outbound_scratch_pad)) & MFI_STATE_MASK;
2435 if (abs_state <= MFI_STATE_FW_INIT) {
2436 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2437 " state = 0x%x\n", abs_state);
2441 /* Wait for FW to become ready */
2442 if (mrsas_transition_to_ready(sc, 1)) {
2443 mrsas_dprint(sc, MRSAS_OCR,
2444 "mrsas: Failed to transition controller to ready.\n");
/* Re-arm the reply queue and re-initialize the IOC after reset. */
2448 mrsas_reset_reply_desc(sc);
2449 if (mrsas_ioc_init(sc)) {
2450 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2454 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2455 mrsas_enable_intr(sc);
2456 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2458 /* Re-fire management commands */
2459 for (j = 0 ; j < sc->max_fw_cmds; j++) {
2460 mpt_cmd = sc->mpt_cmd_list[j];
2461 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2462 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
/* Pending LD map updates are dropped (a fresh map is fetched
 * below); all other pended MFI commands are re-issued. */
2463 if (mfi_cmd->frame->dcmd.opcode ==
2464 MR_DCMD_LD_MAP_GET_INFO) {
2465 mrsas_release_mfi_cmd(mfi_cmd);
2466 mrsas_release_mpt_cmd(mpt_cmd);
2468 req_desc = mrsas_get_request_desc(sc,
2469 mfi_cmd->cmd_id.context.smid - 1);
2470 mrsas_dprint(sc, MRSAS_OCR,
2471 "Re-fire command DCMD opcode 0x%x index %d\n ",
2472 mfi_cmd->frame->dcmd.opcode, j);
2474 device_printf(sc->mrsas_dev,
2475 "Cannot build MPT cmd.\n");
2477 mrsas_fire_cmd(sc, req_desc->addr.u.low,
2478 req_desc->addr.u.high);
2483 /* Reset load balance info */
2484 memset(sc->load_balance_info, 0,
2485 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
/* Reload and re-validate the RAID map, then sync it to FW. */
2487 if (!mrsas_get_map_info(sc))
2488 mrsas_sync_map_info(sc);
2490 /* Adapter reset completed successfully */
2491 device_printf(sc->mrsas_dev, "Reset successful\n");
2495 /* Reset failed, kill the adapter */
2496 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
/* No outstanding commands were found: controller is healthy, so the
 * OCR is skipped and the adapter is returned to operational state. */
2500 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2501 mrsas_enable_intr(sc);
2502 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2505 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2506 mrsas_dprint(sc, MRSAS_OCR,
2507 "Reset Exit with %d.\n", retval);
2512 * mrsas_kill_hba Kill HBA when OCR is not supported.
2513 * input: Adapter Context.
2515 * This function will kill HBA when OCR is not supported.
2517 void mrsas_kill_hba (struct mrsas_softc *sc)
2519 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2520 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
/* Read the doorbell back so the posted write is flushed to the chip
 * before this function returns. */
2523 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2527 * mrsas_wait_for_outstanding Wait for outstanding commands
2528 * input: Adapter Context.
2530 * This function will wait for 180 seconds for outstanding
2531 * commands to be completed.
2533 int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2535 int i, outstanding, retval = 0;
/* One iteration per second of MRSAS_RESET_WAIT_TIME; bail out early
 * on driver unload, FW fault, or a drained outstanding counter. */
2538 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2539 if (sc->remove_in_progress) {
2540 mrsas_dprint(sc, MRSAS_OCR,
2541 "Driver remove or shutdown called.\n");
2545 /* Check if firmware is in fault state */
2546 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2547 outbound_scratch_pad)) & MFI_STATE_MASK;
2548 if (fw_state == MFI_STATE_FAULT) {
2549 mrsas_dprint(sc, MRSAS_OCR,
2550 "Found FW in FAULT state, will reset adapter.\n");
2554 outstanding = atomic_read(&sc->fw_outstanding);
/* Periodically log progress and drain the reply queue by hand,
 * since interrupts may be disabled during OCR. */
2558 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2559 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2560 "commands to complete\n",i,outstanding);
2561 mrsas_complete_cmd(sc);
/* Non-zero return signals the caller that a full reset is needed. */
2566 if (atomic_read(&sc->fw_outstanding)) {
2567 mrsas_dprint(sc, MRSAS_OCR,
2568 " pending commands remain after waiting,"
2569 " will reset adapter.\n");
2577 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2578 * input: Command packet for return to free cmd pool
2580 * This function returns the MFI command to the command list.
2582 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2584 struct mrsas_softc *sc = cmd->sc;
/* Scrub per-use state (CCB pointer, frame count) before appending the
 * command back to the free pool under the pool lock. */
2586 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
2587 cmd->ccb_ptr = NULL;
2588 cmd->cmd_id.frame_count = 0;
2589 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2590 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);
2596 * mrsas_get_controller_info - Returns FW's controller structure
2597 * input: Adapter soft state
2598 * Controller information structure
2600 * Issues an internal command (DCMD) to get the FW's controller structure.
2601 * This information is mainly used to find out the maximum IO transfer per
2602 * command supported by the FW.
2604 static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
2605 struct mrsas_ctrl_info *ctrl_info)
2608 struct mrsas_mfi_cmd *cmd;
2609 struct mrsas_dcmd_frame *dcmd;
2611 cmd = mrsas_get_mfi_cmd(sc);
2614 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
2617 dcmd = &cmd->frame->dcmd;
/* Per-call DMA buffer for the controller-info payload; released in
 * the common exit path below. */
2619 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
2620 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
2621 mrsas_release_mfi_cmd(cmd);
2624 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Build a READ-direction DCMD; 0xFF cmd_status means "not yet
 * completed" and is polled on by mrsas_issue_polled(). */
2626 dcmd->cmd = MFI_CMD_DCMD;
2627 dcmd->cmd_status = 0xFF;
2628 dcmd->sge_count = 1;
2629 dcmd->flags = MFI_FRAME_DIR_READ;
2632 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
2633 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2634 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
2635 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
/* On success, copy the DMA'd result out to the caller's buffer. */
2637 if (!mrsas_issue_polled(sc, cmd))
2638 memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
2642 mrsas_free_ctlr_info_cmd(sc);
2643 mrsas_release_mfi_cmd(cmd);
2648 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
2649 * input: Adapter soft state
2651 * Allocates DMAable memory for the controller info internal command.
2653 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2657 /* Allocate get controller info command */
2658 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
/* Standard three-step busdma setup: create tag (32-bit addressable,
 * single segment), allocate memory, then load to get the physical
 * address via mrsas_addr_cb. */
2659 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
2660 1, 0, // algnmnt, boundary
2661 BUS_SPACE_MAXADDR_32BIT,// lowaddr
2662 BUS_SPACE_MAXADDR, // highaddr
2663 NULL, NULL, // filter, filterarg
2664 ctlr_info_size, // maxsize
2666 ctlr_info_size, // maxsegsize
2667 BUS_DMA_ALLOCNOW, // flags
2668 &sc->ctlr_info_tag)) {
2669 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
2672 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
2673 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
2674 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
2677 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
2678 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
2679 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
2680 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
/* Zero the buffer so FW-unwritten fields read as 0. */
2684 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
2689 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
2690 * input: Adapter soft state
2692 * Deallocates memory of the get controller info cmd.
2694 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
/* Tear down in reverse order of mrsas_alloc_ctlr_info_cmd(): unload
 * the map, free the memory, destroy the tag. Each step is guarded so
 * a partially-completed allocation can also be cleaned up safely. */
2696 if (sc->ctlr_info_phys_addr)
2697 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
2698 if (sc->ctlr_info_mem != NULL)
2699 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
2700 if (sc->ctlr_info_tag != NULL)
2701 bus_dma_tag_destroy(sc->ctlr_info_tag);
2705 * mrsas_issue_polled: Issues a polling command
2706 * inputs: Adapter soft state
2707 * Command packet to be issued
2709 * This function is for posting of internal commands to Firmware. MFI
2710 * requires the cmd_status to be set to 0xFF before posting. The maximun
2711 * wait time of the poll response timer is 180 seconds.
2713 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2715 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
2716 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
/* 0xFF is the "in flight" sentinel; DONT_POST_IN_REPLY_QUEUE keeps
 * the completion out of the normal ISR reply path since we poll. */
2719 frame_hdr->cmd_status = 0xFF;
2720 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2722 /* Issue the frame using inbound queue port */
2723 if (mrsas_issue_dcmd(sc, cmd)) {
2724 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2729 * Poll response timer to wait for Firmware response. While this
2730 * timer with the DELAY call could block CPU, the time interval for
2731 * this is only 1 millisecond.
2733 if (frame_hdr->cmd_status == 0xFF) {
2734 for (i=0; i < (max_wait * 1000); i++){
2735 if (frame_hdr->cmd_status == 0xFF)
/* cmd_status: 0 = success, 0xFF = timed out (FW never responded),
 * anything else = FW-reported failure code. */
2741 if (frame_hdr->cmd_status != 0)
2743 if (frame_hdr->cmd_status == 0xFF)
2744 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
2746 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
2753 * mrsas_issue_dcmd - Issues a MFI Pass thru cmd
2754 * input: Adapter soft state
2757 * This function is called by mrsas_issued_blocked_cmd() and
2758 * mrsas_issued_polled(), to build the MPT command and then fire the
2759 * command to Firmware.
2762 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2764 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
/* Wrap the MFI frame in an MPT passthru request; a NULL descriptor
 * means the build failed and nothing is posted to FW. */
2766 req_desc = mrsas_build_mpt_cmd(sc, cmd);
2768 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
2772 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
2778 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
2779 * input: Adapter soft state
2782 * This function is called by mrsas_issue_cmd() to build the MPT-MFI
2783 * passthru command and prepares the MPT command to send to Firmware.
2785 MRSAS_REQUEST_DESCRIPTOR_UNION *
2786 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2788 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2791 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
2792 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
/* SMIDs are 1-based; the request descriptor table is 0-based, hence
 * the index-1 lookup while SCSIIO.SMID carries the raw SMID. */
2796 index = cmd->cmd_id.context.smid;
2798 req_desc = mrsas_get_request_desc(sc, index-1);
2802 req_desc->addr.Words = 0;
2803 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2805 req_desc->SCSIIO.SMID = index;
2811 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
2812 * input: Adapter soft state
2815 * The MPT command and the io_request are setup as a passthru command.
2816 * The SGE chain address is set to frame_phys_addr of the MFI command.
2819 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
2821 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2822 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
2823 struct mrsas_mpt_cmd *mpt_cmd;
2824 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
2826 mpt_cmd = mrsas_get_mpt_cmd(sc);
2830 /* Save the smid. To be used for returning the cmd */
2831 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
/* Cross-link the MPT command back to its MFI command so OCR re-fire
 * and completion can find the original frame. */
2833 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
2836 * For cmds where the flag is set, store the flag and check
2837 * on completion. For cmds with this flag, don't call
2838 * mrsas_complete_cmd.
2841 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
2842 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2844 io_req = mpt_cmd->io_request;
/* Invader/Fury chips require the last SGE of the main message to be
 * explicitly flag-cleared before chaining. */
2846 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
2847 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
2848 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
2849 sgl_ptr_end->Flags = 0;
2852 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2854 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2855 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
2856 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
/* The chain element points at the raw MFI frame in DMA memory; FW
 * fetches the actual MFI command from there. */
2858 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
2860 mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2861 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2863 mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
2869 * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
2870 * input: Adapter soft state
2871 * Command to be issued
2873 * This function waits on an event for the command to be returned
2874 * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
2875 * Used for issuing internal and ioctl commands.
2877 int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2879 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2880 unsigned long total_time = 0;
2883 /* Initialize cmd_status */
/* ECONNREFUSED doubles as the "not yet completed" sentinel; the ISR
 * path (mrsas_wakeup) overwrites it with the real status. */
2884 cmd->cmd_status = ECONNREFUSED;
2886 /* Build MPT-MFI command for issue to FW */
2887 if (mrsas_issue_dcmd(sc, cmd)){
2888 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2892 sc->chan = (void*)&cmd;
2894 /* The following is for debug only... */
2895 //device_printf(sc->mrsas_dev,"DCMD issued to FW, about to sleep-wait...\n");
2896 //device_printf(sc->mrsas_dev,"sc->chan = %p\n", sc->chan);
/* Sleep in 1-second slices on &sc->chan until the completion handler
 * wakes us or the total wait exceeds max_wait seconds. */
2899 if (cmd->cmd_status == ECONNREFUSED){
2900 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
2905 if (total_time >= max_wait) {
2906 device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
2915 * mrsas_complete_mptmfi_passthru - Completes a command
2916 * input: sc: Adapter soft state
2917 * cmd: Command to be completed
2918 * status: cmd completion status
2920 * This function is called from mrsas_complete_cmd() after an interrupt
2921 * is received from Firmware, and io_request->Function is
2922 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
2925 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
2928 struct mrsas_header *hdr = &cmd->frame->hdr;
2929 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
2931 /* Reset the retry counter for future re-tries */
2932 cmd->retry_for_fw_reset = 0;
2935 cmd->ccb_ptr = NULL;
/* Dispatch on the MFI command type in the frame header. */
2938 case MFI_CMD_INVALID:
2939 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
2941 case MFI_CMD_PD_SCSI_IO:
2942 case MFI_CMD_LD_SCSI_IO:
2944 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
2945 * issued either through an IO path or an IOCTL path. If it
2946 * was via IOCTL, we will send it to internal completion.
2948 if (cmd->sync_cmd) {
2950 mrsas_wakeup(sc, cmd);
2956 /* Check for LD map update */
/* mbox.b[1] == 1 marks the pending map-sync DCMD issued by
 * mrsas_sync_map_info(); handle the new map under raidmap_lock. */
2957 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
2958 (cmd->frame->dcmd.mbox.b[1] == 1)) {
2959 sc->fast_path_io = 0;
2960 lockmgr(&sc->raidmap_lock, LK_EXCLUSIVE);
2961 if (cmd_status != 0) {
2962 if (cmd_status != MFI_STAT_NOT_FOUND)
2963 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
2965 mrsas_release_mfi_cmd(cmd);
2966 lockmgr(&sc->raidmap_lock, LK_RELEASE);
/* Successful map sync: validate the new map, update the fast-path
 * flag accordingly, and re-arm the next sync. */
2972 mrsas_release_mfi_cmd(cmd);
2973 if (MR_ValidateMapInfo(sc))
2974 sc->fast_path_io = 0;
2976 sc->fast_path_io = 1;
2977 mrsas_sync_map_info(sc);
2978 lockmgr(&sc->raidmap_lock, LK_RELEASE);
2981 #if 0 //currently not supporting event handling, so commenting out
2982 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
2983 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
2984 mrsas_poll_wait_aen = 0;
2987 /* See if got an event notification */
2988 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
2989 mrsas_complete_aen(sc, cmd);
2991 mrsas_wakeup(sc, cmd);
2994 /* Command issued to abort another cmd return */
2995 mrsas_complete_abort(sc, cmd);
2998 device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
3004 * mrsas_wakeup - Completes an internal command
3005 * input: Adapter soft state
3006 * Command to be completed
3008 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
3009 * a wait timer is started. This function is called from
3010 * mrsas_complete_mptmfi_passthru() as it completes the command,
3011 * to wake up from the command wait.
3013 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3015 cmd->cmd_status = cmd->frame->io.cmd_status;
/* ECONNREFUSED is the waiter's "pending" sentinel; if FW happened to
 * return that exact value, map it to 0 so the waiter does not loop. */
3017 if (cmd->cmd_status == ECONNREFUSED)
3018 cmd->cmd_status = 0;
3020 /* For debug only ... */
3021 //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
/* Wake the single thread sleeping on &sc->chan in
 * mrsas_issue_blocked_cmd(). */
3023 sc->chan = (void*)&cmd;
3024 wakeup_one((void *)&sc->chan);
3029 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
3030 * input: Adapter soft state
3031 * Shutdown/Hibernate
3033 * This function issues a DCMD internal command to Firmware to initiate
3034 * shutdown of the controller.
3036 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3038 struct mrsas_mfi_cmd *cmd;
3039 struct mrsas_dcmd_frame *dcmd;
3041 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3044 cmd = mrsas_get_mfi_cmd(sc);
3046 device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
/* Abort any long-lived pended commands (AEN wait, map update) before
 * asking FW to shut down. */
3051 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3053 if (sc->map_update_cmd)
3054 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
/* Zero-length, no-SGE DCMD; the opcode argument selects shutdown vs
 * hibernate behavior. */
3056 dcmd = &cmd->frame->dcmd;
3057 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3059 dcmd->cmd = MFI_CMD_DCMD;
3060 dcmd->cmd_status = 0x0;
3061 dcmd->sge_count = 0;
3062 dcmd->flags = MFI_FRAME_DIR_NONE;
3065 dcmd->data_xfer_len = 0;
3066 dcmd->opcode = opcode;
3068 device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
3070 mrsas_issue_blocked_cmd(sc, cmd);
3071 mrsas_release_mfi_cmd(cmd);
3077 * mrsas_flush_cache: Requests FW to flush all its caches
3078 * input: Adapter soft state
3080 * This function is issues a DCMD internal command to Firmware to initiate
3081 * flushing of all caches.
3083 static void mrsas_flush_cache(struct mrsas_softc *sc)
3085 struct mrsas_mfi_cmd *cmd;
3086 struct mrsas_dcmd_frame *dcmd;
3088 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3091 cmd = mrsas_get_mfi_cmd(sc);
3093 device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
/* Zero-length, no-SGE DCMD; mbox byte 0 requests flushing both the
 * controller cache and the disk caches. */
3097 dcmd = &cmd->frame->dcmd;
3098 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3100 dcmd->cmd = MFI_CMD_DCMD;
3101 dcmd->cmd_status = 0x0;
3102 dcmd->sge_count = 0;
3103 dcmd->flags = MFI_FRAME_DIR_NONE;
3106 dcmd->data_xfer_len = 0;
3107 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3108 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3110 mrsas_issue_blocked_cmd(sc, cmd);
3111 mrsas_release_mfi_cmd(cmd);
3117 * mrsas_get_map_info: Load and validate RAID map
3118 * input: Adapter instance soft state
3120 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
3121 * to load and validate RAID map. It returns 0 if successful, 1 other-
3124 static int mrsas_get_map_info(struct mrsas_softc *sc)
3126 uint8_t retcode = 0;
/* fast_path_io is only re-enabled when both the map fetch and its
 * validation succeed; any failure leaves it disabled. */
3128 sc->fast_path_io = 0;
3129 if (!mrsas_get_ld_map_info(sc)) {
3130 retcode = MR_ValidateMapInfo(sc);
3132 sc->fast_path_io = 1;
3140 * mrsas_get_ld_map_info: Get FW's ld_map structure
3141 * input: Adapter instance soft state
3143 * Issues an internal command (DCMD) to get the FW's controller PD
3146 static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
3149 struct mrsas_mfi_cmd *cmd;
3150 struct mrsas_dcmd_frame *dcmd;
3151 MR_FW_RAID_MAP_ALL *map;
3152 bus_addr_t map_phys_addr = 0;
3154 cmd = mrsas_get_mfi_cmd(sc);
3156 device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
3160 dcmd = &cmd->frame->dcmd;
/* Double-buffered RAID map: map_id selects which of the two
 * pre-allocated map buffers receives this fetch. */
3162 map = sc->raidmap_mem[(sc->map_id & 1)];
3163 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3165 device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
3166 mrsas_release_mfi_cmd(cmd);
3169 memset(map, 0, sizeof(*map));
3170 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* READ-direction polled DCMD; FW DMA-writes map_sz bytes of the LD
 * map into the selected buffer. */
3172 dcmd->cmd = MFI_CMD_DCMD;
3173 dcmd->cmd_status = 0xFF;
3174 dcmd->sge_count = 1;
3175 dcmd->flags = MFI_FRAME_DIR_READ;
3178 dcmd->data_xfer_len = sc->map_sz;
3179 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3180 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3181 dcmd->sgl.sge32[0].length = sc->map_sz;
3182 if (!mrsas_issue_polled(sc, cmd))
3186 device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
3189 mrsas_release_mfi_cmd(cmd);
3194 * mrsas_sync_map_info: Get FW's ld_map structure
3195 * input: Adapter instance soft state
3197 * Issues an internal command (DCMD) to get the FW's controller PD
3200 static int mrsas_sync_map_info(struct mrsas_softc *sc)
3203 struct mrsas_mfi_cmd *cmd;
3204 struct mrsas_dcmd_frame *dcmd;
3205 uint32_t size_sync_info, num_lds;
3206 MR_LD_TARGET_SYNC *target_map = NULL;
3207 MR_FW_RAID_MAP_ALL *map;
3209 MR_LD_TARGET_SYNC *ld_sync;
3210 bus_addr_t map_phys_addr = 0;
3212 cmd = mrsas_get_mfi_cmd(sc);
3214 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
/* Source is the currently-active map buffer (map_id & 1); the sync
 * payload is written into the other (inactive) buffer below. */
3218 map = sc->raidmap_mem[sc->map_id & 1];
3219 num_lds = map->raidMap.ldCount;
3221 dcmd = &cmd->frame->dcmd;
3222 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3223 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3225 target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
3226 memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
3228 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3230 ld_sync = (MR_LD_TARGET_SYNC *)target_map;
/* Build one targetId/seqNum entry per logical drive from the active
 * map's per-LD RAID data. */
3232 for (i = 0; i < num_lds; i++, ld_sync++) {
3233 raid = MR_LdRaidGet(i, map);
3234 ld_sync->targetId = MR_GetLDTgtId(i, map);
3235 ld_sync->seqNum = raid->seqNum;
/* WRITE-direction DCMD with the PEND flag in mbox.b[1]: FW holds the
 * command until the next map change, completing it asynchronously
 * (handled in mrsas_complete_mptmfi_passthru). */
3238 dcmd->cmd = MFI_CMD_DCMD;
3239 dcmd->cmd_status = 0xFF;
3240 dcmd->sge_count = 1;
3241 dcmd->flags = MFI_FRAME_DIR_WRITE;
3244 dcmd->data_xfer_len = sc->map_sz;
3245 dcmd->mbox.b[0] = num_lds;
3246 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3247 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3248 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3249 dcmd->sgl.sge32[0].length = sc->map_sz;
/* Remember the in-flight command so shutdown can abort it. */
3251 sc->map_update_cmd = cmd;
3252 if (mrsas_issue_dcmd(sc, cmd)) {
3253 device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
3260 * mrsas_get_pd_list: Returns FW's PD list structure
3261 * input: Adapter soft state
3263 * Issues an internal command (DCMD) to get the FW's controller PD
3264 * list structure. This information is mainly used to find out about
3265 * system supported by Firmware.
3267 static int mrsas_get_pd_list(struct mrsas_softc *sc)
3269 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
3270 struct mrsas_mfi_cmd *cmd;
3271 struct mrsas_dcmd_frame *dcmd;
3272 struct MR_PD_LIST *pd_list_mem;
3273 struct MR_PD_ADDRESS *pd_addr;
3274 bus_addr_t pd_list_phys_addr = 0;
3275 struct mrsas_tmp_dcmd *tcmd;
3277 cmd = mrsas_get_mfi_cmd(sc);
3279 device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
3283 dcmd = &cmd->frame->dcmd;
/* NOTE(review): tcmd from kmalloc(M_NOWAIT) is not NULL-checked
 * before being passed to mrsas_alloc_tmp_dcmd() — confirm upstream. */
3285 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3286 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3287 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3288 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
3289 mrsas_release_mfi_cmd(cmd);
3293 pd_list_mem = tcmd->tmp_dcmd_mem;
3294 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3296 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Query only PDs exposed to the host; READ-direction polled DCMD. */
3298 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3299 dcmd->mbox.b[1] = 0;
3300 dcmd->cmd = MFI_CMD_DCMD;
3301 dcmd->cmd_status = 0xFF;
3302 dcmd->sge_count = 1;
3303 dcmd->flags = MFI_FRAME_DIR_READ;
3306 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3307 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3308 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3309 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3311 if (!mrsas_issue_polled(sc, cmd))
3316 /* Get the instance PD list */
3317 pd_count = MRSAS_MAX_PD;
3318 pd_addr = pd_list_mem->addr;
/* Rebuild local_pd_list keyed by deviceId (tid) from the FW reply,
 * marking every returned PD as a system drive. */
3319 if (retcode == 0 && pd_list_mem->count < pd_count) {
3320 memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3321 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3322 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3323 sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
3324 sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
3329 /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */
3330 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3331 mrsas_free_tmp_dcmd(tcmd);
3332 mrsas_release_mfi_cmd(cmd);
3333 kfree(tcmd, M_MRSAS);
3338 * mrsas_get_ld_list: Returns FW's LD list structure
3339 * input: Adapter soft state
3341 * Issues an internal command (DCMD) to get the FW's controller PD
3342 * list structure. This information is mainly used to find out about
3343 * supported by the FW.
3345 static int mrsas_get_ld_list(struct mrsas_softc *sc)
3347 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3348 struct mrsas_mfi_cmd *cmd;
3349 struct mrsas_dcmd_frame *dcmd;
3350 struct MR_LD_LIST *ld_list_mem;
3351 bus_addr_t ld_list_phys_addr = 0;
3352 struct mrsas_tmp_dcmd *tcmd;
3354 cmd = mrsas_get_mfi_cmd(sc);
3356 device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
3360 dcmd = &cmd->frame->dcmd;
/* NOTE(review): tcmd from kmalloc(M_NOWAIT) is not NULL-checked
 * before use — same pattern as mrsas_get_pd_list(); confirm. */
3362 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3363 ld_list_size = sizeof(struct MR_LD_LIST);
3364 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3365 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
3366 mrsas_release_mfi_cmd(cmd);
3370 ld_list_mem = tcmd->tmp_dcmd_mem;
3371 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3373 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* READ-direction polled DCMD that returns the full LD list. */
3375 dcmd->cmd = MFI_CMD_DCMD;
3376 dcmd->cmd_status = 0xFF;
3377 dcmd->sge_count = 1;
3378 dcmd->flags = MFI_FRAME_DIR_READ;
3380 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3381 dcmd->opcode = MR_DCMD_LD_GET_LIST;
3382 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3383 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3386 if (!mrsas_issue_polled(sc, cmd))
3391 /* Get the instance LD list */
/* Record the LD count and rebuild ld_ids (0xff = unused slot) with
 * the target IDs of every LD whose state is non-zero. */
3392 if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
3393 sc->CurLdCount = ld_list_mem->ldCount;
3394 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
3395 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3396 if (ld_list_mem->ldList[ld_index].state != 0) {
3397 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3398 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3403 mrsas_free_tmp_dcmd(tcmd);
3404 mrsas_release_mfi_cmd(cmd);
3405 kfree(tcmd, M_MRSAS);
3410 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
3411 * input: Adapter soft state
 * Allocates DMAable memory for a temporary internal command.  The allocated
 * memory is initialized to all zeros upon successful loading of the DMA map.
3419 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
3422 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
3423 1, 0, // algnmnt, boundary
3424 BUS_SPACE_MAXADDR_32BIT,// lowaddr
3425 BUS_SPACE_MAXADDR, // highaddr
3426 NULL, NULL, // filter, filterarg
3430 BUS_DMA_ALLOCNOW, // flags
3431 &tcmd->tmp_dcmd_tag)) {
3432 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3435 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3436 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3437 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3440 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3441 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3442 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3443 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3447 memset(tcmd->tmp_dcmd_mem, 0, size);
3452 * mrsas_free_tmp_dcmd: Free memory for temporary command
3453 * input: temporary dcmd pointer
3455 * Deallocates memory of the temporary command for use in the construction
3456 * of the internal DCMD.
3458 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3460 if (tmp->tmp_dcmd_phys_addr)
3461 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3462 if (tmp->tmp_dcmd_mem != NULL)
3463 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3464 if (tmp->tmp_dcmd_tag != NULL)
3465 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3469 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
3470 * input: Adapter soft state
3471 * Previously issued cmd to be aborted
3473 * This function is used to abort previously issued commands, such as AEN and
3474 * RAID map sync map commands. The abort command is sent as a DCMD internal
3475 * command and subsequently the driver will wait for a return status. The
3476 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3478 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3479 struct mrsas_mfi_cmd *cmd_to_abort)
3481 struct mrsas_mfi_cmd *cmd;
3482 struct mrsas_abort_frame *abort_fr;
3483 u_int8_t retcode = 0;
3484 unsigned long total_time = 0;
3485 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3487 cmd = mrsas_get_mfi_cmd(sc);
3489 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3493 abort_fr = &cmd->frame->abort;
3495 /* Prepare and issue the abort frame */
3496 abort_fr->cmd = MFI_CMD_ABORT;
3497 abort_fr->cmd_status = 0xFF;
3498 abort_fr->flags = 0;
3499 abort_fr->abort_context = cmd_to_abort->index;
3500 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3501 abort_fr->abort_mfi_phys_addr_hi = 0;
3504 cmd->cmd_status = 0xFF;
3506 if (mrsas_issue_dcmd(sc, cmd)) {
3507 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3511 /* Wait for this cmd to complete */
3512 sc->chan = (void*)&cmd;
3514 if (cmd->cmd_status == 0xFF){
3515 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3520 if (total_time >= max_wait) {
3521 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3528 mrsas_release_mfi_cmd(cmd);
3533 * mrsas_complete_abort: Completes aborting a command
3534 * input: Adapter soft state
3535 * Cmd that was issued to abort another cmd
3537 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
3538 * to change after sending the command. This function is called from
3539 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3541 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3543 if (cmd->sync_cmd) {
3545 cmd->cmd_status = 0;
3546 sc->chan = (void*)&cmd;
3547 wakeup_one((void *)&sc->chan);
3553 * mrsas_aen_handler: Callback function for AEN processing from thread context.
3554 * input: Adapter soft state
3557 void mrsas_aen_handler(struct mrsas_softc *sc)
3559 union mrsas_evt_class_locale class_locale;
3565 device_printf(sc->mrsas_dev, "invalid instance!\n");
3569 if (sc->evt_detail_mem) {
3570 switch (sc->evt_detail_mem->code) {
3571 case MR_EVT_PD_INSERTED:
3572 mrsas_get_pd_list(sc);
3573 mrsas_bus_scan_sim(sc, sc->sim_1);
3576 case MR_EVT_PD_REMOVED:
3577 mrsas_get_pd_list(sc);
3578 mrsas_bus_scan_sim(sc, sc->sim_1);
3581 case MR_EVT_LD_OFFLINE:
3582 case MR_EVT_CFG_CLEARED:
3583 case MR_EVT_LD_DELETED:
3584 mrsas_bus_scan_sim(sc, sc->sim_0);
3587 case MR_EVT_LD_CREATED:
3588 mrsas_get_ld_list(sc);
3589 mrsas_bus_scan_sim(sc, sc->sim_0);
3592 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3593 case MR_EVT_FOREIGN_CFG_IMPORTED:
3594 case MR_EVT_LD_STATE_CHANGE:
3602 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
3606 mrsas_get_pd_list(sc);
3607 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3608 mrsas_bus_scan_sim(sc, sc->sim_1);
3609 mrsas_get_ld_list(sc);
3610 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3611 mrsas_bus_scan_sim(sc, sc->sim_0);
3614 seq_num = sc->evt_detail_mem->seq_num + 1;
3616 // Register AEN with FW for latest sequence number plus 1
3617 class_locale.members.reserved = 0;
3618 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3619 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3621 if (sc->aen_cmd != NULL )
3624 lockmgr(&sc->aen_lock, LK_EXCLUSIVE);
3625 error = mrsas_register_aen(sc, seq_num,
3627 lockmgr(&sc->aen_lock, LK_RELEASE);
3630 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3636 * mrsas_complete_aen: Completes AEN command
3637 * input: Adapter soft state
3638 * Cmd that was issued to abort another cmd
3640 * This function will be called from ISR and will continue
3641 * event processing from thread context by enqueuing task
3642 * in ev_tq (callback function "mrsas_aen_handler").
3644 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3647 * Don't signal app if it is just an aborted previously registered aen
3649 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3656 mrsas_release_mfi_cmd(cmd);
3658 if (!sc->remove_in_progress)
3659 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
3664 static device_method_t mrsas_methods[] = {
3665 DEVMETHOD(device_probe, mrsas_probe),
3666 DEVMETHOD(device_attach, mrsas_attach),
3667 DEVMETHOD(device_detach, mrsas_detach),
3668 DEVMETHOD(device_suspend, mrsas_suspend),
3669 DEVMETHOD(device_resume, mrsas_resume),
3670 DEVMETHOD(bus_print_child, bus_generic_print_child),
3671 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
3675 static driver_t mrsas_driver = {
3678 sizeof(struct mrsas_softc)
static devclass_t mrsas_devclass;

/* Register the driver on the PCI bus; no load/unload event handler. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, NULL, NULL);
MODULE_VERSION(mrsas, 1);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);    /* requires the CAM SCSI layer */