2 * Copyright (c) 2014, LSI Corp.
5 * Support: freebsdraid@lsi.com
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies, either expressed or implied, of the FreeBSD Project.
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 * ATTN: MegaRaid FreeBSD
42 * $FreeBSD: head/sys/dev/mrsas/mrsas.c 265555 2014-05-07 16:16:49Z ambrisko $
45 #include <dev/raid/mrsas/mrsas.h>
46 #include <dev/raid/mrsas/mrsas_ioctl.h>
48 #include <bus/cam/cam.h>
49 #include <bus/cam/cam_ccb.h>
51 #include <sys/sysctl.h>
52 #include <sys/types.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/device.h>
56 #include <sys/spinlock2.h>
62 static d_open_t mrsas_open;
63 static d_close_t mrsas_close;
64 static d_read_t mrsas_read;
65 static d_write_t mrsas_write;
66 static d_ioctl_t mrsas_ioctl;
68 static struct mrsas_ident *mrsas_find_ident(device_t);
69 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
70 static void mrsas_flush_cache(struct mrsas_softc *sc);
71 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
72 static void mrsas_ocr_thread(void *arg);
73 static int mrsas_get_map_info(struct mrsas_softc *sc);
74 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
75 static int mrsas_sync_map_info(struct mrsas_softc *sc);
76 static int mrsas_get_pd_list(struct mrsas_softc *sc);
77 static int mrsas_get_ld_list(struct mrsas_softc *sc);
78 static int mrsas_setup_irq(struct mrsas_softc *sc);
79 static int mrsas_alloc_mem(struct mrsas_softc *sc);
80 static int mrsas_init_fw(struct mrsas_softc *sc);
81 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
82 static int mrsas_complete_cmd(struct mrsas_softc *sc);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
85 struct mrsas_ctrl_info *ctrl_info);
86 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
87 struct mrsas_mfi_cmd *cmd_to_abort);
88 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
89 u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
90 struct mrsas_mfi_cmd *mfi_cmd);
91 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
92 int mrsas_init_adapter(struct mrsas_softc *sc);
93 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
94 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
95 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
96 int mrsas_ioc_init(struct mrsas_softc *sc);
97 int mrsas_bus_scan(struct mrsas_softc *sc);
98 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
99 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
100 int mrsas_reset_ctrl(struct mrsas_softc *sc);
101 int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
102 int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
103 struct mrsas_mfi_cmd *cmd);
104 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
106 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
107 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
108 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 void mrsas_disable_intr(struct mrsas_softc *sc);
111 void mrsas_enable_intr(struct mrsas_softc *sc);
112 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
113 void mrsas_free_mem(struct mrsas_softc *sc);
114 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
115 void mrsas_isr(void *arg);
116 void mrsas_teardown_intr(struct mrsas_softc *sc);
117 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
118 void mrsas_kill_hba (struct mrsas_softc *sc);
119 void mrsas_aen_handler(struct mrsas_softc *sc);
120 void mrsas_write_reg(struct mrsas_softc *sc, int offset,
122 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
123 u_int32_t req_desc_hi);
124 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
125 void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
126 struct mrsas_mfi_cmd *cmd, u_int8_t status);
127 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
129 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
130 MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
131 struct mrsas_mfi_cmd *cmd);
133 extern int mrsas_cam_attach(struct mrsas_softc *sc);
134 extern void mrsas_cam_detach(struct mrsas_softc *sc);
135 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
136 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
137 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
138 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
139 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
140 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
141 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
142 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
143 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
144 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
145 extern void mrsas_xpt_release(struct mrsas_softc *sc);
146 extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
148 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
149 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
150 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
151 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
155 * PCI device struct and table
158 typedef struct mrsas_ident {
166 MRSAS_CTLR_ID device_table[] = {
167 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
168 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
169 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
174 * Character device entry points
177 static struct dev_ops mrsas_ops = {
179 .d_open = mrsas_open,
180 .d_close = mrsas_close,
181 .d_read = mrsas_read,
182 .d_write = mrsas_write,
183 .d_ioctl = mrsas_ioctl,
186 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
188 static int mrsas_msi_enable = 1;
189 TUNABLE_INT("hw.mrsas.msi.enable", &mrsas_msi_enable);
192 * In the cdevsw routines, we find our softc by using the si_drv1 member
193 * of struct cdev. We set this variable to point to our softc in our
194 * attach routine when we create the /dev entry.
197 mrsas_open(struct dev_open_args *ap)
199 cdev_t dev = ap->a_head.a_dev;
200 struct mrsas_softc *sc;
207 mrsas_close(struct dev_close_args *ap)
209 cdev_t dev = ap->a_head.a_dev;
210 struct mrsas_softc *sc;
217 mrsas_read(struct dev_read_args *ap)
219 cdev_t dev = ap->a_head.a_dev;
220 struct mrsas_softc *sc;
226 mrsas_write(struct dev_write_args *ap)
228 cdev_t dev = ap->a_head.a_dev;
229 struct mrsas_softc *sc;
236 * Register Read/Write Functions
240 mrsas_write_reg(struct mrsas_softc *sc, int offset,
243 bus_space_tag_t bus_tag = sc->bus_tag;
244 bus_space_handle_t bus_handle = sc->bus_handle;
246 bus_space_write_4(bus_tag, bus_handle, offset, value);
250 mrsas_read_reg(struct mrsas_softc *sc, int offset)
252 bus_space_tag_t bus_tag = sc->bus_tag;
253 bus_space_handle_t bus_handle = sc->bus_handle;
255 return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
260 * Interrupt Disable/Enable/Clear Functions
263 void mrsas_disable_intr(struct mrsas_softc *sc)
265 u_int32_t mask = 0xFFFFFFFF;
268 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
269 /* Dummy read to force pci flush */
270 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
273 void mrsas_enable_intr(struct mrsas_softc *sc)
275 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
278 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
279 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
281 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
282 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
285 static int mrsas_clear_intr(struct mrsas_softc *sc)
287 u_int32_t status, fw_status, fw_state;
289 /* Read received interrupt */
290 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
292 /* If FW state change interrupt is received, write to it again to clear */
293 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
294 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
295 outbound_scratch_pad));
296 fw_state = fw_status & MFI_STATE_MASK;
297 if (fw_state == MFI_STATE_FAULT) {
298 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
299 if(sc->ocr_thread_active)
300 wakeup(&sc->ocr_chan);
302 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
303 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
307 /* Not our interrupt, so just return */
308 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
311 /* We got a reply interrupt */
316 * PCI Support Functions
319 static struct mrsas_ident * mrsas_find_ident(device_t dev)
321 struct mrsas_ident *pci_device;
323 for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
325 if ((pci_device->vendor == pci_get_vendor(dev)) &&
326 (pci_device->device == pci_get_device(dev)) &&
327 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
328 (pci_device->subvendor == 0xffff)) &&
329 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
330 (pci_device->subdevice == 0xffff)))
336 static int mrsas_probe(device_t dev)
338 static u_int8_t first_ctrl = 1;
339 struct mrsas_ident *id;
341 if ((id = mrsas_find_ident(dev)) != NULL) {
343 kprintf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION);
346 device_set_desc(dev, id->desc);
347 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
354 * mrsas_setup_sysctl: setup sysctl values for mrsas
355 * input: Adapter instance soft state
357 * Setup sysctl entries for mrsas driver.
360 mrsas_setup_sysctl(struct mrsas_softc *sc)
362 struct sysctl_ctx_list *sysctl_ctx = NULL;
363 struct sysctl_oid *sysctl_tree = NULL;
364 char tmpstr[80], tmpstr2[80];
367 * Setup the sysctl variable so the user can change the debug level
370 ksnprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
371 device_get_unit(sc->mrsas_dev));
372 ksnprintf(tmpstr2, sizeof(tmpstr2), "mrsas%d", device_get_unit(sc->mrsas_dev));
375 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
376 if (sysctl_ctx != NULL)
377 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
379 if (sysctl_tree == NULL) {
381 sysctl_ctx_init(&sc->sysctl_ctx);
382 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
383 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, tmpstr2,
384 CTLFLAG_RD, 0, tmpstr);
385 if (sc->sysctl_tree == NULL)
387 sysctl_ctx = &sc->sysctl_ctx;
388 sysctl_tree = sc->sysctl_tree;
392 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
393 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
394 "Disable the use of OCR");
396 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
397 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
398 strlen(MRSAS_VERSION), "driver version");
400 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
401 OID_AUTO, "reset_count", CTLFLAG_RD,
402 &sc->reset_count, 0, "number of ocr from start of the day");
404 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
405 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
406 &sc->fw_outstanding, 0, "FW outstanding commands");
408 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
409 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
410 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
412 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
413 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
414 "Driver debug level");
416 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
417 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
418 0, "Driver IO timeout value in mili-second.");
420 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
421 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
422 &sc->mrsas_fw_fault_check_delay,
423 0, "FW fault check thread delay in seconds. <default is 1 sec>");
425 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
426 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
427 &sc->reset_in_progress, 0, "ocr in progress status");
432 * mrsas_get_tunables: get tunable parameters.
433 * input: Adapter instance soft state
435 * Get tunable parameters. This will help to debug driver at boot time.
438 mrsas_get_tunables(struct mrsas_softc *sc)
442 /* XXX default to some debugging for now */
443 sc->mrsas_debug = MRSAS_FAULT;
444 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
445 sc->mrsas_fw_fault_check_delay = 1;
447 sc->reset_in_progress = 0;
450 * Grab the global variables.
452 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
454 /* Grab the unit-instance variables */
455 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
456 device_get_unit(sc->mrsas_dev));
457 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
461 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
462 * Used to get sequence number at driver load time.
463 * input: Adapter soft state
465 * Allocates DMAable memory for the event log info internal command.
467 int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
471 /* Allocate get event log info command */
472 el_info_size = sizeof(struct mrsas_evt_log_info);
473 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
474 1, 0, // algnmnt, boundary
475 BUS_SPACE_MAXADDR_32BIT,// lowaddr
476 BUS_SPACE_MAXADDR, // highaddr
477 NULL, NULL, // filter, filterarg
478 el_info_size, // maxsize
480 el_info_size, // maxsegsize
481 BUS_DMA_ALLOCNOW, // flags
483 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
486 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
487 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
488 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
491 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
492 sc->el_info_mem, el_info_size, mrsas_addr_cb,
493 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
494 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
498 memset(sc->el_info_mem, 0, el_info_size);
503 * mrsas_free_evt_info_cmd: Free memory for Event log info command
504 * input: Adapter soft state
506 * Deallocates memory for the event log info internal command.
508 void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
510 if (sc->el_info_phys_addr)
511 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
512 if (sc->el_info_mem != NULL)
513 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
514 if (sc->el_info_tag != NULL)
515 bus_dma_tag_destroy(sc->el_info_tag);
519 * mrsas_get_seq_num: Get latest event sequence number
520 * @sc: Adapter soft state
521 * @eli: Firmware event log sequence number information.
522 * Firmware maintains a log of all events in a non-volatile area.
523 * Driver get the sequence number using DCMD
524 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
528 mrsas_get_seq_num(struct mrsas_softc *sc,
529 struct mrsas_evt_log_info *eli)
531 struct mrsas_mfi_cmd *cmd;
532 struct mrsas_dcmd_frame *dcmd;
534 cmd = mrsas_get_mfi_cmd(sc);
537 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
541 dcmd = &cmd->frame->dcmd;
543 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
544 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
545 mrsas_release_mfi_cmd(cmd);
549 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
551 dcmd->cmd = MFI_CMD_DCMD;
552 dcmd->cmd_status = 0x0;
554 dcmd->flags = MFI_FRAME_DIR_READ;
557 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
558 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
559 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
560 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
562 mrsas_issue_blocked_cmd(sc, cmd);
565 * Copy the data back into callers buffer
567 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
568 mrsas_free_evt_log_info_cmd(sc);
569 mrsas_release_mfi_cmd(cmd);
576 * mrsas_register_aen: Register for asynchronous event notification
577 * @sc: Adapter soft state
578 * @seq_num: Starting sequence number
579 * @class_locale: Class of the event
580 * This function subscribes for events beyond the @seq_num
581 * and type @class_locale.
585 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
586 u_int32_t class_locale_word)
589 struct mrsas_mfi_cmd *cmd;
590 struct mrsas_dcmd_frame *dcmd;
591 union mrsas_evt_class_locale curr_aen;
592 union mrsas_evt_class_locale prev_aen;
595 * If there an AEN pending already (aen_cmd), check if the
596 * class_locale of that pending AEN is inclusive of the new
597 * AEN request we currently have. If it is, then we don't have
598 * to do anything. In other words, whichever events the current
599 * AEN request is subscribing to, have already been subscribed
601 * If the old_cmd is _not_ inclusive, then we have to abort
602 * that command, form a class_locale that is superset of both
603 * old and current and re-issue to the FW
606 curr_aen.word = class_locale_word;
610 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
613 * A class whose enum value is smaller is inclusive of all
614 * higher values. If a PROGRESS (= -1) was previously
615 * registered, then a new registration requests for higher
616 * classes need not be sent to FW. They are automatically
618 * Locale numbers don't have such hierarchy. They are bitmap values
620 if ((prev_aen.members.class <= curr_aen.members.class) &&
621 !((prev_aen.members.locale & curr_aen.members.locale) ^
622 curr_aen.members.locale)) {
624 * Previously issued event registration includes
625 * current request. Nothing to do.
629 curr_aen.members.locale |= prev_aen.members.locale;
631 if (prev_aen.members.class < curr_aen.members.class)
632 curr_aen.members.class = prev_aen.members.class;
634 sc->aen_cmd->abort_aen = 1;
635 ret_val = mrsas_issue_blocked_abort_cmd(sc,
639 kprintf("mrsas: Failed to abort "
640 "previous AEN command\n");
646 cmd = mrsas_get_mfi_cmd(sc);
651 dcmd = &cmd->frame->dcmd;
653 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
656 * Prepare DCMD for aen registration
658 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
660 dcmd->cmd = MFI_CMD_DCMD;
661 dcmd->cmd_status = 0x0;
663 dcmd->flags = MFI_FRAME_DIR_READ;
666 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
667 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
668 dcmd->mbox.w[0] = seq_num;
669 sc->last_seq_num = seq_num;
670 dcmd->mbox.w[1] = curr_aen.word;
671 dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
672 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
674 if (sc->aen_cmd != NULL) {
675 mrsas_release_mfi_cmd(cmd);
680 * Store reference to the cmd used to register for AEN. When an
681 * application wants us to register for AEN, we have to abort this
682 * cmd and re-register with a new EVENT LOCALE supplied by that app
687 Issue the aen registration frame
689 if (mrsas_issue_dcmd(sc, cmd)){
690 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
697 * mrsas_start_aen - Subscribes to AEN during driver load time
698 * @instance: Adapter soft state
700 static int mrsas_start_aen(struct mrsas_softc *sc)
702 struct mrsas_evt_log_info eli;
703 union mrsas_evt_class_locale class_locale;
706 /* Get the latest sequence number from FW*/
708 memset(&eli, 0, sizeof(eli));
710 if (mrsas_get_seq_num(sc, &eli))
713 /* Register AEN with FW for latest sequence number plus 1*/
714 class_locale.members.reserved = 0;
715 class_locale.members.locale = MR_EVT_LOCALE_ALL;
716 class_locale.members.class = MR_EVT_CLASS_DEBUG;
718 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
723 * mrsas_attach: PCI entry point
724 * input: device struct pointer
726 * Performs setup of PCI and registers, initializes mutexes and
727 * linked lists, registers interrupts and CAM, and initializes
728 * the adapter/controller to its proper state.
730 static int mrsas_attach(device_t dev)
732 struct mrsas_softc *sc = device_get_softc(dev);
733 uint32_t cmd, bar, error;
735 /* Look up our softc and initialize its fields. */
737 sc->device_id = pci_get_device(dev);
739 mrsas_get_tunables(sc);
742 * Set up PCI and registers
744 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
745 if ( (cmd & PCIM_CMD_PORTEN) == 0) {
748 /* Force the busmaster enable bit on. */
749 cmd |= PCIM_CMD_BUSMASTEREN;
750 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
752 //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
753 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
755 sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
756 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
757 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
759 device_printf(dev, "Cannot allocate PCI registers\n");
762 sc->bus_tag = rman_get_bustag(sc->reg_res);
763 sc->bus_handle = rman_get_bushandle(sc->reg_res);
765 /* Intialize mutexes */
766 lockinit(&sc->sim_lock, "mrsas_sim_lock", 0, LK_CANRECURSE);
767 lockinit(&sc->pci_lock, "mrsas_pci_lock", 0, LK_CANRECURSE);
768 lockinit(&sc->io_lock, "mrsas_io_lock", 0, LK_CANRECURSE);
769 lockinit(&sc->aen_lock, "mrsas_aen_lock", 0, LK_CANRECURSE);
770 spin_init(&sc->ioctl_lock);
771 lockinit(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", 0,
773 lockinit(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", 0,
775 lockinit(&sc->raidmap_lock, "mrsas_raidmap_lock", 0, LK_CANRECURSE);
777 /* Intialize linked list */
778 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
779 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
781 atomic_set(&sc->fw_outstanding,0);
783 sc->io_cmds_highwater = 0;
785 /* Create a /dev entry for this device. */
786 sc->mrsas_cdev = make_dev(&mrsas_ops, device_get_unit(dev), UID_ROOT,
787 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
788 device_get_unit(dev));
790 sc->mrsas_cdev->si_drv1 = sc;
792 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
793 sc->UnevenSpanSupport = 0;
795 /* Initialize Firmware */
796 if (mrsas_init_fw(sc) != SUCCESS) {
800 /* Register SCSI mid-layer */
801 if ((mrsas_cam_attach(sc) != SUCCESS)) {
802 goto attach_fail_cam;
806 if (mrsas_setup_irq(sc) != SUCCESS) {
807 goto attach_fail_irq;
810 /* Enable Interrupts */
811 mrsas_enable_intr(sc);
813 error = kthread_create(mrsas_ocr_thread, sc, &sc->ocr_thread, "mrsas_ocr%d",
814 device_get_unit(sc->mrsas_dev));
816 kprintf("Error %d starting rescan thread\n", error);
817 goto attach_fail_irq;
820 mrsas_setup_sysctl(sc);
822 /* Initiate AEN (Asynchronous Event Notification)*/
824 if (mrsas_start_aen(sc)) {
825 kprintf("Error: start aen failed\n");
833 mrsas_teardown_intr(sc);
835 mrsas_cam_detach(sc);
837 //attach_fail_raidmap:
839 lockuninit(&sc->sim_lock);
840 lockuninit(&sc->aen_lock);
841 lockuninit(&sc->pci_lock);
842 lockuninit(&sc->io_lock);
843 spin_uninit(&sc->ioctl_lock);
844 lockuninit(&sc->mpt_cmd_pool_lock);
845 lockuninit(&sc->mfi_cmd_pool_lock);
846 lockuninit(&sc->raidmap_lock);
848 destroy_dev(sc->mrsas_cdev);
850 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
851 sc->reg_res_id, sc->reg_res);
857 * mrsas_detach: De-allocates and teardown resources
858 * input: device struct pointer
860 * This function is the entry point for device disconnect and detach. It
861 * performs memory de-allocations, shutdown of the controller and various
862 * teardown and destroy resource functions.
864 static int mrsas_detach(device_t dev)
866 struct mrsas_softc *sc;
869 sc = device_get_softc(dev);
870 sc->remove_in_progress = 1;
871 if(sc->ocr_thread_active)
872 wakeup(&sc->ocr_chan);
873 while(sc->reset_in_progress){
875 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
876 mrsas_dprint(sc, MRSAS_INFO,
877 "[%2d]waiting for ocr to be finished\n",i);
879 tsleep(mrsas_detach, 0, "mr_shutdown", hz);
882 while(sc->ocr_thread_active){
884 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
885 mrsas_dprint(sc, MRSAS_INFO,
887 "mrsas_ocr thread to quit ocr %d\n",i,
888 sc->ocr_thread_active);
890 tsleep(mrsas_detach, 0, "mr_shutdown", hz);
892 mrsas_flush_cache(sc);
893 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
894 mrsas_disable_intr(sc);
895 mrsas_cam_detach(sc);
896 mrsas_teardown_intr(sc);
898 lockuninit(&sc->sim_lock);
899 lockuninit(&sc->aen_lock);
900 lockuninit(&sc->pci_lock);
901 lockuninit(&sc->io_lock);
902 spin_uninit(&sc->ioctl_lock);
903 lockuninit(&sc->mpt_cmd_pool_lock);
904 lockuninit(&sc->mfi_cmd_pool_lock);
905 lockuninit(&sc->raidmap_lock);
907 bus_release_resource(sc->mrsas_dev,
908 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
910 destroy_dev(sc->mrsas_cdev);
911 if (sc->sysctl_tree != NULL)
912 sysctl_ctx_free(&sc->sysctl_ctx);
917 * mrsas_free_mem: Frees allocated memory
918 * input: Adapter instance soft state
920 * This function is called from mrsas_detach() to free previously allocated
923 void mrsas_free_mem(struct mrsas_softc *sc)
927 struct mrsas_mfi_cmd *mfi_cmd;
928 struct mrsas_mpt_cmd *mpt_cmd;
931 * Free RAID map memory
933 for (i=0; i < 2; i++)
935 if (sc->raidmap_phys_addr[i])
936 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
937 if (sc->raidmap_mem[i] != NULL)
938 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
939 if (sc->raidmap_tag[i] != NULL)
940 bus_dma_tag_destroy(sc->raidmap_tag[i]);
944 * Free version buffer memroy
946 if (sc->verbuf_phys_addr)
947 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
948 if (sc->verbuf_mem != NULL)
949 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
950 if (sc->verbuf_tag != NULL)
951 bus_dma_tag_destroy(sc->verbuf_tag);
955 * Free sense buffer memory
957 if (sc->sense_phys_addr)
958 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
959 if (sc->sense_mem != NULL)
960 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
961 if (sc->sense_tag != NULL)
962 bus_dma_tag_destroy(sc->sense_tag);
965 * Free chain frame memory
967 if (sc->chain_frame_phys_addr)
968 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
969 if (sc->chain_frame_mem != NULL)
970 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
971 if (sc->chain_frame_tag != NULL)
972 bus_dma_tag_destroy(sc->chain_frame_tag);
975 * Free IO Request memory
977 if (sc->io_request_phys_addr)
978 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
979 if (sc->io_request_mem != NULL)
980 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
981 if (sc->io_request_tag != NULL)
982 bus_dma_tag_destroy(sc->io_request_tag);
985 * Free Reply Descriptor memory
987 if (sc->reply_desc_phys_addr)
988 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
989 if (sc->reply_desc_mem != NULL)
990 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
991 if (sc->reply_desc_tag != NULL)
992 bus_dma_tag_destroy(sc->reply_desc_tag);
995 * Free event detail memory
997 if (sc->evt_detail_phys_addr)
998 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
999 if (sc->evt_detail_mem != NULL)
1000 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1001 if (sc->evt_detail_tag != NULL)
1002 bus_dma_tag_destroy(sc->evt_detail_tag);
1007 if (sc->mfi_cmd_list) {
1008 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1009 mfi_cmd = sc->mfi_cmd_list[i];
1010 mrsas_free_frame(sc, mfi_cmd);
1013 if (sc->mficmd_frame_tag != NULL)
1014 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1017 * Free MPT internal command list
1019 max_cmd = sc->max_fw_cmds;
1020 if (sc->mpt_cmd_list) {
1021 for (i = 0; i < max_cmd; i++) {
1022 mpt_cmd = sc->mpt_cmd_list[i];
1023 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1024 kfree(sc->mpt_cmd_list[i], M_MRSAS);
1026 kfree(sc->mpt_cmd_list, M_MRSAS);
1027 sc->mpt_cmd_list = NULL;
1031 * Free MFI internal command list
1034 if (sc->mfi_cmd_list) {
1035 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1036 kfree(sc->mfi_cmd_list[i], M_MRSAS);
1038 kfree(sc->mfi_cmd_list, M_MRSAS);
1039 sc->mfi_cmd_list = NULL;
1043 * Free request descriptor memory
1045 kfree(sc->req_desc, M_MRSAS);
1046 sc->req_desc = NULL;
1049 * Destroy parent tag
1051 if (sc->mrsas_parent_tag != NULL)
1052 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1056 * mrsas_teardown_intr: Teardown interrupt
1057 * input: Adapter instance soft state
1059 * This function is called from mrsas_detach() to teardown and release
1060 * bus interrupt resourse.
1062 void mrsas_teardown_intr(struct mrsas_softc *sc)
1064 if (sc->intr_handle)
1065 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
1066 if (sc->mrsas_irq != NULL)
1067 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
1068 if (sc->irq_type == PCI_INTR_TYPE_MSI)
1069 pci_release_msi(sc->mrsas_dev);
1070 sc->intr_handle = NULL;
1074 * mrsas_suspend: Suspend entry point
1075 * input: Device struct pointer
1077 * This function is the entry point for system suspend from the OS.
1079 static int mrsas_suspend(device_t dev)
1081 struct mrsas_softc *sc;
1083 sc = device_get_softc(dev);
1088 * mrsas_resume: Resume entry point
1089 * input: Device struct pointer
1091 * This function is the entry point for system resume from the OS.
1093 static int mrsas_resume(device_t dev)
1095 struct mrsas_softc *sc;
1097 sc = device_get_softc(dev);
1102 * mrsas_ioctl: IOCtl commands entry point.
1104 * This function is the entry point for IOCtls from the OS. It calls the
1105 * appropriate function for processing depending on the command received.
1108 mrsas_ioctl(struct dev_ioctl_args *ap)
/* Unpack the DragonFly dev_ioctl_args: device, command code, user buffer. */
1110 cdev_t dev = ap->a_head.a_dev;
1111 u_long cmd = ap->a_cmd;
1112 caddr_t arg = ap->a_data;
1113 struct mrsas_softc *sc;
/* Per-device softc was stashed in si_drv1 at attach time. */
1116 sc = (struct mrsas_softc *)(dev->si_drv1);
/* Refuse new ioctls once detach/shutdown has started. */
1118 if (sc->remove_in_progress) {
1119 mrsas_dprint(sc, MRSAS_INFO,
1120 "Driver remove or shutdown called.\n");
/* Fast path: no OCR (online controller reset) in flight, proceed. */
1124 spin_lock(&sc->ioctl_lock);
1125 if (!sc->reset_in_progress) {
1126 spin_unlock(&sc->ioctl_lock);
1130 /* Release ioctl_lock, and wait for OCR
1132 spin_unlock(&sc->ioctl_lock);
/* Poll once a second until the reset completes, logging progress
 * every MRSAS_RESET_NOTICE_INTERVAL iterations. */
1133 while(sc->reset_in_progress){
1135 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1136 mrsas_dprint(sc, MRSAS_INFO,
1138 "OCR to be finished %d\n",i,
1139 sc->ocr_thread_active);
/* Sleep 1 second (hz ticks); wchan is just a unique address. */
1141 tsleep(mrsas_ioctl, 0, "mr_ioctl", hz);
/* Dispatch on the ioctl command code. */
1146 case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
1147 ret = mrsas_passthru(sc, (void *)arg);
1149 case MRSAS_IOC_SCAN_BUS:
1150 ret = mrsas_bus_scan(sc);
1158 * mrsas_setup_irq: Set up interrupt.
1159 * input: Adapter instance soft state
1161 * This function sets up interrupts as a bus resource, with flags indicating
1162 * resource permitting contemporaneous sharing and for resource to activate
1165 static int mrsas_setup_irq(struct mrsas_softc *sc)
/* DragonFly helper: allocates one MSI vector if mrsas_msi_enable allows,
 * otherwise falls back to legacy INTx; reports which via irq_type. */
1170 sc->irq_type = pci_alloc_1intr(sc->mrsas_dev, mrsas_msi_enable,
1171 &sc->irq_id, &irq_flags);
1173 sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
1174 &sc->irq_id, irq_flags);
1175 if (sc->mrsas_irq == NULL){
1176 device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
/* Hook mrsas_isr() to the IRQ; INTR_MPSAFE means no Giant lock needed. */
1179 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE,
1180 mrsas_isr, sc, &sc->intr_handle, NULL)) {
1181 device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
1189 * mrsas_isr: ISR entry point
1190 * input: argument pointer
1192 * This function is the interrupt service routine entry point. There
1193 * are two types of interrupts, state change interrupt and response
1194 * interrupt. If an interrupt is not ours, we just return.
1196 void mrsas_isr(void *arg)
1198 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1201 /* Clear FW state change interrupt */
1202 status = mrsas_clear_intr(sc);
1204 /* Not our interrupt */
1208 /* If we are resetting (OCR in progress), just ack and bail */
1209 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1210 kprintf(" Entered into ISR when OCR is going active. \n");
1211 mrsas_clear_intr(sc);
1214 /* Process for reply request and clear response interrupt */
1215 if (mrsas_complete_cmd(sc) != SUCCESS)
1216 mrsas_clear_intr(sc);
1222 * mrsas_complete_cmd: Process reply request
1223 * input: Adapter instance soft state
1225 * This function is called from mrsas_isr() to process reply request and
1226 * clear response interrupt. Processing of the reply request entails
1227 * walking through the reply descriptor array for the command request
1228 * pended from Firmware. We look at the Function field to determine
1229 * the command type and perform the appropriate action. Before we
1230 * return, we clear the response interrupt.
1232 static int mrsas_complete_cmd(struct mrsas_softc *sc)
1234 Mpi2ReplyDescriptorsUnion_t *desc;
1235 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1236 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1237 struct mrsas_mpt_cmd *cmd_mpt;
1238 struct mrsas_mfi_cmd *cmd_mfi;
1239 u_int8_t arm, reply_descript_type;
1240 u_int16_t smid, num_completed;
1241 u_int8_t status, extStatus;
1242 union desc_value desc_val;
1243 PLD_LOAD_BALANCE_INFO lbinfo;
1244 u_int32_t device_id;
1245 int threshold_reply_count = 0;
1248 /* If we have a hardware error, no need to continue */
1249 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Start at the reply descriptor we left off at last time. */
1252 desc = sc->reply_desc_mem;
1253 desc += sc->last_reply_idx;
1255 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1257 desc_val.word = desc->Words;
1260 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1262 /* Find our reply descriptor for the command and process.
1263  * All-0xFFFFFFFF words mark an unused (not yet posted) descriptor. */
1263 while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
/* SMID is 1-based; index 0 of mpt_cmd_list corresponds to SMID 1. */
1265 smid = reply_desc->SMID;
1266 cmd_mpt = sc->mpt_cmd_list[smid -1];
1267 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
1269 status = scsi_io_req->RaidContext.status;
1270 extStatus = scsi_io_req->RaidContext.exStatus;
1272 switch (scsi_io_req->Function)
1274 case MPI2_FUNCTION_SCSI_IO_REQUEST : /*Fast Path IO.*/
1275 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1276 lbinfo = &sc->load_balance_info[device_id];
/* Undo the RAID1 load-balance accounting done at submit time. */
1277 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1278 arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
1279 atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
1280 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1282 //Fall thru and complete IO
1283 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1284 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1285 mrsas_cmd_done(sc, cmd_mpt);
/* Reset status fields so a reused frame starts clean. */
1286 scsi_io_req->RaidContext.status = 0;
1287 scsi_io_req->RaidContext.exStatus = 0;
1288 atomic_dec(&sc->fw_outstanding);
1290 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
1291 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1292 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1294 mrsas_release_mpt_cmd(cmd_mpt);
/* Advance (and wrap) the consumer index over the reply ring. */
1298 sc->last_reply_idx++;
1299 if (sc->last_reply_idx >= sc->reply_q_depth)
1300 sc->last_reply_idx = 0;
1302 desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
1304 threshold_reply_count++;
1306 /* Get the next reply descriptor */
1307 if (!sc->last_reply_idx)
1308 desc = sc->reply_desc_mem;
1312 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1313 desc_val.word = desc->Words;
1315 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1317 if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1321 * Write to reply post index after completing threshold reply count
1322 * and still there are more replies in reply queue pending to be
/* Periodic doorbell update keeps the FW's view of the consumer index
 * fresh during long completion bursts. */
1325 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1326 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
1327 sc->last_reply_idx);
1328 threshold_reply_count = 0;
1332 /* No match, just return */
1333 if (num_completed == 0)
1336 /* Clear response interrupt */
1337 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
1343 * mrsas_map_mpt_cmd_status: Map FW command status to a CAM CCB status.
1344 * input: Adapter instance soft state
1346 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1347 * It checks the command status and maps the appropriate CAM status for the CCB.
1349 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1351 struct mrsas_softc *sc = cmd->sc;
1352 u_int8_t *sense_data;
/* Success path: command completed cleanly. */
1356 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1358 case MFI_STAT_SCSI_IO_FAILED:
1359 case MFI_STAT_SCSI_DONE_WITH_ERROR:
/* SCSI-level error: hand the sense data back to CAM. */
1360 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1361 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1363 /* For now just copy 18 bytes back (fixed-format sense size) */
1364 memcpy(sense_data, cmd->sense, 18);
1365 cmd->ccb_ptr->csio.sense_len = 18;
1366 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1369 case MFI_STAT_LD_OFFLINE:
1370 case MFI_STAT_DEVICE_NOT_FOUND:
/* Non-zero LUN on a missing device -> invalid LUN; otherwise device gone. */
1371 if (cmd->ccb_ptr->ccb_h.target_lun)
1372 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1374 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1376 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1377 /*send status to CAM layer to retry sending command without
1378 * decrementing retry counter*/
1379 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
/* Unrecognized FW status: report it and fail the request. */
1382 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1383 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1384 cmd->ccb_ptr->csio.scsi_status = status;
1390 * mrsas_alloc_mem: Allocate DMAable memory.
1391 * input: Adapter instance soft state
1393 * This function creates the parent DMA tag and allocates DMAable memory.
1394 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
1395 * into Kernel virtual address. Callback argument is physical memory address.
1397 static int mrsas_alloc_mem(struct mrsas_softc *sc)
1399 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1400 chain_frame_size, evt_detail_size;
1403 * Allocate parent DMA tag
/* Parent tag only sets global constraints; every per-buffer tag below
 * is derived from it.  Each region follows the same three-step pattern:
 * bus_dma_tag_create -> bus_dmamem_alloc -> bus_dmamap_load, with
 * mrsas_addr_cb recording the physical address. */
1405 if (bus_dma_tag_create(NULL, /* parent */
1408 BUS_SPACE_MAXADDR, /* lowaddr */
1409 BUS_SPACE_MAXADDR, /* highaddr */
1410 NULL, NULL, /* filter, filterarg */
1411 MRSAS_MAX_IO_SIZE,/* maxsize */
1412 MRSAS_MAX_SGL, /* nsegments */
1413 MRSAS_MAX_IO_SIZE,/* maxsegsize */
1415 &sc->mrsas_parent_tag /* tag */
1417 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1422 * Allocate for version buffer (driver version string handed to FW)
1424 verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
1425 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1426 1, 0, // algnmnt, boundary
1427 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1428 BUS_SPACE_MAXADDR, // highaddr
1429 NULL, NULL, // filter, filterarg
1430 verbuf_size, // maxsize
1432 verbuf_size, // maxsegsize
1433 BUS_DMA_ALLOCNOW, // flags
1435 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1438 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1439 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1440 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1443 bzero(sc->verbuf_mem, verbuf_size);
1444 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1445 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
1446 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1451 * Allocate IO Request Frames (sized in mrsas_init_adapter())
1453 io_req_size = sc->io_frames_alloc_sz;
1454 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1455 16, 0, // algnmnt, boundary
1456 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1457 BUS_SPACE_MAXADDR, // highaddr
1458 NULL, NULL, // filter, filterarg
1459 io_req_size, // maxsize
1461 io_req_size, // maxsegsize
1462 BUS_DMA_ALLOCNOW, // flags
1463 &sc->io_request_tag)) {
1464 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1467 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1468 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1469 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1472 bzero(sc->io_request_mem, io_req_size);
1473 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1474 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1475 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1476 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1481 * Allocate Chain Frames (per-command SGE overflow area)
1483 chain_frame_size = sc->chain_frames_alloc_sz;
1484 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1485 4, 0, // algnmnt, boundary
1486 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1487 BUS_SPACE_MAXADDR, // highaddr
1488 NULL, NULL, // filter, filterarg
1489 chain_frame_size, // maxsize
1491 chain_frame_size, // maxsegsize
1492 BUS_DMA_ALLOCNOW, // flags
1493 &sc->chain_frame_tag)) {
1494 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1497 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1498 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1499 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1502 bzero(sc->chain_frame_mem, chain_frame_size);
1503 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1504 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1505 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1506 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1511 * Allocate Reply Descriptor Array (completion ring walked by
1511 * mrsas_complete_cmd())
1513 reply_desc_size = sc->reply_alloc_sz;
1514 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1515 16, 0, // algnmnt, boundary
1516 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1517 BUS_SPACE_MAXADDR, // highaddr
1518 NULL, NULL, // filter, filterarg
1519 reply_desc_size, // maxsize
1521 reply_desc_size, // maxsegsize
1522 BUS_DMA_ALLOCNOW, // flags
1523 &sc->reply_desc_tag)) {
1524 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1527 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1528 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1529 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1532 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1533 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1534 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1535 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1540 * Allocate Sense Buffer Array. Keep in lower 4GB
1542 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1543 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1544 64, 0, // algnmnt, boundary
1545 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1546 BUS_SPACE_MAXADDR, // highaddr
1547 NULL, NULL, // filter, filterarg
1548 sense_size, // maxsize
1550 sense_size, // maxsegsize
1551 BUS_DMA_ALLOCNOW, // flags
1553 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1556 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1557 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1558 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1561 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1562 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1564 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1569 * Allocate for Event detail structure (AEN event payload from FW)
1571 evt_detail_size = sizeof(struct mrsas_evt_detail);
1572 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1573 1, 0, // algnmnt, boundary
1574 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1575 BUS_SPACE_MAXADDR, // highaddr
1576 NULL, NULL, // filter, filterarg
1577 evt_detail_size, // maxsize
1579 evt_detail_size, // maxsegsize
1580 BUS_DMA_ALLOCNOW, // flags
1581 &sc->evt_detail_tag)) {
1582 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1585 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1586 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1587 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1590 bzero(sc->evt_detail_mem, evt_detail_size);
1591 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1592 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1593 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1594 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1600 * Create a dma tag for data buffers; size will be the maximum
1601 * possible I/O size (280kB).  Tag only — per-command maps are created
1601 * later in mrsas_alloc_mpt_cmds().
1603 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1606 BUS_SPACE_MAXADDR, // lowaddr
1607 BUS_SPACE_MAXADDR, // highaddr
1608 NULL, NULL, // filter, filterarg
1609 MRSAS_MAX_IO_SIZE, // maxsize
1610 MRSAS_MAX_SGL, // nsegments
1611 MRSAS_MAX_IO_SIZE, // maxsegsize
1612 BUS_DMA_ALLOCNOW, // flags
1614 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1622 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1623 * input: callback argument,
1624 * machine dependent type that describes DMA segments,
1625 * number of segments,
1628 * This function is for the driver to receive mapping information resultant
1629 * of the bus_dmamap_load(). The information is actually not being used,
1630 * but the address is saved anyway.
/* busdma load callback: stores the physical address of the first (and,
 * given the single-segment tags used here, only) DMA segment into the
 * bus_addr_t that the caller passed as the callback argument. */
1633 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1638 *addr = segs[0].ds_addr;
1642 * mrsas_setup_raidmap: Set up RAID map.
1643 * input: Adapter instance soft state
1645 * Allocate DMA memory for the RAID maps and perform setup.
1647 static int mrsas_setup_raidmap(struct mrsas_softc *sc)
/* Map size covers the base FW RAID map plus one span-map entry per
 * additional logical drive (the struct already contains the first one). */
1649 sc->map_sz = sizeof(MR_FW_RAID_MAP) +
1650 (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
/* Two copies are kept so one can be updated while the other is in use. */
1652 for (int i=0; i < 2; i++)
1654 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1655 4, 0, // algnmnt, boundary
1656 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1657 BUS_SPACE_MAXADDR, // highaddr
1658 NULL, NULL, // filter, filterarg
1659 sc->map_sz, // maxsize
1661 sc->map_sz, // maxsegsize
1662 BUS_DMA_ALLOCNOW, // flags
1663 &sc->raidmap_tag[i])) {
1664 device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
1667 if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
1668 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1669 device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
1672 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1673 sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1675 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1678 if (!sc->raidmap_mem[i]) {
1679 device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
/* Fetch the map from FW; on success register for future map updates. */
1684 if (!mrsas_get_map_info(sc))
1685 mrsas_sync_map_info(sc);
1691 * mrsas_init_fw: Initialize Firmware
1692 * input: Adapter soft state
1694 * Calls transition_to_ready() to make sure Firmware is in operational
1695 * state and calls mrsas_init_adapter() to send IOC_INIT command to
1696 * Firmware. It issues internal commands to get the controller info
1697 * after the IOC_INIT command response is received by Firmware.
1698 * Note: code relating to get_pdlist, get_ld_list and max_sectors
1699 * are currently not being used, it is left here as placeholder.
1701 static int mrsas_init_fw(struct mrsas_softc *sc)
1703 u_int32_t max_sectors_1;
1704 u_int32_t max_sectors_2;
1705 u_int32_t tmp_sectors;
1706 struct mrsas_ctrl_info *ctrl_info;
1711 /* Make sure Firmware is ready */
1712 ret = mrsas_transition_to_ready(sc, ocr);
1713 if (ret != SUCCESS) {
1717 /* Get operational params, sge flags, send init cmd to ctlr */
1718 if (mrsas_init_adapter(sc) != SUCCESS){
1719 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
1723 /* Allocate internal commands for pass-thru */
1724 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
1725 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
1729 if (mrsas_setup_raidmap(sc) != SUCCESS) {
1730 device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
1734 /* For pass-thru, get PD/LD list and controller info */
1735 memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
1736 mrsas_get_pd_list(sc);
/* 0xff marks "no LD" entries before the real list is fetched. */
1738 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
1739 mrsas_get_ld_list(sc);
1741 //memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));
/* M_NOWAIT: allocation may fail; the NULL case is tolerated below. */
1743 ctrl_info = kmalloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
1746 * Compute the max allowed sectors per IO: The controller info has two
1747 * limits on max sectors. Driver should use the minimum of these two.
1749 * 1 << stripe_sz_ops.min = max sectors per strip
1751 * Note that older firmwares ( < FW ver 30) didn't report information
1752 * to calculate max_sectors_1. So the number ended up as zero always.
1755 if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
1756 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1757 ctrl_info->max_strips_per_io;
1758 max_sectors_2 = ctrl_info->max_request_size;
1759 tmp_sectors = min(max_sectors_1 , max_sectors_2);
1760 sc->disableOnlineCtrlReset =
1761 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
1762 sc->UnevenSpanSupport =
1763 ctrl_info->adapterOperations2.supportUnevenSpans;
/* Fast-path IO requires a validated RAID map when spans are uneven. */
1764 if(sc->UnevenSpanSupport) {
1765 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
1766 sc->UnevenSpanSupport);
1767 if (MR_ValidateMapInfo(sc))
1768 sc->fast_path_io = 1;
1770 sc->fast_path_io = 0;
/* Default request cap from SGE count; clamp to FW-reported limit. */
1774 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
1776 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
1777 sc->max_sectors_per_req = tmp_sectors;
1780 kfree(ctrl_info, M_MRSAS);
1786 * mrsas_init_adapter: Initializes the adapter/controller
1787 * input: Adapter soft state
1789 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
1790 * ROC/controller. The FW register is read to determine the number of
1791 * commands that is supported. All memory allocations for IO is based on
1792 * max_cmd. Appropriate calculations are performed in this function.
1794 int mrsas_init_adapter(struct mrsas_softc *sc)
1800 /* Read FW status register */
1801 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
1803 /* Get operational params from status register */
1804 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
1806 /* Decrement the max supported by 1, to correlate with FW */
1807 sc->max_fw_cmds = sc->max_fw_cmds-1;
1808 max_cmd = sc->max_fw_cmds;
1810 /* Determine allocation size of command frames.
1810  * Reply queue depth: 2 entries per command + 1, rounded up to 16. */
1811 sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
1812 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
1813 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
/* One extra IO frame: frame 0 is reserved (commands start at SMID 1). */
1814 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
1815 sc->chain_frames_alloc_sz = 1024 * max_cmd;
/* SGE counts: divide-by-16 converts byte offsets to 16-byte SGE slots. */
1816 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1817 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
1819 sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
1820 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
1822 /* Used for pass thru MFI frame (DCMD) */
1823 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
1825 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1826 sizeof(MPI2_SGE_IO_UNION))/16;
/* Reply-ring consumer index starts at the beginning of the ring. */
1828 sc->last_reply_idx = 0;
1830 ret = mrsas_alloc_mem(sc);
1834 ret = mrsas_alloc_mpt_cmds(sc);
1838 ret = mrsas_ioc_init(sc);
1847 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
1848 * input: Adapter soft state
1850 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
1852 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
1856 /* Allocate IOC INIT command: 1024 bytes for the MFI init frame,
1856  * followed by the MPI2 IOC_INIT request (see mrsas_ioc_init()). */
1857 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
1858 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1859 1, 0, // algnmnt, boundary
1860 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1861 BUS_SPACE_MAXADDR, // highaddr
1862 NULL, NULL, // filter, filterarg
1863 ioc_init_size, // maxsize
1865 ioc_init_size, // maxsegsize
1866 BUS_DMA_ALLOCNOW, // flags
1867 &sc->ioc_init_tag)) {
1868 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
1871 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
1872 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
1873 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
1876 bzero(sc->ioc_init_mem, ioc_init_size);
1877 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
1878 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
1879 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
1880 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
1888 * mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
1889 * input: Adapter soft state
1891 * Deallocates memory of the IOC Init cmd.
1894 void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
/* Reverse order of mrsas_alloc_ioc_cmd(): unload map, free memory,
 * destroy tag; each step is guarded so partial allocations are safe. */
1895 if (sc->ioc_init_phys_mem)
1896 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
1897 if (sc->ioc_init_mem != NULL)
1898 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
1899 if (sc->ioc_init_tag != NULL)
1900 bus_dma_tag_destroy(sc->ioc_init_tag);
1904 * mrsas_ioc_init: Sends IOC Init command to FW
1905 * input: Adapter soft state
1907 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
1909 int mrsas_ioc_init(struct mrsas_softc *sc)
1911 struct mrsas_init_frame *init_frame;
1912 pMpi2IOCInitRequest_t IOCInitMsg;
1913 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
1914 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
1915 bus_addr_t phys_addr;
1918 /* Allocate memory for the IOC INIT command */
1919 if (mrsas_alloc_ioc_cmd(sc)) {
1920 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/* MPI2 IOC_INIT request lives 1024 bytes past the MFI init frame
 * (layout established in mrsas_alloc_ioc_cmd()). */
1924 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
1925 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
1926 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1927 IOCInitMsg->MsgVersion = MPI2_VERSION;
1928 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
1929 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
1930 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
1931 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
1932 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
/* MFI wrapper frame at offset 0 carries the IOC_INIT as its payload. */
1934 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
1935 init_frame->cmd = MFI_CMD_INIT;
/* 0xFF = "pending"; FW overwrites this when the command completes. */
1936 init_frame->cmd_status = 0xFF;
1937 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
/* Hand the driver version string to FW via the version buffer. */
1939 if (sc->verbuf_mem) {
1940 ksnprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
1942 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
1943 init_frame->driver_ver_hi = 0;
1946 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
1947 init_frame->queue_info_new_phys_addr_lo = phys_addr;
1948 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
1950 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
1951 req_desc.MFAIo.RequestFlags =
1952 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1954 mrsas_disable_intr(sc);
1955 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
1956 //device_printf(sc->mrsas_dev, "Issuing IOC INIT command to FW.\n");del?
1957 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
1960 * Poll response timer to wait for Firmware response. While this
1961 * timer with the DELAY call could block CPU, the time interval for
1962 * this is only 1 millisecond.
1964 if (init_frame->cmd_status == 0xFF) {
1965 for (i=0; i < (max_wait * 1000); i++){
1966 if (init_frame->cmd_status == 0xFF)
1973 if (init_frame->cmd_status == 0)
1974 mrsas_dprint(sc, MRSAS_OCR,
1975 "IOC INIT response received from FW.\n");
1976 //device_printf(sc->mrsas_dev, "IOC INIT response received from FW.\n");del?
1979 if (init_frame->cmd_status == 0xFF)
1980 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
1982 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
/* Init command memory is one-shot; release it regardless of outcome. */
1986 mrsas_free_ioc_cmd(sc);
1991 * mrsas_alloc_mpt_cmds: Allocates the command packets
1992 * input: Adapter instance soft state
1994 * This function allocates the internal commands for IOs. Each command that is
1995 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
1996 * An array is allocated with mrsas_mpt_cmd context. The free commands are
1997 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2000 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2004 struct mrsas_mpt_cmd *cmd;
2005 pMpi2ReplyDescriptorsUnion_t reply_desc;
2006 u_int32_t offset, chain_offset, sense_offset;
2007 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2008 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2010 max_cmd = sc->max_fw_cmds;
2012 sc->req_desc = kmalloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2013 if (!sc->req_desc) {
2014 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2017 memset(sc->req_desc, 0, sc->request_alloc_sz);
2020 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
2021 * dynamic array first and then allocate individual commands.
2023 sc->mpt_cmd_list = kmalloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
2024 if (!sc->mpt_cmd_list) {
2025 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2028 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
2029 for (i = 0; i < max_cmd; i++) {
2030 sc->mpt_cmd_list[i] = kmalloc(sizeof(struct mrsas_mpt_cmd),
/* On allocation failure, unwind everything allocated so far. */
2032 if (!sc->mpt_cmd_list[i]) {
2033 for (j = 0; j < i; j++)
2034 kfree(sc->mpt_cmd_list[j],M_MRSAS);
2035 kfree(sc->mpt_cmd_list, M_MRSAS);
2036 sc->mpt_cmd_list = NULL;
/* Skip IO frame 0: it is reserved, commands start at SMID 1. */
2041 io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2042 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2043 chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
2044 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2045 sense_base = (u_int8_t*)sc->sense_mem;
2046 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
/* Carve each command's slices out of the shared DMA regions; keep both
 * the kernel-virtual pointer and the bus address for each slice. */
2047 for (i = 0; i < max_cmd; i++) {
2048 cmd = sc->mpt_cmd_list[i];
2049 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2050 chain_offset = 1024 * i;
2051 sense_offset = MRSAS_SENSE_LEN * i;
2052 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2054 cmd->ccb_ptr = NULL;
2055 callout_init(&cmd->cm_callout);
/* Sentinel: no MFI pass-thru command is linked to this MPT command. */
2056 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2058 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2059 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2060 cmd->io_request_phys_addr = io_req_base_phys + offset;
2061 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2062 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2063 cmd->sense = sense_base + sense_offset;
2064 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2065 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
/* Command joins the free pool. */
2068 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2071 /* Initialize reply descriptor array to 0xFFFFFFFF (the "unused"
2071  * marker that mrsas_complete_cmd() stops on) */
2072 reply_desc = sc->reply_desc_mem;
2073 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2074 reply_desc->Words = MRSAS_ULONG_MAX;
2080 * mrsas_fire_cmd: Sends command to FW
2081 * input: Adapter soft state
2082 * request descriptor address low
2083 * request descriptor address high
2085 * This functions fires the command to Firmware by writing to the
2086 * inbound_low_queue_port and inbound_high_queue_port.
2088 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2089 u_int32_t req_desc_hi)
/* pci_lock serializes the low/high queue-port register pair so the two
 * 32-bit halves of one descriptor are not interleaved with another's. */
2091 lockmgr(&sc->pci_lock, LK_EXCLUSIVE);
2092 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2094 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2096 lockmgr(&sc->pci_lock, LK_RELEASE);
2100 * mrsas_transition_to_ready: Move FW to Ready state
2101 * input: Adapter instance soft state
2103 * During the initialization, the FW can potentially be in any one of
2104 * several possible states. If the FW in operational, waiting-for-handshake
2105 * states, driver must take steps to bring it to ready state. Otherwise, it
2106 * has to wait for the ready state.
2108 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2112 u_int32_t val, fw_state;
2113 u_int32_t cur_state;
2114 u_int32_t abs_state, curr_abs_state;
/* FW state lives in the low bits of the outbound scratch pad register. */
2116 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2117 fw_state = val & MFI_STATE_MASK;
2118 max_wait = MRSAS_RESET_WAIT_TIME;
2120 if (fw_state != MFI_STATE_READY)
2121 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
/* Drive the FW state machine: for each observed state, perform the
 * doorbell action that nudges it toward READY, then wait for a change. */
2123 while (fw_state != MFI_STATE_READY) {
2124 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2126 case MFI_STATE_FAULT:
2127 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2129 cur_state = MFI_STATE_FAULT;
2134 case MFI_STATE_WAIT_HANDSHAKE:
2135 /* Set the CLR bit in inbound doorbell */
2136 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2137 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
2138 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2140 case MFI_STATE_BOOT_MESSAGE_PENDING:
2141 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2143 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2145 case MFI_STATE_OPERATIONAL:
2146 /* Bring it to READY state; assuming max wait 10 secs */
2147 mrsas_disable_intr(sc);
2148 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
/* Poll the doorbell ack bit, up to max_wait seconds in 1 ms steps. */
2149 for (i=0; i < max_wait * 1000; i++) {
2150 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2155 cur_state = MFI_STATE_OPERATIONAL;
/* Transitional states: nothing to do but wait them out. */
2157 case MFI_STATE_UNDEFINED:
2158 /* This state should not last for more than 2 seconds */
2159 cur_state = MFI_STATE_UNDEFINED;
2161 case MFI_STATE_BB_INIT:
2162 cur_state = MFI_STATE_BB_INIT;
2164 case MFI_STATE_FW_INIT:
2165 cur_state = MFI_STATE_FW_INIT;
2167 case MFI_STATE_FW_INIT_2:
2168 cur_state = MFI_STATE_FW_INIT_2;
2170 case MFI_STATE_DEVICE_SCAN:
2171 cur_state = MFI_STATE_DEVICE_SCAN;
2173 case MFI_STATE_FLUSH_CACHE:
2174 cur_state = MFI_STATE_FLUSH_CACHE;
2177 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2182 * The cur_state should not last for more than max_wait secs
2184 for (i = 0; i < (max_wait * 1000); i++) {
2185 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2186 outbound_scratch_pad))& MFI_STATE_MASK);
2187 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2188 outbound_scratch_pad));
2189 if (abs_state == curr_abs_state)
2196 * Return error if fw_state hasn't changed after max_wait
2198 if (curr_abs_state == abs_state) {
2199 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2200 "in %d secs\n", fw_state, max_wait);
2204 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2205 //device_printf(sc->mrsas_dev, "FW now in Ready state\n");del?
2210 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2211 * input: Adapter soft state
2213 * This function removes an MFI command from the command list.
2215 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2217 struct mrsas_mfi_cmd *cmd = NULL;
2219 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
2220 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
2221 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2222 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2224 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);
2230 * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
2231 * input: Adapter Context.
2233 * This function will check FW status register and flag
2234 * do_timeout_reset flag. It will do OCR/Kill adapter if
2235 * FW is in fault state or an IO timeout has triggered a reset.
2238 mrsas_ocr_thread(void *arg)
2240 struct mrsas_softc *sc;
2241 u_int32_t fw_status, fw_state;
2243 sc = (struct mrsas_softc *)arg;
2245 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2247 sc->ocr_thread_active = 1;
2248 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
2250 /* Sleep for 1 second and check the queue status*/
2251 lksleep(&sc->ocr_chan, &sc->sim_lock, 0,
2252 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2253 if (sc->remove_in_progress) {
2254 mrsas_dprint(sc, MRSAS_OCR,
2255 "Exit due to shutdown from %s\n", __func__);
2258 fw_status = mrsas_read_reg(sc,
2259 offsetof(mrsas_reg_set, outbound_scratch_pad));
2260 fw_state = fw_status & MFI_STATE_MASK;
2261 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2262 device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
2263 sc->do_timedout_reset?"IO Timeout":
2264 "FW fault detected");
2265 spin_lock(&sc->ioctl_lock);
2266 sc->reset_in_progress = 1;
2268 spin_unlock(&sc->ioctl_lock);
2269 mrsas_xpt_freeze(sc);
2270 mrsas_reset_ctrl(sc);
2271 mrsas_xpt_release(sc);
2272 sc->reset_in_progress = 0;
2273 sc->do_timedout_reset = 0;
2276 lockmgr(&sc->sim_lock, LK_RELEASE);
2277 sc->ocr_thread_active = 0;
2282 * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
2283 * input: Adapter Context.
2285 * This function will clear reply descriptor so that post OCR
2286 * driver and FW will lose the old history.
2288 void mrsas_reset_reply_desc(struct mrsas_softc *sc)
2291 pMpi2ReplyDescriptorsUnion_t reply_desc;
2293 sc->last_reply_idx = 0;
2294 reply_desc = sc->reply_desc_mem;
2295 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2296 reply_desc->Words = MRSAS_ULONG_MAX;
2301 * mrsas_reset_ctrl Core function to OCR/Kill adapter.
2302 * input: Adapter Context.
2304 * This function will run from thread context so that it can sleep.
2305 * 1. Do not handle OCR if FW is in HW critical error.
2306 * 2. Wait for outstanding command to complete for 180 seconds.
2307 * 3. If #2 does not find any outstanding command Controller is in working
2308 * state, so skip OCR.
2309 * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
2310 * 4. Start of the OCR, return all SCSI command back to CAM layer which has
2312 * 5. Post OCR, Re-fire Management command and move Controller to Operation
2315 int mrsas_reset_ctrl(struct mrsas_softc *sc)
2317 int retval = SUCCESS, i, j, retry = 0;
2318 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2320 struct mrsas_mfi_cmd *mfi_cmd;
2321 struct mrsas_mpt_cmd *mpt_cmd;
2322 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2324 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2325 device_printf(sc->mrsas_dev,
2326 "mrsas: Hardware critical error, returning FAIL.\n");
2330 set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2331 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2332 mrsas_disable_intr(sc);
2335 /* First try waiting for commands to complete */
2336 if (mrsas_wait_for_outstanding(sc)) {
2337 mrsas_dprint(sc, MRSAS_OCR,
2338 "resetting adapter from %s.\n",
2340 /* Now return commands back to the CAM layer */
2341 for (i = 0 ; i < sc->max_fw_cmds; i++) {
2342 mpt_cmd = sc->mpt_cmd_list[i];
2343 if (mpt_cmd->ccb_ptr) {
2344 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2345 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2346 mrsas_cmd_done(sc, mpt_cmd);
2347 atomic_dec(&sc->fw_outstanding);
2351 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2352 outbound_scratch_pad));
2353 abs_state = status_reg & MFI_STATE_MASK;
2354 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2355 if (sc->disableOnlineCtrlReset ||
2356 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2357 /* Reset not supported, kill adapter */
2358 mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
2360 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
2365 /* Now try to reset the chip */
2366 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2367 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2368 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2369 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2370 MPI2_WRSEQ_1ST_KEY_VALUE);
2371 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2372 MPI2_WRSEQ_2ND_KEY_VALUE);
2373 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2374 MPI2_WRSEQ_3RD_KEY_VALUE);
2375 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2376 MPI2_WRSEQ_4TH_KEY_VALUE);
2377 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2378 MPI2_WRSEQ_5TH_KEY_VALUE);
2379 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2380 MPI2_WRSEQ_6TH_KEY_VALUE);
2382 /* Check that the diag write enable (DRWE) bit is on */
2383 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2386 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2388 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2390 if (retry++ == 100) {
2391 mrsas_dprint(sc, MRSAS_OCR,
2392 "Host diag unlock failed!\n");
2396 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2399 /* Send chip reset command */
2400 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2401 host_diag | HOST_DIAG_RESET_ADAPTER);
2404 /* Make sure reset adapter bit is cleared */
2405 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2408 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2410 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2412 if (retry++ == 1000) {
2413 mrsas_dprint(sc, MRSAS_OCR,
2414 "Diag reset adapter never cleared!\n");
2418 if (host_diag & HOST_DIAG_RESET_ADAPTER)
2421 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2422 outbound_scratch_pad)) & MFI_STATE_MASK;
2425 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2427 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2428 outbound_scratch_pad)) & MFI_STATE_MASK;
2430 if (abs_state <= MFI_STATE_FW_INIT) {
2431 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2432 " state = 0x%x\n", abs_state);
2436 /* Wait for FW to become ready */
2437 if (mrsas_transition_to_ready(sc, 1)) {
2438 mrsas_dprint(sc, MRSAS_OCR,
2439 "mrsas: Failed to transition controller to ready.\n");
2443 mrsas_reset_reply_desc(sc);
2444 if (mrsas_ioc_init(sc)) {
2445 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2449 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2450 mrsas_enable_intr(sc);
2451 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2453 /* Re-fire management commands */
2454 for (j = 0 ; j < sc->max_fw_cmds; j++) {
2455 mpt_cmd = sc->mpt_cmd_list[j];
2456 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2457 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2458 if (mfi_cmd->frame->dcmd.opcode ==
2459 MR_DCMD_LD_MAP_GET_INFO) {
2460 mrsas_release_mfi_cmd(mfi_cmd);
2461 mrsas_release_mpt_cmd(mpt_cmd);
2463 req_desc = mrsas_get_request_desc(sc,
2464 mfi_cmd->cmd_id.context.smid - 1);
2465 mrsas_dprint(sc, MRSAS_OCR,
2466 "Re-fire command DCMD opcode 0x%x index %d\n ",
2467 mfi_cmd->frame->dcmd.opcode, j);
2469 device_printf(sc->mrsas_dev,
2470 "Cannot build MPT cmd.\n");
2472 mrsas_fire_cmd(sc, req_desc->addr.u.low,
2473 req_desc->addr.u.high);
2478 /* Reset load balance info */
2479 memset(sc->load_balance_info, 0,
2480 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
2482 if (!mrsas_get_map_info(sc))
2483 mrsas_sync_map_info(sc);
2485 /* Adapter reset completed successfully */
2486 device_printf(sc->mrsas_dev, "Reset successful\n");
2490 /* Reset failed, kill the adapter */
2491 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
2495 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2496 mrsas_enable_intr(sc);
2497 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2500 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2501 mrsas_dprint(sc, MRSAS_OCR,
2502 "Reset Exit with %d.\n", retval);
2507 * mrsas_kill_hba Kill HBA when OCR is not supported.
2508 * input: Adapter Context.
2510 * This function will kill HBA when OCR is not supported.
2512 void mrsas_kill_hba (struct mrsas_softc *sc)
2514 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2515 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2518 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2522 * mrsas_wait_for_outstanding Wait for outstanding commands
2523 * input: Adapter Context.
2525 * This function will wait for 180 seconds for outstanding
2526 * commands to be completed.
2528 int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2530 int i, outstanding, retval = 0;
2533 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2534 if (sc->remove_in_progress) {
2535 mrsas_dprint(sc, MRSAS_OCR,
2536 "Driver remove or shutdown called.\n");
2540 /* Check if firmware is in fault state */
2541 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2542 outbound_scratch_pad)) & MFI_STATE_MASK;
2543 if (fw_state == MFI_STATE_FAULT) {
2544 mrsas_dprint(sc, MRSAS_OCR,
2545 "Found FW in FAULT state, will reset adapter.\n");
2549 outstanding = atomic_read(&sc->fw_outstanding);
2553 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2554 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2555 "commands to complete\n",i,outstanding);
2556 mrsas_complete_cmd(sc);
2561 if (atomic_read(&sc->fw_outstanding)) {
2562 mrsas_dprint(sc, MRSAS_OCR,
2563 " pending commands remain after waiting,"
2564 " will reset adapter.\n");
2572 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2573 * input: Command packet for return to free cmd pool
2575 * This function returns the MFI command to the command list.
2577 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2579 struct mrsas_softc *sc = cmd->sc;
2581 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
2582 cmd->ccb_ptr = NULL;
2583 cmd->cmd_id.frame_count = 0;
2584 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2585 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);
2591 * mrsas_get_controller_info - Returns FW's controller structure
2592 * input: Adapter soft state
2593 * Controller information structure
2595 * Issues an internal command (DCMD) to get the FW's controller structure.
2596 * This information is mainly used to find out the maximum IO transfer per
2597 * command supported by the FW.
2599 static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
2600 struct mrsas_ctrl_info *ctrl_info)
2603 struct mrsas_mfi_cmd *cmd;
2604 struct mrsas_dcmd_frame *dcmd;
2606 cmd = mrsas_get_mfi_cmd(sc);
2609 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
2612 dcmd = &cmd->frame->dcmd;
2614 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
2615 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
2616 mrsas_release_mfi_cmd(cmd);
2619 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2621 dcmd->cmd = MFI_CMD_DCMD;
2622 dcmd->cmd_status = 0xFF;
2623 dcmd->sge_count = 1;
2624 dcmd->flags = MFI_FRAME_DIR_READ;
2627 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
2628 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2629 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
2630 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
2632 if (!mrsas_issue_polled(sc, cmd))
2633 memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
2637 mrsas_free_ctlr_info_cmd(sc);
2638 mrsas_release_mfi_cmd(cmd);
2643 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
2644 * input: Adapter soft state
2646 * Allocates DMAable memory for the controller info internal command.
2648 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2652 /* Allocate get controller info command */
2653 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
2654 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
2655 1, 0, // algnmnt, boundary
2656 BUS_SPACE_MAXADDR_32BIT,// lowaddr
2657 BUS_SPACE_MAXADDR, // highaddr
2658 NULL, NULL, // filter, filterarg
2659 ctlr_info_size, // maxsize
2661 ctlr_info_size, // maxsegsize
2662 BUS_DMA_ALLOCNOW, // flags
2663 &sc->ctlr_info_tag)) {
2664 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
2667 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
2668 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
2669 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
2672 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
2673 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
2674 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
2675 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
2679 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
2684 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
2685 * input: Adapter soft state
2687 * Deallocates memory of the get controller info cmd.
2689 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
2691 if (sc->ctlr_info_phys_addr)
2692 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
2693 if (sc->ctlr_info_mem != NULL)
2694 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
2695 if (sc->ctlr_info_tag != NULL)
2696 bus_dma_tag_destroy(sc->ctlr_info_tag);
2700 * mrsas_issue_polled: Issues a polling command
2701 * inputs: Adapter soft state
2702 * Command packet to be issued
2704 * This function is for posting of internal commands to Firmware. MFI
2705 * requires the cmd_status to be set to 0xFF before posting. The maximum
2706 * wait time of the poll response timer is 180 seconds.
2708 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2710 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
2711 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2714 frame_hdr->cmd_status = 0xFF;
2715 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2717 /* Issue the frame using inbound queue port */
2718 if (mrsas_issue_dcmd(sc, cmd)) {
2719 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2724 * Poll response timer to wait for Firmware response. While this
2725 * timer with the DELAY call could block CPU, the time interval for
2726 * this is only 1 millisecond.
2728 if (frame_hdr->cmd_status == 0xFF) {
2729 for (i=0; i < (max_wait * 1000); i++){
2730 if (frame_hdr->cmd_status == 0xFF)
2736 if (frame_hdr->cmd_status != 0)
2738 if (frame_hdr->cmd_status == 0xFF)
2739 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
2741 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
2748 * mrsas_issue_dcmd - Issues a MFI Pass thru cmd
2749 * input: Adapter soft state
2752 * This function is called by mrsas_issued_blocked_cmd() and
2753 * mrsas_issued_polled(), to build the MPT command and then fire the
2754 * command to Firmware.
2757 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2759 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2761 req_desc = mrsas_build_mpt_cmd(sc, cmd);
2763 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
2767 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
2773 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
2774 * input: Adapter soft state
2777 * This function is called by mrsas_issue_cmd() to build the MPT-MFI
2778 * passthru command and prepares the MPT command to send to Firmware.
2780 MRSAS_REQUEST_DESCRIPTOR_UNION *
2781 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2783 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2786 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
2787 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
2791 index = cmd->cmd_id.context.smid;
2793 req_desc = mrsas_get_request_desc(sc, index-1);
2797 req_desc->addr.Words = 0;
2798 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2800 req_desc->SCSIIO.SMID = index;
2806 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
2807 * input: Adapter soft state
2810 * The MPT command and the io_request are setup as a passthru command.
2811 * The SGE chain address is set to frame_phys_addr of the MFI command.
2814 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
2816 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2817 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
2818 struct mrsas_mpt_cmd *mpt_cmd;
2819 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
2821 mpt_cmd = mrsas_get_mpt_cmd(sc);
2825 /* Save the smid. To be used for returning the cmd */
2826 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
2828 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
2831 * For cmds where the flag is set, store the flag and check
2832 * on completion. For cmds with this flag, don't call
2833 * mrsas_complete_cmd.
2836 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
2837 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2839 io_req = mpt_cmd->io_request;
2841 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
2842 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
2843 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
2844 sgl_ptr_end->Flags = 0;
2847 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2849 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2850 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
2851 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
2853 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
2855 mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2856 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2858 mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
2864 * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
2865 * input: Adapter soft state
2866 * Command to be issued
2868 * This function waits on an event for the command to be returned
2869 * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
2870 * Used for issuing internal and ioctl commands.
2872 int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2874 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2875 unsigned long total_time = 0;
2878 /* Initialize cmd_status */
2879 cmd->cmd_status = ECONNREFUSED;
2881 /* Build MPT-MFI command for issue to FW */
2882 if (mrsas_issue_dcmd(sc, cmd)){
2883 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2887 sc->chan = (void*)&cmd;
2889 /* The following is for debug only... */
2890 //device_printf(sc->mrsas_dev,"DCMD issued to FW, about to sleep-wait...\n");
2891 //device_printf(sc->mrsas_dev,"sc->chan = %p\n", sc->chan);
2894 if (cmd->cmd_status == ECONNREFUSED){
2895 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
2900 if (total_time >= max_wait) {
2901 device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
2910 * mrsas_complete_mptmfi_passthru - Completes a command
2911 * input: sc: Adapter soft state
2912 * cmd: Command to be completed
2913 * status: cmd completion status
2915 * This function is called from mrsas_complete_cmd() after an interrupt
2916 * is received from Firmware, and io_request->Function is
2917 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
2920 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
2923 struct mrsas_header *hdr = &cmd->frame->hdr;
2924 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
2926 /* Reset the retry counter for future re-tries */
2927 cmd->retry_for_fw_reset = 0;
2930 cmd->ccb_ptr = NULL;
2933 case MFI_CMD_INVALID:
2934 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
2936 case MFI_CMD_PD_SCSI_IO:
2937 case MFI_CMD_LD_SCSI_IO:
2939 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
2940 * issued either through an IO path or an IOCTL path. If it
2941 * was via IOCTL, we will send it to internal completion.
2943 if (cmd->sync_cmd) {
2945 mrsas_wakeup(sc, cmd);
2951 /* Check for LD map update */
2952 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
2953 (cmd->frame->dcmd.mbox.b[1] == 1)) {
2954 sc->fast_path_io = 0;
2955 lockmgr(&sc->raidmap_lock, LK_EXCLUSIVE);
2956 if (cmd_status != 0) {
2957 if (cmd_status != MFI_STAT_NOT_FOUND)
2958 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
2960 mrsas_release_mfi_cmd(cmd);
2961 lockmgr(&sc->raidmap_lock, LK_RELEASE);
2967 mrsas_release_mfi_cmd(cmd);
2968 if (MR_ValidateMapInfo(sc))
2969 sc->fast_path_io = 0;
2971 sc->fast_path_io = 1;
2972 mrsas_sync_map_info(sc);
2973 lockmgr(&sc->raidmap_lock, LK_RELEASE);
2976 #if 0 //currently not supporting event handling, so commenting out
2977 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
2978 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
2979 mrsas_poll_wait_aen = 0;
2982 /* See if got an event notification */
2983 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
2984 mrsas_complete_aen(sc, cmd);
2986 mrsas_wakeup(sc, cmd);
2989 /* Command issued to abort another cmd return */
2990 mrsas_complete_abort(sc, cmd);
2993 device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
2999 * mrsas_wakeup - Completes an internal command
3000 * input: Adapter soft state
3001 * Command to be completed
3003 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
3004 * a wait timer is started. This function is called from
3005 * mrsas_complete_mptmfi_passthru() as it completes the command,
3006 * to wake up from the command wait.
3008 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3010 cmd->cmd_status = cmd->frame->io.cmd_status;
3012 if (cmd->cmd_status == ECONNREFUSED)
3013 cmd->cmd_status = 0;
3015 /* For debug only ... */
3016 //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
3018 sc->chan = (void*)&cmd;
3019 wakeup_one((void *)&sc->chan);
3024 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
3025 * input: Adapter soft state
3026 * Shutdown/Hibernate
3028 * This function issues a DCMD internal command to Firmware to initiate
3029 * shutdown of the controller.
3031 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3033 struct mrsas_mfi_cmd *cmd;
3034 struct mrsas_dcmd_frame *dcmd;
3036 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3039 cmd = mrsas_get_mfi_cmd(sc);
3041 device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
3046 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3048 if (sc->map_update_cmd)
3049 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3051 dcmd = &cmd->frame->dcmd;
3052 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3054 dcmd->cmd = MFI_CMD_DCMD;
3055 dcmd->cmd_status = 0x0;
3056 dcmd->sge_count = 0;
3057 dcmd->flags = MFI_FRAME_DIR_NONE;
3060 dcmd->data_xfer_len = 0;
3061 dcmd->opcode = opcode;
3063 device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
3065 mrsas_issue_blocked_cmd(sc, cmd);
3066 mrsas_release_mfi_cmd(cmd);
3072 * mrsas_flush_cache: Requests FW to flush all its caches
3073 * input: Adapter soft state
3075 * This function is issues a DCMD internal command to Firmware to initiate
3076 * flushing of all caches.
3078 static void mrsas_flush_cache(struct mrsas_softc *sc)
3080 struct mrsas_mfi_cmd *cmd;
3081 struct mrsas_dcmd_frame *dcmd;
3083 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3086 cmd = mrsas_get_mfi_cmd(sc);
3088 device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
3092 dcmd = &cmd->frame->dcmd;
3093 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3095 dcmd->cmd = MFI_CMD_DCMD;
3096 dcmd->cmd_status = 0x0;
3097 dcmd->sge_count = 0;
3098 dcmd->flags = MFI_FRAME_DIR_NONE;
3101 dcmd->data_xfer_len = 0;
3102 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3103 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3105 mrsas_issue_blocked_cmd(sc, cmd);
3106 mrsas_release_mfi_cmd(cmd);
3112 * mrsas_get_map_info: Load and validate RAID map
3113 * input: Adapter instance soft state
3115 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
3116 * to load and validate RAID map. It returns 0 if successful, 1 other-
3119 static int mrsas_get_map_info(struct mrsas_softc *sc)
3121 uint8_t retcode = 0;
3123 sc->fast_path_io = 0;
3124 if (!mrsas_get_ld_map_info(sc)) {
3125 retcode = MR_ValidateMapInfo(sc);
3127 sc->fast_path_io = 1;
3135 * mrsas_get_ld_map_info: Get FW's ld_map structure
3136 * input: Adapter instance soft state
3138 * Issues an internal command (DCMD) to get the FW's controller PD
3141 static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
3144 struct mrsas_mfi_cmd *cmd;
3145 struct mrsas_dcmd_frame *dcmd;
3146 MR_FW_RAID_MAP_ALL *map;
3147 bus_addr_t map_phys_addr = 0;
3149 cmd = mrsas_get_mfi_cmd(sc);
3151 device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
3155 dcmd = &cmd->frame->dcmd;
3157 map = sc->raidmap_mem[(sc->map_id & 1)];
3158 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3160 device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
3161 mrsas_release_mfi_cmd(cmd);
3164 memset(map, 0, sizeof(*map));
3165 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3167 dcmd->cmd = MFI_CMD_DCMD;
3168 dcmd->cmd_status = 0xFF;
3169 dcmd->sge_count = 1;
3170 dcmd->flags = MFI_FRAME_DIR_READ;
3173 dcmd->data_xfer_len = sc->map_sz;
3174 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3175 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3176 dcmd->sgl.sge32[0].length = sc->map_sz;
3177 if (!mrsas_issue_polled(sc, cmd))
3181 device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
3184 mrsas_release_mfi_cmd(cmd);
3189 * mrsas_sync_map_info: Get FW's ld_map structure
3190 * input: Adapter instance soft state
3192 * Issues an internal command (DCMD) to get the FW's controller PD
3195 static int mrsas_sync_map_info(struct mrsas_softc *sc)
3198 struct mrsas_mfi_cmd *cmd;
3199 struct mrsas_dcmd_frame *dcmd;
3200 uint32_t size_sync_info, num_lds;
3201 MR_LD_TARGET_SYNC *target_map = NULL;
3202 MR_FW_RAID_MAP_ALL *map;
3204 MR_LD_TARGET_SYNC *ld_sync;
3205 bus_addr_t map_phys_addr = 0;
3207 cmd = mrsas_get_mfi_cmd(sc);
3209 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
3213 map = sc->raidmap_mem[sc->map_id & 1];
3214 num_lds = map->raidMap.ldCount;
3216 dcmd = &cmd->frame->dcmd;
3217 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3218 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3220 target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
3221 memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
3223 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3225 ld_sync = (MR_LD_TARGET_SYNC *)target_map;
3227 for (i = 0; i < num_lds; i++, ld_sync++) {
3228 raid = MR_LdRaidGet(i, map);
3229 ld_sync->targetId = MR_GetLDTgtId(i, map);
3230 ld_sync->seqNum = raid->seqNum;
3233 dcmd->cmd = MFI_CMD_DCMD;
3234 dcmd->cmd_status = 0xFF;
3235 dcmd->sge_count = 1;
3236 dcmd->flags = MFI_FRAME_DIR_WRITE;
3239 dcmd->data_xfer_len = sc->map_sz;
3240 dcmd->mbox.b[0] = num_lds;
3241 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3242 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3243 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3244 dcmd->sgl.sge32[0].length = sc->map_sz;
3246 sc->map_update_cmd = cmd;
3247 if (mrsas_issue_dcmd(sc, cmd)) {
3248 device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
3255 * mrsas_get_pd_list: Returns FW's PD list structure
3256 * input: Adapter soft state
3258 * Issues an internal command (DCMD) to get the FW's controller PD
3259 * list structure. This information is mainly used to find out about
3260 * system supported by Firmware.
3262 static int mrsas_get_pd_list(struct mrsas_softc *sc)
3264 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
3265 struct mrsas_mfi_cmd *cmd;
3266 struct mrsas_dcmd_frame *dcmd;
3267 struct MR_PD_LIST *pd_list_mem;
3268 struct MR_PD_ADDRESS *pd_addr;
3269 bus_addr_t pd_list_phys_addr = 0;
3270 struct mrsas_tmp_dcmd *tcmd;
3272 cmd = mrsas_get_mfi_cmd(sc);
3274 device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
3278 dcmd = &cmd->frame->dcmd;
3280 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3281 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3282 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3283 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
3284 mrsas_release_mfi_cmd(cmd);
3288 pd_list_mem = tcmd->tmp_dcmd_mem;
3289 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3291 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3293 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3294 dcmd->mbox.b[1] = 0;
3295 dcmd->cmd = MFI_CMD_DCMD;
3296 dcmd->cmd_status = 0xFF;
3297 dcmd->sge_count = 1;
3298 dcmd->flags = MFI_FRAME_DIR_READ;
3301 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3302 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3303 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3304 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3306 if (!mrsas_issue_polled(sc, cmd))
3311 /* Get the instance PD list */
3312 pd_count = MRSAS_MAX_PD;
3313 pd_addr = pd_list_mem->addr;
3314 if (retcode == 0 && pd_list_mem->count < pd_count) {
3315 memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3316 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3317 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3318 sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
3319 sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
3324 /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */
3325 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3326 mrsas_free_tmp_dcmd(tcmd);
3327 mrsas_release_mfi_cmd(cmd);
3328 kfree(tcmd, M_MRSAS);
3333 * mrsas_get_ld_list: Returns FW's LD list structure
3334 * input: Adapter soft state
3336 * Issues an internal command (DCMD) to get the FW's controller PD
3337 * list structure. This information is mainly used to find out about
3338 * supported by the FW.
3340 static int mrsas_get_ld_list(struct mrsas_softc *sc)
3342 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3343 struct mrsas_mfi_cmd *cmd;
3344 struct mrsas_dcmd_frame *dcmd;
3345 struct MR_LD_LIST *ld_list_mem;
3346 bus_addr_t ld_list_phys_addr = 0;
3347 struct mrsas_tmp_dcmd *tcmd;
3349 cmd = mrsas_get_mfi_cmd(sc);
3351 device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
3355 dcmd = &cmd->frame->dcmd;
3357 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3358 ld_list_size = sizeof(struct MR_LD_LIST);
3359 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3360 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
3361 mrsas_release_mfi_cmd(cmd);
3365 ld_list_mem = tcmd->tmp_dcmd_mem;
3366 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3368 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3370 dcmd->cmd = MFI_CMD_DCMD;
3371 dcmd->cmd_status = 0xFF;
3372 dcmd->sge_count = 1;
3373 dcmd->flags = MFI_FRAME_DIR_READ;
3375 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3376 dcmd->opcode = MR_DCMD_LD_GET_LIST;
3377 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3378 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3381 if (!mrsas_issue_polled(sc, cmd))
3386 /* Get the instance LD list */
3387 if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
3388 sc->CurLdCount = ld_list_mem->ldCount;
3389 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
3390 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3391 if (ld_list_mem->ldList[ld_index].state != 0) {
3392 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3393 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3398 mrsas_free_tmp_dcmd(tcmd);
3399 mrsas_release_mfi_cmd(cmd);
3400 kfree(tcmd, M_MRSAS);
3405 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
3406 * input: Adapter soft state
3410 * Allocates DMAable memory for a temporary internal command. The allocated
3411 * memory is initialized to all zeros upon successful loading of the dma map.
3414 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
3417 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
3418 1, 0, // algnmnt, boundary
3419 BUS_SPACE_MAXADDR_32BIT,// lowaddr
3420 BUS_SPACE_MAXADDR, // highaddr
3421 NULL, NULL, // filter, filterarg
3425 BUS_DMA_ALLOCNOW, // flags
3426 &tcmd->tmp_dcmd_tag)) {
3427 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3430 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3431 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3432 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3435 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3436 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3437 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3438 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3442 memset(tcmd->tmp_dcmd_mem, 0, size);
3447 * mrsas_free_tmp_dcmd: Free memory for temporary command
3448 * input: temporary dcmd pointer
3450 * Deallocates memory of the temporary command for use in the construction
3451 * of the internal DCMD.
3453 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3455 if (tmp->tmp_dcmd_phys_addr)
3456 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3457 if (tmp->tmp_dcmd_mem != NULL)
3458 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3459 if (tmp->tmp_dcmd_tag != NULL)
3460 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3464 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
3465 * input: Adapter soft state
3466 * Previously issued cmd to be aborted
3468 * This function is used to abort previously issued commands, such as AEN and
3469 * RAID map sync map commands. The abort command is sent as a DCMD internal
3470 * command and subsequently the driver will wait for a return status. The
3471 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3473 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3474 struct mrsas_mfi_cmd *cmd_to_abort)
3476 struct mrsas_mfi_cmd *cmd;
3477 struct mrsas_abort_frame *abort_fr;
3478 u_int8_t retcode = 0;
3479 unsigned long total_time = 0;
3480 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3482 cmd = mrsas_get_mfi_cmd(sc);
3484 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3488 abort_fr = &cmd->frame->abort;
3490 /* Prepare and issue the abort frame */
3491 abort_fr->cmd = MFI_CMD_ABORT;
3492 abort_fr->cmd_status = 0xFF;
3493 abort_fr->flags = 0;
3494 abort_fr->abort_context = cmd_to_abort->index;
3495 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3496 abort_fr->abort_mfi_phys_addr_hi = 0;
3499 cmd->cmd_status = 0xFF;
3501 if (mrsas_issue_dcmd(sc, cmd)) {
3502 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3506 /* Wait for this cmd to complete */
3507 sc->chan = (void*)&cmd;
3509 if (cmd->cmd_status == 0xFF){
3510 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3515 if (total_time >= max_wait) {
3516 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3523 mrsas_release_mfi_cmd(cmd);
3528 * mrsas_complete_abort: Completes aborting a command
3529 * input: Adapter soft state
3530 * Cmd that was issued to abort another cmd
3532 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
3533 * to change after sending the command. This function is called from
3534 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3536 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3538 if (cmd->sync_cmd) {
3540 cmd->cmd_status = 0;
3541 sc->chan = (void*)&cmd;
3542 wakeup_one((void *)&sc->chan);
3548 * mrsas_aen_handler: Callback function for AEN processing from thread context.
3549 * input: Adapter soft state
3552 void mrsas_aen_handler(struct mrsas_softc *sc)
3554 union mrsas_evt_class_locale class_locale;
3560 device_printf(sc->mrsas_dev, "invalid instance!\n");
3564 if (sc->evt_detail_mem) {
3565 switch (sc->evt_detail_mem->code) {
3566 case MR_EVT_PD_INSERTED:
3567 mrsas_get_pd_list(sc);
3568 mrsas_bus_scan_sim(sc, sc->sim_1);
3571 case MR_EVT_PD_REMOVED:
3572 mrsas_get_pd_list(sc);
3573 mrsas_bus_scan_sim(sc, sc->sim_1);
3576 case MR_EVT_LD_OFFLINE:
3577 case MR_EVT_CFG_CLEARED:
3578 case MR_EVT_LD_DELETED:
3579 mrsas_bus_scan_sim(sc, sc->sim_0);
3582 case MR_EVT_LD_CREATED:
3583 mrsas_get_ld_list(sc);
3584 mrsas_bus_scan_sim(sc, sc->sim_0);
3587 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3588 case MR_EVT_FOREIGN_CFG_IMPORTED:
3589 case MR_EVT_LD_STATE_CHANGE:
3597 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
3601 mrsas_get_pd_list(sc);
3602 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3603 mrsas_bus_scan_sim(sc, sc->sim_1);
3604 mrsas_get_ld_list(sc);
3605 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3606 mrsas_bus_scan_sim(sc, sc->sim_0);
3609 seq_num = sc->evt_detail_mem->seq_num + 1;
3611 // Register AEN with FW for latest sequence number plus 1
3612 class_locale.members.reserved = 0;
3613 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3614 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3616 if (sc->aen_cmd != NULL )
3619 lockmgr(&sc->aen_lock, LK_EXCLUSIVE);
3620 error = mrsas_register_aen(sc, seq_num,
3622 lockmgr(&sc->aen_lock, LK_RELEASE);
3625 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3631 * mrsas_complete_aen: Completes AEN command
3632 * input: Adapter soft state
3633 * Cmd that was issued to register the AEN
3635 * This function will be called from ISR and will continue
3636 * event processing from thread context by enqueuing task
3637 * in ev_tq (callback function "mrsas_aen_handler").
3639 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3642 * Don't signal app if it is just an aborted previously registered aen
3644 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3651 mrsas_release_mfi_cmd(cmd);
3653 if (!sc->remove_in_progress)
3654 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
3659 static device_method_t mrsas_methods[] = {
3660 DEVMETHOD(device_probe, mrsas_probe),
3661 DEVMETHOD(device_attach, mrsas_attach),
3662 DEVMETHOD(device_detach, mrsas_detach),
3663 DEVMETHOD(device_suspend, mrsas_suspend),
3664 DEVMETHOD(device_resume, mrsas_resume),
3665 DEVMETHOD(bus_print_child, bus_generic_print_child),
3666 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
3670 static driver_t mrsas_driver = {
3673 sizeof(struct mrsas_softc)
3676 static devclass_t mrsas_devclass;
3677 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, NULL, NULL);
3678 MODULE_VERSION(mrsas, 1);
3679 MODULE_DEPEND(mrsas, cam, 1, 1, 1);