2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
53 * FreeBSD projects/head_mfi/ r233016
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
64 #include <sys/eventhandler.h>
66 #include <sys/bus_dma.h>
70 #include <sys/signalvar.h>
71 #include <sys/device.h>
72 #include <sys/mplock2.h>
73 #include <sys/taskqueue.h>
75 #include <bus/cam/scsi/scsi_all.h>
77 #include <bus/pci/pcivar.h>
79 #include <dev/raid/mfi/mfireg.h>
80 #include <dev/raid/mfi/mfi_ioctl.h>
81 #include <dev/raid/mfi/mfivar.h>
/*
 * Forward declarations for the driver's file-local helpers: command pool
 * management, firmware/event plumbing, per-chipset register accessors
 * (xscale vs. ppc vs. thunderbolt variants), and kqueue filter hooks.
 */
83 static int	mfi_alloc_commands(struct mfi_softc *);
84 static int	mfi_comms_init(struct mfi_softc *);
85 static int	mfi_get_controller_info(struct mfi_softc *);
86 static int	mfi_get_log_state(struct mfi_softc *,
87 		    struct mfi_evt_log_state **);
88 static int	mfi_parse_entries(struct mfi_softc *, int, int);
89 static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
90 		    uint32_t, void **, size_t);
91 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
92 static void	mfi_startup(void *arg);
93 static void	mfi_intr(void *arg);
94 static void	mfi_ldprobe(struct mfi_softc *sc);
95 static void	mfi_syspdprobe(struct mfi_softc *sc);
96 static void	mfi_handle_evt(void *context, int pending);
97 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
98 static void	mfi_aen_complete(struct mfi_command *);
99 static int	mfi_add_ld(struct mfi_softc *sc, int);
100 static void	mfi_add_ld_complete(struct mfi_command *);
101 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
102 static void	mfi_add_sys_pd_complete(struct mfi_command *);
103 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
104 static void	mfi_bio_complete(struct mfi_command *);
105 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
106 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
107 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
108 static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
109 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
110 static void	mfi_timeout(void *);
111 static int	mfi_user_command(struct mfi_softc *,
112 		    struct mfi_ioc_passthru *);
113 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
114 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
115 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
116 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
117 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
118 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
119 static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
120 static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
123 static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
124 static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
125 static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
126 static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
127 static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
129 static void	mfi_filter_detach(struct knote *);
130 static int	mfi_filter_read(struct knote *, long);
131 static int	mfi_filter_write(struct knote *, long);
/*
 * Loader tunables and sysctls under hw.mfi.*: event locale/class filters
 * for AEN reporting, the driver command-pool ceiling (read-only at
 * runtime), JBOD change detection, and the per-command timeout.
 */
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
137 	    0, "event message locale");
139 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
142 	    0, "event message class");
144 static int	mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
149 static int	mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
152 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
154 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
155 TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RW, &mfi_cmd_timeout,
157 	   0, "Command timeout (in seconds)");
159 /* Management interface */
160 static d_open_t		mfi_open;
161 static d_close_t	mfi_close;
162 static d_ioctl_t	mfi_ioctl;
163 static d_kqfilter_t	mfi_kqfilter;
/*
 * Character-device entry points for /dev/mfi%d (management/ioctl path).
 * NOTE(review): the .d_open initializer line from the original file is
 * missing in this excerpt.
 */
165 static struct dev_ops mfi_ops = {
168 	.d_close = mfi_close,
169 	.d_ioctl = mfi_ioctl,
170 	.d_kqfilter = mfi_kqfilter,
/* kqueue filters used by mfi_kqfilter for AEN readability/writability. */
173 static struct filterops mfi_read_filterops =
174 	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
175 static struct filterops mfi_write_filterops =
176 	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
178 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
180 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
181 struct mfi_skinny_dma_info mfi_skinny;
/* Enable interrupts on xscale-based (1064R) controllers: unmask OMSK. */
184 mfi_enable_intr_xscale(struct mfi_softc *sc)
186 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on ppc-style controllers.  Each family clears the
 * doorbell (ODCR0) and unmasks only its own interrupt bit in OMSK;
 * unknown adapters are a programming error, hence the panic.
 */
190 mfi_enable_intr_ppc(struct mfi_softc *sc)
192 	if (sc->mfi_flags & MFI_FLAGS_1078) {
193 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
194 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
195 	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
196 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
197 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
201 		panic("unknown adapter type");
/* Read the raw firmware status word (xscale: outbound message reg 0). */
206 mfi_read_fw_status_xscale(struct mfi_softc *sc)
208 	return MFI_READ4(sc, MFI_OMSG0);
/* Read the raw firmware status word (ppc: outbound scratch pad 0). */
212 mfi_read_fw_status_ppc(struct mfi_softc *sc)
214 	return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether an xscale controller is asserting an interrupt and, if
 * so, acknowledge it by writing the status back to OSTS.  Returns
 * non-zero (spurious) when MFI_OSTS_INTR_VALID is clear.
 */
218 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
222 	status = MFI_READ4(sc, MFI_OSTS);
223 	if ((status & MFI_OSTS_INTR_VALID) == 0)
226 	MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check/acknowledge an interrupt on ppc-style controllers.  The reply
 * bit tested depends on the chip family; skinny parts ack via OSTS,
 * the others via the doorbell clear register ODCR0.
 */
231 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
235 	status = MFI_READ4(sc, MFI_OSTS);
236 	if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
237 	    ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
238 	    ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
241 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
242 		MFI_WRITE4(sc, MFI_OSTS, status);
244 		MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to an xscale controller: the frame's bus address
 * (shifted, 8-byte aligned) is combined with the extra-frame count and
 * written to the inbound queue port.
 */
249 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
251 	MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
/*
 * Post a command on ppc-style controllers.  Skinny parts use a 64-bit
 * queue port (low word here, high word zeroed); others use the single
 * 32-bit IQP.  Low bits encode the frame count and a "valid" flag.
 */
255 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
257 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
258 		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
259 		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
261 		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
/*
 * Drive the controller firmware to the READY state.  Reads the status
 * register in a loop; for each non-READY state it performs the doorbell
 * write that nudges the firmware forward (clear handshake, ready ack,
 * hotplug ack) and then polls up to max_wait for a state change.
 * DEVICE_SCAN progress is tracked via the full status word so a
 * still-advancing scan is not treated as "stuck".
 * NOTE(review): several lines of the original (switch header, breaks,
 * returns, DELAY calls) are missing from this excerpt.
 */
266 mfi_transition_firmware(struct mfi_softc *sc)
268 	uint32_t fw_state, cur_state;
270 	uint32_t cur_abs_reg_val = 0;
271 	uint32_t prev_abs_reg_val = 0;
273 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
274 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
275 	while (fw_state != MFI_FWSTATE_READY) {
277 			device_printf(sc->mfi_dev, "Waiting for firmware to "
279 		cur_state = fw_state;
281 		case MFI_FWSTATE_FAULT:
282 			device_printf(sc->mfi_dev, "Firmware fault\n");
284 		case MFI_FWSTATE_WAIT_HANDSHAKE:
285 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
286 				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
288 				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
289 			max_wait = MFI_RESET_WAIT_TIME;
291 		case MFI_FWSTATE_OPERATIONAL:
292 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
293 				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
295 				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
296 			max_wait = MFI_RESET_WAIT_TIME;
298 		case MFI_FWSTATE_UNDEFINED:
299 		case MFI_FWSTATE_BB_INIT:
300 			max_wait = MFI_RESET_WAIT_TIME;
302 		case MFI_FWSTATE_FW_INIT_2:
303 			max_wait = MFI_RESET_WAIT_TIME;
305 		case MFI_FWSTATE_FW_INIT:
306 		case MFI_FWSTATE_FLUSH_CACHE:
307 			max_wait = MFI_RESET_WAIT_TIME;
309 		case MFI_FWSTATE_DEVICE_SCAN:
310 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
311 			prev_abs_reg_val = cur_abs_reg_val;
313 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
314 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
315 				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
317 				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
318 			max_wait = MFI_RESET_WAIT_TIME;
321 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
325 		for (i = 0; i < (max_wait * 10); i++) {
326 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
327 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
328 			if (fw_state == cur_state)
333 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
334 			/* Check the device scanning progress */
335 			if (prev_abs_reg_val != cur_abs_reg_val)
338 		if (fw_state == cur_state) {
339 			device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load callback: stash the single segment's bus address into
 * the caller-supplied bus_addr_t (passed via arg).
 */
348 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
353 	*addr = segs[0].ds_addr;
/*
 * Main attach routine.  Initializes locks/queues, selects the per-chipset
 * register accessors, waits for firmware READY, allocates all DMA regions
 * (version buffer, optional ThunderBolt pools, data-buffer tag, comms
 * queues, command frames, sense buffers), allocates the command pool,
 * initializes firmware comms, hooks the interrupt, and finally creates
 * the management cdev, sysctl nodes, mfip child, and watchdog callout.
 * Returns 0 on success (error paths are partly outside this excerpt).
 * NOTE(review): numerous original lines (gotos, returns, braces) are
 * missing from this excerpt.
 */
357 mfi_attach(struct mfi_softc *sc)
360 	int error, commsz, framessz, sensesz;
361 	int frames, unit, max_fw_sge;
362 	uint32_t tb_mem_size = 0;
367 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
370 	lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
371 	lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
372 	TAILQ_INIT(&sc->mfi_ld_tqh);
373 	TAILQ_INIT(&sc->mfi_syspd_tqh);
374 	TAILQ_INIT(&sc->mfi_evt_queue);
375 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
376 	TAILQ_INIT(&sc->mfi_aen_pids);
377 	TAILQ_INIT(&sc->mfi_cam_ccbq);
385 	sc->last_seq_num = 0;
386 	sc->disableOnlineCtrlReset = 1;
387 	sc->issuepend_done = 1;
388 	sc->hw_crit_error = 0;
	/* Select register accessors per controller family. */
390 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
391 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
392 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
393 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
394 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
395 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
396 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
397 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
398 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
399 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
400 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
401 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
403 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
405 		sc->mfi_enable_intr = mfi_enable_intr_ppc;
406 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
407 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
408 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
412 	/* Before we get too far, see if the firmware is working */
413 	if ((error = mfi_transition_firmware(sc)) != 0) {
414 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
415 		    "error %d\n", error);
419 	/* Start: LSIP200113393 */
420 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
421 				1, 0,			/* algnmnt, boundary */
422 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
423 				BUS_SPACE_MAXADDR,	/* highaddr */
424 				NULL, NULL,		/* filter, filterarg */
425 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),	/* maxsize */
427 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),	/* maxsegsize */
429 				&sc->verbuf_h_dmat)) {
430 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
433 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
434 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
435 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
438 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
439 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
440 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
441 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
442 	/* End: LSIP200113393 */
445 	 * Get information needed for sizing the contiguous memory for the
446 	 * frame pool.  Size down the sgl parameter since we know that
447 	 * we will never need more than what's required for MAXPHYS.
448 	 * It would be nice if these constants were available at runtime
449 	 * instead of compile time.
451 	status = sc->mfi_read_fw_status(sc);
452 	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
453 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
454 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
456 	/* ThunderBolt Support get the contiguous memory */
458 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
459 		mfi_tbolt_init_globals(sc);
460 		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
461 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
462 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
464 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
465 				1, 0,			/* algnmnt, boundary */
466 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
467 				BUS_SPACE_MAXADDR,	/* highaddr */
468 				NULL, NULL,		/* filter, filterarg */
469 				tb_mem_size,		/* maxsize */
471 				tb_mem_size,		/* maxsegsize */
474 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
477 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
478 		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
479 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
482 		bzero(sc->request_message_pool, tb_mem_size);
483 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
484 		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
486 		/* For ThunderBolt memory init */
487 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
488 				0x100, 0,		/* alignmnt, boundary */
489 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 				BUS_SPACE_MAXADDR,	/* highaddr */
491 				NULL, NULL,		/* filter, filterarg */
492 				MFI_FRAME_SIZE,		/* maxsize */
494 				MFI_FRAME_SIZE,		/* maxsegsize */
496 				&sc->mfi_tb_init_dmat)) {
497 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
500 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
501 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
502 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
505 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
506 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
507 		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
508 		    &sc->mfi_tb_init_busaddr, 0);
509 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
511 			device_printf(sc->mfi_dev,
512 			    "Thunderbolt pool preparation error\n");
517 		Allocate DMA memory mapping for MPI2 IOC Init descriptor,
518 		we are taking it different from what we have allocated for Request
519 		and reply descriptors to avoid confusion later
521 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
522 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
523 				1, 0,			/* algnmnt, boundary */
524 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
525 				BUS_SPACE_MAXADDR,	/* highaddr */
526 				NULL, NULL,		/* filter, filterarg */
527 				tb_mem_size,		/* maxsize */
529 				tb_mem_size,		/* maxsegsize */
531 				&sc->mfi_tb_ioc_init_dmat)) {
532 			device_printf(sc->mfi_dev,
533 			    "Cannot allocate comms DMA tag\n");
536 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
537 		    (void **)&sc->mfi_tb_ioc_init_desc,
538 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
539 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
542 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
543 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
544 		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
545 		    &sc->mfi_tb_ioc_init_busaddr, 0);
548 	 * Create the dma tag for data buffers.  Used both for block I/O
549 	 * and for various internal data queries.
551 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
552 				1, 0,			/* algnmnt, boundary */
553 				BUS_SPACE_MAXADDR,	/* lowaddr */
554 				BUS_SPACE_MAXADDR,	/* highaddr */
555 				NULL, NULL,		/* filter, filterarg */
556 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
557 				sc->mfi_max_sge,	/* nsegments */
558 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
559 				BUS_DMA_ALLOCNOW,	/* flags */
560 				&sc->mfi_buffer_dmat)) {
561 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
566 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
567 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
568 	 * entry, so the calculated size here will be will be 1 more than
569 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
571 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
572 	    sizeof(struct mfi_hwcomms);
573 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
574 				1, 0,			/* algnmnt, boundary */
575 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
576 				BUS_SPACE_MAXADDR,	/* highaddr */
577 				NULL, NULL,		/* filter, filterarg */
578 				commsz,			/* maxsize */
580 				commsz,			/* maxsegsize */
582 				&sc->mfi_comms_dmat)) {
583 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
586 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
587 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
588 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
591 	bzero(sc->mfi_comms, commsz);
592 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
593 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
595 	 * Allocate DMA memory for the command frames.  Keep them in the
596 	 * lower 4GB for efficiency.  Calculate the size of the commands at
597 	 * the same time; each command is one 64 byte frame plus a set of
598 	 * additional frames for holding sg lists or other data.
599 	 * The assumption here is that the SG list will start at the second
600 	 * frame and not use the unused bytes in the first frame.  While this
601 	 * isn't technically correct, it simplifies the calculation and allows
602 	 * for command frames that might be larger than an mfi_io_frame.
604 	if (sizeof(bus_addr_t) == 8) {
605 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
606 		sc->mfi_flags |= MFI_FLAGS_SG64;
608 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
610 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
611 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
612 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
613 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
614 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
615 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
616 				64, 0,			/* algnmnt, boundary */
617 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
618 				BUS_SPACE_MAXADDR,	/* highaddr */
619 				NULL, NULL,		/* filter, filterarg */
620 				framessz,		/* maxsize */
622 				framessz,		/* maxsegsize */
624 				&sc->mfi_frames_dmat)) {
625 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
628 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
629 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
630 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
633 	bzero(sc->mfi_frames, framessz);
634 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
635 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
637 	 * Allocate DMA memory for the frame sense data.  Keep them in the
638 	 * lower 4GB for efficiency
640 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
641 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
642 				4, 0,			/* algnmnt, boundary */
643 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
644 				BUS_SPACE_MAXADDR,	/* highaddr */
645 				NULL, NULL,		/* filter, filterarg */
646 				sensesz,		/* maxsize */
648 				sensesz,		/* maxsegsize */
650 				&sc->mfi_sense_dmat)) {
651 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
654 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
655 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
656 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
659 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
660 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
661 	if ((error = mfi_alloc_commands(sc)) != 0)
665 	 * Before moving the FW to operational state, check whether
666 	 * hostmemory is required by the FW or not
669 	/* ThunderBolt MFI_IOC2 INIT */
670 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
671 		sc->mfi_disable_intr(sc);
672 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
673 			device_printf(sc->mfi_dev,
674 			    "TB Init has failed with error %d\n",error);
678 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
680 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
681 		    mfi_intr_tbolt, sc, &sc->mfi_intr, NULL)) {
682 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
685 		sc->mfi_enable_intr(sc);
688 		if ((error = mfi_comms_init(sc)) != 0)
691 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
692 		    mfi_intr, sc, &sc->mfi_intr, NULL)) {
693 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
696 		sc->mfi_enable_intr(sc);
698 	if ((error = mfi_get_controller_info(sc)) != 0)
700 	sc->disableOnlineCtrlReset = 0;
702 	/* Register a config hook to probe the bus for arrays */
703 	sc->mfi_ich.ich_func = mfi_startup;
704 	sc->mfi_ich.ich_arg = sc;
705 	sc->mfi_ich.ich_desc = "mfi";
706 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
707 		device_printf(sc->mfi_dev, "Cannot establish configuration "
711 	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
712 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
717 	 * Register a shutdown handler.
719 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
720 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
721 		device_printf(sc->mfi_dev, "Warning: shutdown event "
722 		    "registration failed\n");
726 	 * Create the control device for doing management
728 	unit = device_get_unit(sc->mfi_dev);
729 	sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
730 	    0640, "mfi%d", unit);
732 	make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
733 	if (sc->mfi_cdev != NULL)
734 		sc->mfi_cdev->si_drv1 = sc;
735 	sysctl_ctx_init(&sc->mfi_sysctl_ctx);
736 	sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
737 	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
738 	    device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
739 	if (sc->mfi_sysctl_tree == NULL) {
740 		device_printf(sc->mfi_dev, "can't add sysctl node\n");
743 	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
744 	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
745 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
746 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
747 	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
748 	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
749 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
750 	    &sc->mfi_keep_deleted_volumes, 0,
751 	    "Don't detach the mfid device for a busy volume that is deleted");
753 	device_add_child(sc->mfi_dev, "mfip", -1);
754 	bus_generic_attach(sc->mfi_dev);
756 	/* Start the timeout watchdog */
757 	callout_init_mp(&sc->mfi_watchdog_callout);
758 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
/*
 * Carve the preallocated frame and sense DMA regions into mfi_command
 * structures (at most min(mfi_max_cmds, firmware max)), create a DMA map
 * for each, and release each command onto the free queue.  Returns 0 on
 * success (failure paths fall outside this excerpt).
 */
765 mfi_alloc_commands(struct mfi_softc *sc)
767 	struct mfi_command *cm;
771 	 * XXX Should we allocate all the commands up front, or allocate on
772 	 * demand later like 'aac' does?
774 	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
776 		device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
777 		   "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
779 	sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
782 	for (i = 0; i < ncmds; i++) {
783 		cm = &sc->mfi_commands[i];
		/* Point each command at its slice of the frame pool (CPU and bus views). */
784 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
785 		    sc->mfi_cmd_size * i);
786 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
787 		    sc->mfi_cmd_size * i;
		/* The context field is how completions are matched back to commands. */
788 		cm->cm_frame->header.context = i;
789 		cm->cm_sense = &sc->mfi_sense[i];
790 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
793 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
794 		    &cm->cm_dmamap) == 0) {
			/* mfi_release_command requires the io lock held. */
795 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
796 			mfi_release_command(cm);
797 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
801 		sc->mfi_total_cmds++;
/*
 * Return a command to the free pool.  Clears the frame header fields
 * (as 32-bit words, preserving the context word at hdr_data[2..3]) and
 * the first S/G entry, resets per-command bookkeeping, and enqueues the
 * command on the free list.  Caller must hold mfi_io_lock.
 */
808 mfi_release_command(struct mfi_command *cm)
810 	struct mfi_frame_header *hdr;
813 	mfi_lockassert(&cm->cm_sc->mfi_io_lock);
816 	 * Zero out the important fields of the frame, but make sure the
817 	 * context field is preserved.  For efficiency, handle the fields
818 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
820 	hdr = &cm->cm_frame->header;
821 	if (cm->cm_data != NULL && hdr->sg_count) {
822 		cm->cm_sg->sg32[0].len = 0;
823 		cm->cm_sg->sg32[0].addr = 0;
826 	hdr_data = (uint32_t *)cm->cm_frame;
827 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
828 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
829 	hdr_data[4] = 0;	/* flags, timeout */
830 	hdr_data[5] = 0;	/* data_len */
832 	cm->cm_extra_frames = 0;
834 	cm->cm_complete = NULL;
835 	cm->cm_private = NULL;
838 	cm->cm_total_frame_size = 0;
839 	cm->retry_for_fw_reset = 0;
841 	mfi_enqueue_free(cm);
/*
 * Build a DCMD (direct command) frame.  Dequeues a free command, zeroes
 * its frame while preserving the context word, optionally allocates a
 * data buffer of bufsize (reusing *bufp if the caller already supplied
 * one), and fills in the DCMD header.  On success *cmp holds the command
 * and, if requested, *bufp the data buffer.  Caller holds mfi_io_lock.
 */
845 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
846     uint32_t opcode, void **bufp, size_t bufsize)
848 	struct mfi_command *cm;
849 	struct mfi_dcmd_frame *dcmd;
851 	uint32_t context = 0;
853 	mfi_lockassert(&sc->mfi_io_lock);
855 	cm = mfi_dequeue_free(sc);
859 	/* Zero out the MFI frame */
860 	context = cm->cm_frame->header.context;
861 	bzero(cm->cm_frame, sizeof(union mfi_frame));
862 	cm->cm_frame->header.context = context;
864 	if ((bufsize > 0) && (bufp != NULL)) {
866 			buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
868 				mfi_release_command(cm);
877 	dcmd =  &cm->cm_frame->dcmd;
878 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
879 	dcmd->header.cmd = MFI_CMD_DCMD;
880 	dcmd->header.timeout = 0;
881 	dcmd->header.flags = 0;
882 	dcmd->header.data_len = bufsize;
883 	dcmd->header.scsi_status = 0;
884 	dcmd->opcode = opcode;
885 	cm->cm_sg = &dcmd->sgl;
886 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
889 	cm->cm_private = buf;
890 	cm->cm_len = bufsize;
893 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI INIT command that tells the firmware where the reply
 * queue and producer/consumer indices live.  The mfi_init_qinfo struct
 * is placed in the frame's S/G area (second MFI_FRAME_SIZE chunk of the
 * command's frame allocation) and referenced by bus address.  Issued
 * polled; takes and releases mfi_io_lock internally.
 */
899 mfi_comms_init(struct mfi_softc *sc)
901 	struct mfi_command *cm;
902 	struct mfi_init_frame *init;
903 	struct mfi_init_qinfo *qinfo;
905 	uint32_t context = 0;
907 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
908 	if ((cm = mfi_dequeue_free(sc)) == NULL)
911 	/* Zero out the MFI frame */
912 	context = cm->cm_frame->header.context;
913 	bzero(cm->cm_frame, sizeof(union mfi_frame));
914 	cm->cm_frame->header.context = context;
917 	 * Abuse the SG list area of the frame to hold the init_qinfo
920 	init = &cm->cm_frame->init;
921 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
923 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	/* One extra reply-queue entry beyond the command count (hw requirement). */
924 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
925 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
926 	    offsetof(struct mfi_hwcomms, hw_reply_q);
927 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
928 	    offsetof(struct mfi_hwcomms, hw_pi);
929 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
930 	    offsetof(struct mfi_hwcomms, hw_ci);
932 	init->header.cmd = MFI_CMD_INIT;
933 	init->header.data_len = sizeof(struct mfi_init_qinfo);
934 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
936 	cm->cm_flags = MFI_CMD_POLLED;
938 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
939 		device_printf(sc->mfi_dev, "failed to send init command\n");
940 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
943 	mfi_release_command(cm);
944 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Issue DCMD CTRL_GETINFO (polled) to size the controller: derives
 * mfi_max_io from stripe geometry vs. the firmware's max request size,
 * and caches the firmware's disableOnlineCtrlReset property.  On DCMD
 * failure, falls back to an SGE-based mfi_max_io estimate.
 */
950 mfi_get_controller_info(struct mfi_softc *sc)
952 	struct mfi_command *cm = NULL;
953 	struct mfi_ctrl_info *ci = NULL;
954 	uint32_t max_sectors_1, max_sectors_2;
957 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
958 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
959 	    (void **)&ci, sizeof(*ci));
962 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
964 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
965 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		/* Fallback estimate when the firmware query fails. */
966 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
972 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
973 	    BUS_DMASYNC_POSTREAD);
974 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
976 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
977 	max_sectors_2 = ci->max_request_size;
978 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
979 	sc->disableOnlineCtrlReset =
980 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
986 		mfi_release_command(cm);
987 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch the firmware event-log state (sequence-number watermarks) via a
 * polled DCMD CTRL_EVENT_GETINFO.  On success *log_state points at a
 * kmalloc'd mfi_evt_log_state the caller must kfree (M_MFIBUF).
 */
992 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
994 	struct mfi_command *cm = NULL;
997 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
998 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
999 	    (void **)log_state, sizeof(**log_state));
1002 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1004 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1005 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1009 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1010 	    BUS_DMASYNC_POSTREAD);
1011 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1015 		mfi_release_command(cm);
1016 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Set up asynchronous event notification.  When starting from sequence
 * 0, query the firmware log state, replay events that fired between
 * shutdown and now via mfi_parse_entries(), and resume from the newest
 * sequence number; then register the AEN with the configured
 * locale/class filter.  Frees the log_state buffer before returning.
 * NOTE(review): log_state is dereferenced after mfi_get_log_state();
 * the missing lines presumably bail out on error first — verify in the
 * full source.
 */
1022 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1024 	struct mfi_evt_log_state *log_state = NULL;
1025 	union mfi_evt class_locale;
1029 	class_locale.members.reserved = 0;
1030 	class_locale.members.locale = mfi_event_locale;
1031 	class_locale.members.evt_class  = mfi_event_class;
1033 	if (seq_start == 0) {
1034 		error = mfi_get_log_state(sc, &log_state);
1035 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1038 				kfree(log_state, M_MFIBUF);
1043 		 * Walk through any events that fired since the last
1046 		mfi_parse_entries(sc, log_state->shutdown_seq_num,
1047 		    log_state->newest_seq_num);
1048 		seq = log_state->newest_seq_num;
1051 	mfi_aen_register(sc, seq, class_locale.word);
1052 	if (log_state != NULL)
1053 		kfree(log_state, M_MFIBUF);
/*
 * Queue a command and sleep until it completes.  Caller holds
 * mfi_io_lock (lksleep drops/reacquires it).  A DCMD opcode of 0 is a
 * MegaCli quirk: report MFI_STAT_OK immediately without issuing it.
 * Returns the command's cm_error.
 */
1059 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1062 	mfi_lockassert(&sc->mfi_io_lock);
1063 	cm->cm_complete = NULL;
1067 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
1068 	 * and return 0 to it as status
1070 	if (cm->cm_frame->dcmd.opcode == 0) {
1071 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1073 		return (cm->cm_error);
1075 	mfi_enqueue_ready(cm);
1077 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1078 		lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
1079 	return (cm->cm_error);
/*
 * Tear down all driver resources on detach/attach-failure: watchdog,
 * device node, per-command DMA maps, interrupt, then each DMA region
 * (sense, frames, comms, and the ThunderBolt-only pools), and finally
 * the sysctl tree and locks.  Each teardown is guarded so the function
 * is safe to call from a partially completed attach.
 */
1083 mfi_free(struct mfi_softc *sc)
1085 struct mfi_command *cm;
1088 callout_stop_sync(&sc->mfi_watchdog_callout);
1090 if (sc->mfi_cdev != NULL)
1091 destroy_dev(sc->mfi_cdev);
1092 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
/* Destroy the per-command DMA maps before freeing the command array. */
1094 if (sc->mfi_total_cmds != 0) {
1095 for (i = 0; i < sc->mfi_total_cmds; i++) {
1096 cm = &sc->mfi_commands[i];
1097 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1099 kfree(sc->mfi_commands, M_MFIBUF);
1103 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1104 if (sc->mfi_irq != NULL)
1105 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer DMA region: unload, free, destroy tag (in that order). */
1108 if (sc->mfi_sense_busaddr != 0)
1109 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1110 if (sc->mfi_sense != NULL)
1111 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1112 sc->mfi_sense_dmamap);
1113 if (sc->mfi_sense_dmat != NULL)
1114 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Frame pool DMA region. */
1116 if (sc->mfi_frames_busaddr != 0)
1117 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1118 if (sc->mfi_frames != NULL)
1119 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1120 sc->mfi_frames_dmamap);
1121 if (sc->mfi_frames_dmat != NULL)
1122 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Comms (producer/consumer reply queue) DMA region. */
1124 if (sc->mfi_comms_busaddr != 0)
1125 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1126 if (sc->mfi_comms != NULL)
1127 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1128 sc->mfi_comms_dmamap);
1129 if (sc->mfi_comms_dmat != NULL)
1130 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1132 /* ThunderBolt contiguous memory free here */
1133 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1134 if (sc->mfi_tb_busaddr != 0)
1135 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1136 if (sc->request_message_pool != NULL)
1137 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1139 if (sc->mfi_tb_dmat != NULL)
1140 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1142 /* Version buffer memory free */
1143 /* Start LSIP200113393 */
1144 if (sc->verbuf_h_busaddr != 0)
1145 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1146 if (sc->verbuf != NULL)
1147 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1148 sc->verbuf_h_dmamap);
1149 if (sc->verbuf_h_dmat != NULL)
1150 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1152 /* End LSIP200113393 */
1153 /* ThunderBolt INIT packet memory Free */
1154 if (sc->mfi_tb_init_busaddr != 0)
1155 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1156 if (sc->mfi_tb_init != NULL)
1157 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1158 sc->mfi_tb_init_dmamap);
1159 if (sc->mfi_tb_init_dmat != NULL)
1160 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1162 /* ThunderBolt IOC Init Desc memory free here */
1163 if (sc->mfi_tb_ioc_init_busaddr != 0)
1164 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1165 sc->mfi_tb_ioc_init_dmamap);
1166 if (sc->mfi_tb_ioc_init_desc != NULL)
1167 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1168 sc->mfi_tb_ioc_init_desc,
1169 sc->mfi_tb_ioc_init_dmamap);
1170 if (sc->mfi_tb_ioc_init_dmat != NULL)
1171 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Free each ThunderBolt command, then the pool array itself. */
1172 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1173 if (sc->mfi_cmd_pool_tbolt != NULL) {
1174 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1175 kfree(sc->mfi_cmd_pool_tbolt[i],
1177 sc->mfi_cmd_pool_tbolt[i] = NULL;
1181 if (sc->mfi_cmd_pool_tbolt != NULL) {
1182 kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1183 sc->mfi_cmd_pool_tbolt = NULL;
1185 if (sc->request_desc_pool != NULL) {
1186 kfree(sc->request_desc_pool, M_MFIBUF);
1187 sc->request_desc_pool = NULL;
1190 if (sc->mfi_buffer_dmat != NULL)
1191 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1192 if (sc->mfi_parent_dmat != NULL)
1193 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1195 if (sc->mfi_sysctl_tree != NULL)
1196 sysctl_ctx_free(&sc->mfi_sysctl_ctx);
1198 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1199 if (mtx_initialized(&sc->mfi_io_lock))
1202 lockuninit(&sc->mfi_io_lock);
1203 lockuninit(&sc->mfi_config_lock);
/*
 * config_intrhook callback run once interrupts are available: detach
 * the hook, enable controller interrupts, and (under both config and
 * io locks) probe for logical drives; on SKINNY controllers system PDs
 * are probed too (the probe call lines are elided in this extraction).
 */
1210 mfi_startup(void *arg)
1212 struct mfi_softc *sc;
1214 sc = (struct mfi_softc *)arg;
1216 config_intrhook_disestablish(&sc->mfi_ich);
1218 sc->mfi_enable_intr(sc);
/* Lock order: config lock before io lock, released in reverse. */
1219 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1220 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1222 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1224 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1225 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Interrupt handler (the function header line is elided by this
 * extraction; the body below is the reply-queue drain).  Walks the
 * hardware reply queue from consumer index ci toward producer index pi,
 * completing each command, then writes back the consumer index, unfreezes
 * deferred I/O, and re-checks pi after a flushing register read.
 */
1231 struct mfi_softc *sc;
1232 struct mfi_command *cm;
1233 uint32_t pi, ci, context;
1235 sc = (struct mfi_softc *)arg;
/* Shared-interrupt filter: bail if this device didn't interrupt. */
1237 if (sc->mfi_check_clear_intr(sc))
1241 pi = sc->mfi_comms->hw_pi;
1242 ci = sc->mfi_comms->hw_ci;
1243 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1245 context = sc->mfi_comms->hw_reply_q[ci];
1246 if (context < sc->mfi_max_fw_cmds) {
1247 cm = &sc->mfi_commands[context];
1248 mfi_remove_busy(cm);
1250 mfi_complete(sc, cm);
/* Reply queue has mfi_max_fw_cmds + 1 slots; wrap the index. */
1252 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1257 sc->mfi_comms->hw_ci = ci;
1259 /* Give defered I/O a chance to run */
1260 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1261 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1263 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1266 * Dummy read to flush the bus; this ensures that the indexes are up
1267 * to date. Restart processing if more commands have come it.
1269 (void)sc->mfi_read_fw_status(sc);
1270 if (pi != sc->mfi_comms->hw_pi)
/*
 * Send a polled MFI_DCMD_CTRL_SHUTDOWN to flush controller state before
 * power-off/detach.  Outstanding AEN and map-update commands are aborted
 * first so the firmware has no waiters left.
 */
1277 mfi_shutdown(struct mfi_softc *sc)
1279 struct mfi_dcmd_frame *dcmd;
1280 struct mfi_command *cm;
1283 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1284 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1286 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1290 if (sc->mfi_aen_cm != NULL)
1291 mfi_abort(sc, sc->mfi_aen_cm);
1293 if (sc->map_update_cmd != NULL)
1294 mfi_abort(sc, sc->map_update_cmd);
/* No data phase: shutdown DCMD carries no buffer. */
1296 dcmd = &cm->cm_frame->dcmd;
1297 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1298 cm->cm_flags = MFI_CMD_POLLED;
1301 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1302 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1305 mfi_release_command(cm);
1306 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Synchronize the set of attached system (JBOD) physical drives with
 * the controller's view: query the exposed-PD list (polled), attach any
 * new PDs via mfi_add_sys_pd(), and detach children whose PD no longer
 * appears in the list.  Requires both config and io locks held; the io
 * lock is dropped around device_delete_child() to avoid deadlock.
 */
1311 mfi_syspdprobe(struct mfi_softc *sc)
1313 struct mfi_frame_header *hdr;
1314 struct mfi_command *cm = NULL;
1315 struct mfi_pd_list *pdlist = NULL;
1316 struct mfi_system_pd *syspd, *tmp;
1317 int error, i, found;
1319 mfi_lockassert(&sc->mfi_config_lock);
1320 mfi_lockassert(&sc->mfi_io_lock);
1321 /* Add SYSTEM PD's */
1322 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1323 (void **)&pdlist, sizeof(*pdlist));
1325 device_printf(sc->mfi_dev,
1326 "Error while forming SYSTEM PD list\n");
/* Ask only for PDs exposed to the host. */
1330 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1331 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1332 cm->cm_frame->dcmd.mbox[1] = 0;
1333 if (mfi_mapcmd(sc, cm) != 0) {
1334 device_printf(sc->mfi_dev,
1335 "Failed to get syspd device listing\n");
1338 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1339 BUS_DMASYNC_POSTREAD);
1340 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1341 hdr = &cm->cm_frame->header;
1342 if (hdr->cmd_status != MFI_STAT_OK) {
1343 device_printf(sc->mfi_dev,
1344 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1347 /* Get each PD and add it to the system */
1348 for (i = 0; i < pdlist->count; i++) {
/* device_id == encl_device_id marks an enclosure entry, not a disk. */
1349 if (pdlist->addr[i].device_id ==
1350 pdlist->addr[i].encl_device_id)
1353 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1354 if (syspd->pd_id == pdlist->addr[i].device_id)
1358 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1360 /* Delete SYSPD's whose state has been changed */
1361 TAILQ_FOREACH_MUTABLE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1363 for (i = 0; i < pdlist->count; i++) {
1364 if (syspd->pd_id == pdlist->addr[i].device_id)
1368 kprintf("DELETE\n");
/* Drop io lock across newbus teardown; reacquire after. */
1369 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1371 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1373 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1378 kfree(pdlist, M_MFIBUF);
1380 mfi_release_command(cm);
/*
 * Probe logical drives: issue MFI_DCMD_LD_GET_LIST (sleeping via
 * mfi_wait_command) and attach a child for each target id not already
 * present on sc->mfi_ld_tqh.  Requires config and io locks held.
 */
1384 mfi_ldprobe(struct mfi_softc *sc)
1386 struct mfi_frame_header *hdr;
1387 struct mfi_command *cm = NULL;
1388 struct mfi_ld_list *list = NULL;
1389 struct mfi_disk *ld;
1392 mfi_lockassert(&sc->mfi_config_lock);
1393 mfi_lockassert(&sc->mfi_io_lock);
1395 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1396 (void **)&list, sizeof(*list));
1400 cm->cm_flags = MFI_CMD_DATAIN;
1401 if (mfi_wait_command(sc, cm) != 0) {
1402 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1406 hdr = &cm->cm_frame->header;
1407 if (hdr->cmd_status != MFI_STAT_OK) {
1408 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Skip targets already attached; add the rest. */
1413 for (i = 0; i < list->ld_count; i++) {
1414 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1415 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1418 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1423 kfree(list, M_MFIBUF);
1425 mfi_release_command(cm);
1431 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1432 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp into a static buffer (not reentrant --
 * caller must consume the string before the next call).
 */
1436 format_timestamp(uint32_t timestamp)
1438 static char buffer[32];
/* 0xffxxxxxx encodes "seconds since boot" rather than wall time. */
1440 if ((timestamp & 0xff000000) == 0xff000000)
1441 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1444 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a short human-readable name; unknown
 * codes are formatted numerically into a static (non-reentrant) buffer.
 */
1449 format_class(int8_t class)
1451 static char buffer[6];
1454 case MFI_EVT_CLASS_DEBUG:
1456 case MFI_EVT_CLASS_PROGRESS:
1457 return ("progress");
1458 case MFI_EVT_CLASS_INFO:
1460 case MFI_EVT_CLASS_WARNING:
1462 case MFI_EVT_CLASS_CRITICAL:
1464 case MFI_EVT_CLASS_FATAL:
1466 case MFI_EVT_CLASS_DEAD:
1469 ksnprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log an AEN event and react to the hot-plug relevant ones: host-bus
 * scan requests and PD insert/remove trigger a syspd re-probe (when
 * mfi_detect_jbod_change is enabled), and an LD going OFFLINE detaches
 * the corresponding mfid child.  Stale events (older than the boot
 * sequence number) and events during detach are ignored.
 */
1475 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1477 struct mfi_system_pd *syspd = NULL;
1479 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1480 format_timestamp(detail->time), detail->evt_class.members.locale,
1481 format_class(detail->evt_class.members.evt_class),
1482 detail->description);
1484 /* Don't act on old AEN's or while shutting down */
1485 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1488 switch (detail->arg_type) {
1489 case MR_EVT_ARGS_NONE:
1490 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1491 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1492 if (mfi_detect_jbod_change) {
1494 * Probe for new SYSPD's and Delete
1497 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1498 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1500 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1501 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1505 case MR_EVT_ARGS_LD_STATE:
1507 * During load time driver reads all the events starting
1508 * from the one that has been logged after shutdown. Avoid
1511 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1513 struct mfi_disk *ld;
/* Find the mfid child matching the offlined target id. */
1514 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1516 detail->args.ld_state.ld.target_id)
1520 Fix: for kernel panics when SSCD is removed
1521 KASSERT(ld != NULL, ("volume dissappeared"));
1525 device_delete_child(sc->mfi_dev, ld->ld_dev);
1530 case MR_EVT_ARGS_PD:
1531 if (detail->code == MR_EVT_PD_REMOVED) {
1532 if (mfi_detect_jbod_change) {
1534 * If the removed device is a SYSPD then
1537 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1540 detail->args.pd.device_id) {
1542 device_delete_child(
1551 if (detail->code == MR_EVT_PD_INSERTED) {
1552 if (mfi_detect_jbod_change) {
1553 /* Probe for new SYSPD's */
1554 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1555 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1557 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1558 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Copy an event detail onto sc->mfi_evt_queue and kick the swi
 * taskqueue so mfi_handle_evt() decodes it outside interrupt context.
 * M_NOWAIT allocation: on failure the event is silently dropped
 * (the NULL-check line is elided in this extraction).
 */
1566 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1568 struct mfi_evt_queue_elm *elm;
1570 mfi_lockassert(&sc->mfi_io_lock);
1571 elm = kmalloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
1574 memcpy(&elm->detail, detail, sizeof(*detail));
1575 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1576 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: splice the pending event queue onto a local list
 * under the io lock, then decode and free each element with the lock
 * dropped (mfi_decode_evt may sleep / take locks itself).
 */
1580 mfi_handle_evt(void *context, int pending)
1582 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1583 struct mfi_softc *sc;
1584 struct mfi_evt_queue_elm *elm;
1588 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1589 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1590 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1591 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1592 TAILQ_REMOVE(&queue, elm, link);
1593 mfi_decode_evt(sc, &elm->detail);
1594 kfree(elm, M_MFIBUF);
/*
 * Register for asynchronous event notification starting at sequence
 * 'seq' with the given class/locale filter.  If an AEN command is
 * already outstanding and its filter covers the new request, nothing is
 * done; otherwise the filters are merged, the old AEN is aborted and a
 * new MFI_DCMD_CTRL_EVENT_WAIT is queued with mfi_aen_complete() as its
 * completion handler.
 */
1599 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1601 struct mfi_command *cm;
1602 struct mfi_dcmd_frame *dcmd;
1603 union mfi_evt current_aen, prior_aen;
1604 struct mfi_evt_detail *ed = NULL;
1607 current_aen.word = locale;
1608 if (sc->mfi_aen_cm != NULL) {
/* mbox[1] of the outstanding AEN holds its class/locale word. */
1610 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Existing registration already covers the new class and locale? */
1611 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1612 !((prior_aen.members.locale & current_aen.members.locale)
1613 ^current_aen.members.locale)) {
/* Merge: union of locales, most verbose (lowest) class. */
1616 prior_aen.members.locale |= current_aen.members.locale;
1617 if (prior_aen.members.evt_class
1618 < current_aen.members.evt_class)
1619 current_aen.members.evt_class =
1620 prior_aen.members.evt_class;
1621 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1622 mfi_abort(sc, sc->mfi_aen_cm);
1623 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1627 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1628 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1629 (void **)&ed, sizeof(*ed));
1630 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1635 dcmd = &cm->cm_frame->dcmd;
1636 ((uint32_t *)&dcmd->mbox)[0] = seq;
1637 ((uint32_t *)&dcmd->mbox)[1] = locale;
1638 cm->cm_flags = MFI_CMD_DATAIN;
1639 cm->cm_complete = mfi_aen_complete;
1641 sc->last_seq_num = seq;
1642 sc->mfi_aen_cm = cm;
1644 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1645 mfi_enqueue_ready(cm);
1647 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Completion handler for the outstanding AEN command: unless the
 * command was aborted, queue the event detail for decoding, notify
 * kqueue pollers and registered PIDs, then release the command and
 * re-arm AEN from the next sequence number via mfi_aen_setup().
 * Runs with the io lock held; it is dropped around mfi_aen_setup().
 */
1654 mfi_aen_complete(struct mfi_command *cm)
1656 struct mfi_frame_header *hdr;
1657 struct mfi_softc *sc;
1658 struct mfi_evt_detail *detail;
1659 struct mfi_aen *mfi_aen_entry, *tmp;
1661 int seq = 0, aborted = 0;
1664 mfi_lockassert(&sc->mfi_io_lock);
1666 hdr = &cm->cm_frame->header;
1668 if (sc->mfi_aen_cm == NULL)
/* Aborted or invalid-status AENs are not decoded. */
1671 if (sc->mfi_aen_cm->cm_aen_abort ||
1672 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1673 sc->mfi_aen_cm->cm_aen_abort = 0;
1676 sc->mfi_aen_triggered = 1;
1677 if (sc->mfi_poll_waiting) {
1678 sc->mfi_poll_waiting = 0;
1679 KNOTE(&sc->mfi_kq.ki_note, 0);
1681 detail = cm->cm_data;
1682 mfi_queue_evt(sc, detail);
/* Next registration picks up right after this event. */
1683 seq = detail->seq + 1;
1684 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
1686 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1688 p = mfi_aen_entry->p;
1692 kfree(mfi_aen_entry, M_MFIBUF);
1696 kfree(cm->cm_data, M_MFIBUF);
/* Wake anyone in mfi_abort() waiting for the AEN slot to clear. */
1697 sc->mfi_aen_cm = NULL;
1698 wakeup(&sc->mfi_aen_cm);
1699 mfi_release_command(cm);
1701 /* set it up again so the driver can catch more events */
1703 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1704 mfi_aen_setup(sc, seq);
1705 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1709 #define MAX_EVENTS 15
/*
 * Replay the controller event log from start_seq up to (but excluding)
 * stop_seq, queueing each event for decoding.  Events are fetched in
 * batches of MAX_EVENTS with polled MFI_DCMD_CTRL_EVENT_GET DCMDs; the
 * log is a circular buffer, so the stop condition handles the sequence
 * numbers wrapping past stop_seq.
 */
1712 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1714 struct mfi_command *cm;
1715 struct mfi_dcmd_frame *dcmd;
1716 struct mfi_evt_list *el;
1717 union mfi_evt class_locale;
1718 int error, i, seq, size;
1720 class_locale.members.reserved = 0;
1721 class_locale.members.locale = mfi_event_locale;
1722 class_locale.members.evt_class = mfi_event_class;
/* One header plus MAX_EVENTS details per fetch. */
1724 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1726 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1730 for (seq = start_seq;;) {
1731 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1732 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1733 kfree(el, M_MFIBUF);
1734 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1738 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Build the DCMD frame by hand (no data buffer to allocate). */
1740 dcmd = &cm->cm_frame->dcmd;
1741 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1742 dcmd->header.cmd = MFI_CMD_DCMD;
1743 dcmd->header.timeout = 0;
1744 dcmd->header.data_len = size;
1745 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1746 ((uint32_t *)&dcmd->mbox)[0] = seq;
1747 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1748 cm->cm_sg = &dcmd->sgl;
1749 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1750 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1754 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1755 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1756 device_printf(sc->mfi_dev,
1757 "Failed to get controller entries\n");
1758 mfi_release_command(cm);
1759 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1763 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1764 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1765 BUS_DMASYNC_POSTREAD);
1766 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND == reached the end of the log; normal termination. */
1768 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1769 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1770 mfi_release_command(cm);
1771 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1774 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1775 device_printf(sc->mfi_dev,
1776 "Error %d fetching controller entries\n",
1777 dcmd->header.cmd_status);
1778 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1779 mfi_release_command(cm);
1780 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1783 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1784 mfi_release_command(cm);
1785 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1787 for (i = 0; i < el->count; i++) {
1789 * If this event is newer than 'stop_seq' then
1790 * break out of the loop. Note that the log
1791 * is a circular buffer so we have to handle
1792 * the case that our stop point is earlier in
1793 * the buffer than our start point.
1795 if (el->event[i].seq >= stop_seq) {
1796 if (start_seq <= stop_seq)
1798 else if (el->event[i].seq < start_seq)
1801 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1802 mfi_queue_evt(sc, &el->event[i]);
1803 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Continue the next batch after the last event we saw. */
1805 seq = el->event[el->count - 1].seq + 1;
1808 kfree(el, M_MFIBUF);
/*
 * Attach one logical drive: fetch MFI_DCMD_LD_GET_INFO for target 'id'
 * (sleeping) and, unless the LD is an SSCD (CacheCade) volume, hand the
 * command to mfi_add_ld_complete() to create the mfid child.  io lock
 * must be held.  ld_info ownership: mfi_add_ld_complete takes it for
 * non-SSCD volumes; it is freed here for SSCD / error paths.
 */
1813 mfi_add_ld(struct mfi_softc *sc, int id)
1815 struct mfi_command *cm;
1816 struct mfi_dcmd_frame *dcmd = NULL;
1817 struct mfi_ld_info *ld_info = NULL;
1820 mfi_lockassert(&sc->mfi_io_lock);
1822 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1823 (void **)&ld_info, sizeof(*ld_info));
1825 device_printf(sc->mfi_dev,
1826 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1828 kfree(ld_info, M_MFIBUF);
1831 cm->cm_flags = MFI_CMD_DATAIN;
1832 dcmd = &cm->cm_frame->dcmd;
1834 if (mfi_wait_command(sc, cm) != 0) {
1835 device_printf(sc->mfi_dev,
1836 "Failed to get logical drive: %d\n", id);
1837 kfree(ld_info, M_MFIBUF);
/* SSCD (CacheCade) volumes get no mfid child. */
1840 if (ld_info->ld_config.params.isSSCD != 1) {
1841 mfi_add_ld_complete(cm);
1843 mfi_release_command(cm);
1844 if (ld_info) /* SSCD drives ld_info free here */
1845 kfree(ld_info, M_MFIBUF);
/*
 * Second half of logical-drive attach: on DCMD success, create an
 * "mfid" newbus child carrying ld_info as ivars and attach it.  The
 * io lock is dropped around the newbus calls and retaken after.
 * ld_info is freed on the failure paths, otherwise owned by the child.
 */
1851 mfi_add_ld_complete(struct mfi_command *cm)
1853 struct mfi_frame_header *hdr;
1854 struct mfi_ld_info *ld_info;
1855 struct mfi_softc *sc;
1859 hdr = &cm->cm_frame->header;
1860 ld_info = cm->cm_private;
1862 if (hdr->cmd_status != MFI_STAT_OK) {
1863 kfree(ld_info, M_MFIBUF);
1864 mfi_release_command(cm);
1867 mfi_release_command(cm);
1869 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1871 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1872 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1873 kfree(ld_info, M_MFIBUF);
1875 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1879 device_set_ivars(child, ld_info);
1880 device_set_desc(child, "MFI Logical Disk");
1881 bus_generic_attach(sc->mfi_dev);
1883 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Attach one system (JBOD) physical drive: fetch MFI_DCMD_PD_GET_INFO
 * for device 'id' with a polled DCMD, then hand off to
 * mfi_add_sys_pd_complete() to create the mfisyspd child.  io lock
 * must be held.
 */
1887 mfi_add_sys_pd(struct mfi_softc *sc, int id)
1889 struct mfi_command *cm;
1890 struct mfi_dcmd_frame *dcmd = NULL;
1891 struct mfi_pd_info *pd_info = NULL;
1894 mfi_lockassert(&sc->mfi_io_lock);
1896 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1897 (void **)&pd_info, sizeof(*pd_info));
1899 device_printf(sc->mfi_dev,
1900 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1903 kfree(pd_info, M_MFIBUF);
1906 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1907 dcmd = &cm->cm_frame->dcmd;
1909 dcmd->header.scsi_status = 0;
1910 dcmd->header.pad0 = 0;
1911 if (mfi_mapcmd(sc, cm) != 0) {
1912 device_printf(sc->mfi_dev,
1913 "Failed to get physical drive info %d\n", id);
1914 kfree(pd_info, M_MFIBUF);
/* Polled completion: sync/unload DMA, then finish attach inline. */
1917 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1918 BUS_DMASYNC_POSTREAD);
1919 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1920 mfi_add_sys_pd_complete(cm);
/*
 * Second half of system-PD attach: verify the DCMD succeeded and the
 * drive is in SYSTEM state, then create an "mfisyspd" newbus child
 * with pd_info as ivars.  io lock dropped around newbus calls.
 * pd_info is freed on every failure path, otherwise owned by the child.
 */
1925 mfi_add_sys_pd_complete(struct mfi_command *cm)
1927 struct mfi_frame_header *hdr;
1928 struct mfi_pd_info *pd_info;
1929 struct mfi_softc *sc;
1933 hdr = &cm->cm_frame->header;
1934 pd_info = cm->cm_private;
1936 if (hdr->cmd_status != MFI_STAT_OK) {
1937 kfree(pd_info, M_MFIBUF);
1938 mfi_release_command(cm);
/* Only drives firmware reports as SYSTEM become JBOD children. */
1941 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1942 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1943 pd_info->ref.v.device_id);
1944 kfree(pd_info, M_MFIBUF);
1945 mfi_release_command(cm);
1948 mfi_release_command(cm);
1950 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1952 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1953 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1954 kfree(pd_info, M_MFIBUF);
1956 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1960 device_set_ivars(child, pd_info);
1961 device_set_desc(child, "MFI System PD");
1962 bus_generic_attach(sc->mfi_dev);
1964 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Pull the next bio off the queue and convert it into an MFI command:
 * a pass-through frame for system PDs, an LD I/O frame otherwise.
 * Keeps two commands in reserve so ioctls cannot be starved; on
 * build failure the bio is requeued.  Returns NULL when nothing can
 * be issued right now.
 */
1967 static struct mfi_command *
1968 mfi_bio_command(struct mfi_softc *sc)
1971 struct mfi_command *cm = NULL;
1972 struct mfi_disk *mfid;
1974 /* reserving two commands to avoid starvation for IOCTL */
1975 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1977 if ((bio = mfi_dequeue_bio(sc)) == NULL)
1979 mfid = bio->bio_driver_info;
1980 if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1981 cm = mfi_build_syspdio(sc, bio);
1983 cm = mfi_build_ldio(sc, bio);
/* Couldn't build a command: put the bio back for a later retry. */
1985 mfi_enqueue_bio(sc, bio);
/*
 * Build a SCSI pass-through (READ_10/WRITE_10) frame for a bio aimed
 * at a system PD.  The LBA and block count are packed big-endian into
 * the CDB; only the frame header context survives the bzero.  Returns
 * NULL if no free command is available.
 * NOTE(review): 10-byte CDB limits this path to 32-bit LBAs.
 */
1989 static struct mfi_command *
1990 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1992 struct mfi_command *cm;
1994 struct mfi_system_pd *disk;
1995 struct mfi_pass_frame *pass;
1996 int flags = 0, blkcount = 0;
1997 uint32_t context = 0;
1999 if ((cm = mfi_dequeue_free(sc)) == NULL)
2002 /* Zero out the MFI frame */
2003 context = cm->cm_frame->header.context;
2004 bzero(cm->cm_frame, sizeof(union mfi_frame));
2005 cm->cm_frame->header.context = context;
2007 pass = &cm->cm_frame->pass;
2008 bzero(pass->cdb, 16);
2009 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2010 switch (bp->b_cmd & 0x03) {
2012 pass->cdb[0] = READ_10;
2013 flags = MFI_CMD_DATAIN;
2016 pass->cdb[0] = WRITE_10;
2017 flags = MFI_CMD_DATAOUT;
2020 panic("Invalid bio command");
2023 /* Cheat with the sector length to avoid a non-constant division */
2024 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2025 disk = bio->bio_driver_info;
2026 /* Fill the LBA and Transfer length in CDB */
2027 pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
2028 pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
2029 pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
2030 pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
2031 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2032 pass->cdb[8] = (blkcount & 0x00ff);
2033 pass->header.target_id = disk->pd_id;
2034 pass->header.timeout = 0;
2035 pass->header.flags = 0;
2036 pass->header.scsi_status = 0;
2037 pass->header.sense_len = MFI_SENSE_LEN;
2038 pass->header.data_len = bp->b_bcount;
2039 pass->header.cdb_len = 10;
/* Split the 64-bit sense buffer bus address into lo/hi halves. */
2040 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2041 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2042 cm->cm_complete = mfi_bio_complete;
2043 cm->cm_private = bio;
2044 cm->cm_data = bp->b_data;
2045 cm->cm_len = bp->b_bcount;
2046 cm->cm_sg = &pass->sgl;
2047 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2048 cm->cm_flags = flags;
/*
 * Build a native LD READ/WRITE frame for a bio aimed at a logical
 * drive.  Unlike the syspd path, the frame carries a full 64-bit LBA
 * (lba_hi/lba_lo) and data_len is in sectors, not bytes.  Returns
 * NULL if no free command is available.
 */
2052 static struct mfi_command *
2053 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2055 struct mfi_io_frame *io;
2057 struct mfi_disk *disk;
2058 struct mfi_command *cm;
2059 int flags, blkcount;
2060 uint32_t context = 0;
2062 if ((cm = mfi_dequeue_free(sc)) == NULL)
2065 /* Zero out the MFI frame */
2066 context = cm->cm_frame->header.context;
2067 bzero(cm->cm_frame, sizeof(union mfi_frame));
2068 cm->cm_frame->header.context = context;
2070 io = &cm->cm_frame->io;
2071 switch (bp->b_cmd & 0x03) {
2073 io->header.cmd = MFI_CMD_LD_READ;
2074 flags = MFI_CMD_DATAIN;
2077 io->header.cmd = MFI_CMD_LD_WRITE;
2078 flags = MFI_CMD_DATAOUT;
2081 panic("Invalid bio command");
2084 /* Cheat with the sector length to avoid a non-constant division */
2085 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2086 disk = bio->bio_driver_info;
2087 io->header.target_id = disk->ld_id;
2088 io->header.timeout = 0;
2089 io->header.flags = 0;
2090 io->header.scsi_status = 0;
2091 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is a sector count for LD I/O frames. */
2092 io->header.data_len = blkcount;
2093 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2094 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2095 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
2096 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
2097 cm->cm_complete = mfi_bio_complete;
2098 cm->cm_private = bio;
2099 cm->cm_data = bp->b_data;
2100 cm->cm_len = bp->b_bcount;
2101 cm->cm_sg = &io->sgl;
2102 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2103 cm->cm_flags = flags;
/*
 * Completion handler for bio-backed commands: translate firmware /
 * SCSI / driver errors into B_ERROR on the buf, log sense data on
 * hardware errors, then release the command and finish the bio.
 */
2108 mfi_bio_complete(struct mfi_command *cm)
2112 struct mfi_frame_header *hdr;
2113 struct mfi_softc *sc;
2115 bio = cm->cm_private;
2117 hdr = &cm->cm_frame->header;
2120 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2121 bp->b_flags |= B_ERROR;
2123 device_printf(sc->mfi_dev, "I/O error, status= %d "
2124 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2125 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2126 } else if (cm->cm_error != 0) {
2127 bp->b_flags |= B_ERROR;
/* Release before biodone so the command is reusable immediately. */
2130 mfi_release_command(cm);
2131 mfi_disk_complete(bio);
/*
 * Issue pending work to the controller: first previously-prepared
 * (ready) commands, then queued CAM ccbs, then new bios, until the
 * queue freezes (MFI_FLAGS_QFRZN) or no work remains.  A command that
 * fails to map is requeued on the ready queue for later.
 */
2135 mfi_startio(struct mfi_softc *sc)
2137 struct mfi_command *cm;
2138 struct ccb_hdr *ccbh;
2141 /* Don't bother if we're short on resources */
2142 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2145 /* Try a command that has already been prepared */
2146 cm = mfi_dequeue_ready(sc);
2149 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2150 cm = sc->mfi_cam_start(ccbh);
2153 /* Nope, so look for work on the bioq */
2155 cm = mfi_bio_command(sc);
2157 /* No work available, so exit */
2161 /* Send the command to the controller */
2162 if (mfi_mapcmd(sc, cm) != 0) {
2163 mfi_requeue_ready(cm);
/*
 * DMA-map a command's data buffer (mfi_data_cb finishes the send from
 * the load callback) or, for data-less commands, send the frame
 * directly via the ThunderBolt or legacy path.  EINPROGRESS from a
 * deferred map load freezes the queue until resources free up.
 * io lock must be held.
 */
2170 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2174 mfi_lockassert(&sc->mfi_io_lock);
/* STP frames manage their own mapping; skip the generic load. */
2176 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2177 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2178 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2179 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2180 if (error == EINPROGRESS) {
2181 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2185 if (sc->MFA_enabled)
2186 error = mfi_tbolt_send_frame(sc, cm);
2188 error = mfi_send_frame(sc, cm);
/*
 * bus_dmamap_load() callback: populate the frame's scatter/gather list
 * from the DMA segments.  SKINNY controllers use IEEE-style SGEs for
 * I/O commands; everyone else uses sg32 or sg64 depending on
 * MFI_FLAGS_SG64.  For STP frames the first cm_stp_len bytes are
 * described by a separate leading SGE.  Finishes by pre-syncing the
 * map, growing cm_total_frame_size/cm_extra_frames, and sending the
 * frame down the ThunderBolt or legacy path.
 */
2195 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2197 struct mfi_frame_header *hdr;
2198 struct mfi_command *cm;
2200 struct mfi_softc *sc;
2201 int i, j, first, dir;
2203 cm = (struct mfi_command *)arg;
2205 hdr = &cm->cm_frame->header;
/* Deferred load failed: record the error and complete the command. */
2209 kprintf("error %d in callback\n", error);
2210 cm->cm_error = error;
2211 mfi_complete(sc, cm);
2215 /* Use IEEE sgl only for IO's on a SKINNY controller
2216 * For other commands on a SKINNY controller use either
2217 * sg32 or sg64 based on the sizeof(bus_addr_t).
2218 * Also calculate the total frame size based on the type
2221 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2222 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2223 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2224 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2225 for (i = 0; i < nsegs; i++) {
2226 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2227 sgl->sg_skinny[i].len = segs[i].ds_len;
2228 sgl->sg_skinny[i].flag = 0;
2230 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2231 hdr->sg_count = nsegs;
2234 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
/* STP: the frame data itself occupies the first cm_stp_len bytes. */
2235 first = cm->cm_stp_len;
2236 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2237 sgl->sg32[j].addr = segs[0].ds_addr;
2238 sgl->sg32[j++].len = first;
2240 sgl->sg64[j].addr = segs[0].ds_addr;
2241 sgl->sg64[j++].len = first;
2245 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2246 for (i = 0; i < nsegs; i++) {
2247 sgl->sg32[j].addr = segs[i].ds_addr + first;
2248 sgl->sg32[j++].len = segs[i].ds_len - first;
2252 for (i = 0; i < nsegs; i++) {
2253 sgl->sg64[j].addr = segs[i].ds_addr + first;
2254 sgl->sg64[j++].len = segs[i].ds_len - first;
2257 hdr->flags |= MFI_FRAME_SGL64;
/* Pre-sync in the direction(s) the command will move data. */
2263 if (cm->cm_flags & MFI_CMD_DATAIN) {
2264 dir |= BUS_DMASYNC_PREREAD;
2265 hdr->flags |= MFI_FRAME_DIR_READ;
2267 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2268 dir |= BUS_DMASYNC_PREWRITE;
2269 hdr->flags |= MFI_FRAME_DIR_WRITE;
2271 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2272 cm->cm_flags |= MFI_CMD_MAPPED;
2275 * Instead of calculating the total number of frames in the
2276 * compound frame, it's already assumed that there will be at
2277 * least 1 frame, so don't compensate for the modulo of the
2278 * following division.
2280 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2281 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2283 if (sc->MFA_enabled)
2284 mfi_tbolt_send_frame(sc, cm);
2286 mfi_send_frame(sc, cm);
/*
 * Hand a frame to legacy (non-ThunderBolt) firmware via the
 * issue-command register.  Async commands are timestamped and put on
 * the busy queue; polled commands busy-wait on cmd_status (up to
 * MFI_POLL_TIMEOUT_SECS) and are kept out of the reply queue via
 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE.
 */
2290 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2292 struct mfi_frame_header *hdr;
2293 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2295 hdr = &cm->cm_frame->header;
2297 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2298 cm->cm_timestamp = time_uptime;
2299 mfi_enqueue_busy(cm);
/* Polled: sentinel status lets us detect completion by polling. */
2301 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2302 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2306 * The bus address of the command is aligned on a 64 byte boundary,
2307 * leaving the least 6 bits as zero. For whatever reason, the
2308 * hardware wants the address shifted right by three, leaving just
2309 * 3 zero bits. These three bits are then used as a prefetching
2310 * hint for the hardware to predict how many frames need to be
2311 * fetched across the bus. If a command has more than 8 frames
2312 * then the 3 bits are set to 0x7 and the firmware uses other
2313 * information in the command to determine the total amount to fetch.
2314 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2315 * is enough for both 32bit and 64bit systems.
2317 if (cm->cm_extra_frames > 7)
2318 cm->cm_extra_frames = 7;
2320 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2322 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2325 /* This is a polled command, so busy-wait for it to complete. */
2326 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2333 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2334 device_printf(sc->mfi_dev, "Frame %p timed out "
2335 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Generic command completion: post-sync and unload the data DMA map
 * if one was set up, mark the command completed, and either invoke its
 * cm_complete callback or (callback-less commands, elided below) wake
 * the mfi_wait_command() sleeper.
 */
2343 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2347 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP frames always need the POSTREAD sync regardless of DATAIN. */
2349 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2350 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2351 dir |= BUS_DMASYNC_POSTREAD;
2352 if (cm->cm_flags & MFI_CMD_DATAOUT)
2353 dir |= BUS_DMASYNC_POSTWRITE;
2355 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2356 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2357 cm->cm_flags &= ~MFI_CMD_MAPPED;
2360 cm->cm_flags |= MFI_CMD_COMPLETED;
2362 if (cm->cm_complete != NULL)
2363 cm->cm_complete(cm);
/*
 * mfi_abort: build and issue an MFI_CMD_ABORT frame targeting 'cm_abort'.
 * Requires the io lock.  Allocates a free command, preserves its context
 * word across the bzero of the frame, fills in the abort frame with the
 * target command's context and 64-bit frame bus address (split lo/hi),
 * and issues it polled.  If the target is the AEN command, flag it and
 * wait (bounded retries) for it to go away.
 * NOTE(review): the send call, error return and loop tail are elided from
 * this view — confirm against full source.
 */
2369 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2371 struct mfi_command *cm;
2372 struct mfi_abort_frame *abort;
2374 uint32_t context = 0;
2376 mfi_lockassert(&sc->mfi_io_lock);
2378 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2382 /* Zero out the MFI frame */
2383 context = cm->cm_frame->header.context;
2384 bzero(cm->cm_frame, sizeof(union mfi_frame));
2385 cm->cm_frame->header.context = context;
2387 abort = &cm->cm_frame->abort;
2388 abort->header.cmd = MFI_CMD_ABORT;
2389 abort->header.flags = 0;
2390 abort->header.scsi_status = 0;
/* Identify the victim by its context and its frame's bus address. */
2391 abort->abort_context = cm_abort->cm_frame->header.context;
2392 abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2393 abort->abort_mfi_addr_hi =
2394 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2396 cm->cm_flags = MFI_CMD_POLLED;
2399 sc->mfi_aen_cm->cm_aen_abort = 1;
2401 mfi_release_command(cm);
/* Wait (up to 5 iterations) for the aborted AEN command to complete. */
2403 while (i < 5 && sc->mfi_aen_cm != NULL) {
2404 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
/*
 * mfi_dump_blocks: polled write of 'len' bytes at 'lba' to logical disk
 * 'id' — presumably the kernel crash-dump path, where interrupts and
 * sleeping are unavailable (TODO confirm against callers).
 * Builds an MFI_CMD_LD_WRITE I/O frame with data_len in sectors, 64-bit
 * LBA split lo/hi, and issues it MFI_CMD_POLLED | MFI_CMD_DATAOUT.
 */
2413 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2416 struct mfi_command *cm;
2417 struct mfi_io_frame *io;
2419 uint32_t context = 0;
2421 if ((cm = mfi_dequeue_free(sc)) == NULL)
2424 /* Zero out the MFI frame */
2425 context = cm->cm_frame->header.context;
2426 bzero(cm->cm_frame, sizeof(union mfi_frame));
2427 cm->cm_frame->header.context = context;
2429 io = &cm->cm_frame->io;
2430 io->header.cmd = MFI_CMD_LD_WRITE;
2431 io->header.target_id = id;
2432 io->header.timeout = 0;
2433 io->header.flags = 0;
2434 io->header.scsi_status = 0;
2435 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is a sector count here; round the byte length up. */
2436 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2437 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2438 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2439 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2440 io->lba_lo = lba & 0xffffffff;
2443 cm->cm_sg = &io->sgl;
2444 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2445 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2447 error = mfi_mapcmd(sc, cm);
/* Polled path: sync/unload the DMA map manually, then free the command. */
2448 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2449 BUS_DMASYNC_POSTWRITE);
2450 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2451 mfi_release_command(cm);
/*
 * mfi_dump_syspd_blocks: polled write of 'len' bytes at 'lba' to system
 * physical disk 'id' via a SCSI WRITE(10) pass-through frame — the
 * system-PD analogue of mfi_dump_blocks.
 * NOTE(review): WRITE(10) carries only a 32-bit LBA; cdb[2..5] silently
 * truncate a >32-bit 'lba'.  Presumably callers never exceed that —
 * confirm, or a WRITE(16) variant would be needed.
 */
2457 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2460 struct mfi_command *cm;
2461 struct mfi_pass_frame *pass;
2465 if ((cm = mfi_dequeue_free(sc)) == NULL)
2468 pass = &cm->cm_frame->pass;
2469 bzero(pass->cdb, 16);
2470 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Hand-build the 10-byte WRITE(10) CDB: LBA big-endian in bytes 2-5. */
2471 pass->cdb[0] = WRITE_10;
2472 pass->cdb[2] = (lba & 0xff000000) >> 24;
2473 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2474 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2475 pass->cdb[5] = (lba & 0x000000ff);
2476 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
/* Transfer length (sectors), big-endian in bytes 7-8. */
2477 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2478 pass->cdb[8] = (blkcount & 0x00ff);
2479 pass->header.target_id = id;
2480 pass->header.timeout = 0;
2481 pass->header.flags = 0;
2482 pass->header.scsi_status = 0;
2483 pass->header.sense_len = MFI_SENSE_LEN;
2484 pass->header.data_len = len;
2485 pass->header.cdb_len = 10;
2486 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2487 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2490 cm->cm_sg = &pass->sgl;
2491 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2492 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2494 error = mfi_mapcmd(sc, cm);
/* Polled path: manual sync/unload, then return the command to the pool. */
2495 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2496 BUS_DMASYNC_POSTWRITE);
2497 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2498 mfi_release_command(cm);
/*
 * mfi_open: character-device open entry point.  Refuses new opens while
 * the driver is detaching; otherwise records the open in mfi_flags.
 * All under the io lock (the reject path's return is elided from view).
 */
2504 mfi_open(struct dev_open_args *ap)
2506 cdev_t dev = ap->a_head.a_dev;
2507 struct mfi_softc *sc;
2512 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2513 if (sc->mfi_detaching)
2516 sc->mfi_flags |= MFI_FLAGS_OPEN;
2519 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_close: character-device close entry point.  Clears the open flag
 * and removes any AEN registrations belonging to the closing process
 * (MUTABLE iteration because entries are removed while walking).
 */
2525 mfi_close(struct dev_close_args *ap)
2527 cdev_t dev = ap->a_head.a_dev;
2528 struct mfi_softc *sc;
2529 struct mfi_aen *mfi_aen_entry, *tmp;
2533 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2534 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2536 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2537 if (mfi_aen_entry->p == curproc) {
2538 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2540 kfree(mfi_aen_entry, M_MFIBUF);
2543 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_config_lock: take the configuration lock for DCMD opcodes that
 * modify the array configuration (LD delete, config add/clear, foreign
 * import).  Presumably returns nonzero when the lock was taken so the
 * caller can pass it to mfi_config_unlock() — the return statements are
 * elided from this view; confirm against full source.
 */
2548 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2552 case MFI_DCMD_LD_DELETE:
2553 case MFI_DCMD_CFG_ADD:
2554 case MFI_DCMD_CFG_CLEAR:
2555 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2556 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
/*
 * mfi_config_unlock: release the configuration lock if 'locked' says a
 * matching mfi_config_lock() call actually took it.
 */
2564 mfi_config_unlock(struct mfi_softc *sc, int locked)
2568 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2572 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * mfi_check_command_pre: pre-issue vetting of configuration-changing
 * commands from userland.  Called with the io lock held.  Disables the
 * affected mfid disk(s) before a destructive DCMD proceeds: the targeted
 * LD for LD_DELETE, every LD for CFG_CLEAR (rolling back already-disabled
 * ones on failure), and the targeted system PD for a PD_STATE_SET to
 * UNCONFIGURED_GOOD.  Returns an error to veto the command.
 * NOTE(review): several error/rollback lines are elided from this view.
 */
2576 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2578 struct mfi_disk *ld, *ld2;
2580 struct mfi_system_pd *syspd = NULL;
2584 mfi_lockassert(&sc->mfi_io_lock);
2586 switch (cm->cm_frame->dcmd.opcode) {
2587 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE. */
2588 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2589 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2595 error = mfi_disk_disable(ld);
2597 case MFI_DCMD_CFG_CLEAR:
/* Clearing the config requires every LD to be quiesced first. */
2598 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2599 error = mfi_disk_disable(ld);
/* On failure, re-enable the LDs already disabled in this pass. */
2604 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2607 mfi_disk_enable(ld2);
2611 case MFI_DCMD_PD_STATE_SET:
/* mbox layout: [0] = pd id (16-bit), [2] = requested state. */
2612 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2614 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2615 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2616 if (syspd->pd_id == syspd_id)
2623 error = mfi_syspd_disable(syspd);
2631 /* Perform post-issue checks on commands from userland. */
/*
 * mfi_check_command_post: post-completion fixup for configuration-changing
 * commands.  On success, tears down the child devices of deleted/cleared
 * LDs (dropping the io lock around device_delete_child) or rescans for
 * newly added config; on failure, re-enables the disks that
 * mfi_check_command_pre() disabled.
 * NOTE(review): break statements and some branches are elided from view.
 */
2633 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2635 struct mfi_disk *ld, *ldn;
2636 struct mfi_system_pd *syspd = NULL;
2640 switch (cm->cm_frame->dcmd.opcode) {
2641 case MFI_DCMD_LD_DELETE:
2642 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2643 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2646 KASSERT(ld != NULL, ("volume dissappeared"));
2647 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* Must drop the io lock while detaching the child device. */
2648 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2650 device_delete_child(sc->mfi_dev, ld->ld_dev);
2652 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Delete failed: undo the pre-issue disable. */
2654 mfi_disk_enable(ld);
2656 case MFI_DCMD_CFG_CLEAR:
2657 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2658 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2660 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2661 device_delete_child(sc->mfi_dev, ld->ld_dev);
2664 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2666 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2667 mfi_disk_enable(ld);
2670 case MFI_DCMD_CFG_ADD:
2671 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2672 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK)
2675 case MFI_DCMD_PD_STATE_SET:
/* mbox layout: [0] = pd id (16-bit), [2] = requested state. */
2676 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2678 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2679 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2680 if (syspd->pd_id == syspd_id)
2686 /* If the transition fails then enable the syspd again */
2687 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2688 mfi_syspd_enable(syspd);
/*
 * mfi_check_for_sscd: detect whether the userland command operates on a
 * CacheCade / SSC drive (isSSCD).  For CFG_ADD the flag is read directly
 * from the supplied config data; for LD_DELETE an MFI_DCMD_LD_GET_INFO
 * query is issued (via mfi_wait_command) to inspect the target LD.
 * Presumably returns nonzero when pre/post checks must be skipped for an
 * SSCD — the return statements are elided from this view; confirm.
 *
 * Fix: the two-part device_printf format string was concatenated without
 * a separating space and lacked the trailing newline every other
 * device_printf in this file carries.
 */
2694 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2696 struct mfi_config_data *conf_data = cm->cm_data;
2697 struct mfi_command *ld_cm = NULL;
2698 struct mfi_ld_info *ld_info = NULL;
2701 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2702 (conf_data->ld[0].params.isSSCD == 1)) {
2704 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2705 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2706 (void **)&ld_info, sizeof(*ld_info));
2708 device_printf(sc->mfi_dev, "Failed to allocate "
2709 "MFI_DCMD_LD_GET_INFO %d\n", error);
2711 kfree(ld_info, M_MFIBUF);
2714 ld_cm->cm_flags = MFI_CMD_DATAIN;
/* Target the LD named in the original command's mbox[0]. */
2715 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2716 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2717 if (mfi_wait_command(sc, ld_cm) != 0) {
2718 device_printf(sc->mfi_dev, "failed to get log drv\n");
2719 mfi_release_command(ld_cm);
2720 kfree(ld_info, M_MFIBUF);
2724 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2725 kfree(ld_info, M_MFIBUF);
2726 mfi_release_command(ld_cm);
/* The completed command's private data holds the LD info. */
2729 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2732 if (ld_info->ld_config.params.isSSCD == 1)
2735 mfi_release_command(ld_cm);
2736 kfree(ld_info, M_MFIBUF);
/*
 * mfi_stp_cmd: prepare a SATA pass-through (STP) ioctl command.  For each
 * user SGL entry it creates a dedicated DMA tag, allocates a kernel bounce
 * buffer, loads it to get a bus address, fills both the megasas_sge array
 * in the frame and the frame's own sg32/sg64 list, and copies the user
 * data in.  Buffers are tracked in sc->kbuff_arr[] and freed later by the
 * ioctl path (see the MFI_CMD_STP cleanup in mfi_ioctl).
 * NOTE(review): error-path returns inside the loop are elided from this
 * view — whether partially created tags/buffers leak on failure cannot be
 * confirmed from here.
 */
2742 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2745 struct mfi_ioc_packet *ioc;
2746 ioc = (struct mfi_ioc_packet *)arg;
2747 int sge_size, error;
2748 struct megasas_sge *kern_sge;
2750 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* The SGE array lives inside the frame at the user-specified offset. */
2751 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2752 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
/* 64-bit bus addresses need SGL64 entries and a larger frame footprint. */
2754 if (sizeof(bus_addr_t) == 8) {
2755 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2756 cm->cm_extra_frames = 2;
2757 sge_size = sizeof(struct mfi_sg64);
2759 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2760 sge_size = sizeof(struct mfi_sg32);
2763 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2764 for (i = 0; i < ioc->mfi_sge_count; i++) {
/* One DMA tag per user iovec, sized exactly to that iovec. */
2765 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2766 1, 0, /* algnmnt, boundary */
2767 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2768 BUS_SPACE_MAXADDR, /* highaddr */
2769 NULL, NULL, /* filter, filterarg */
2770 ioc->mfi_sgl[i].iov_len,/* maxsize */
2772 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2773 BUS_DMA_ALLOCNOW, /* flags */
2774 &sc->mfi_kbuff_arr_dmat[i])) {
2775 device_printf(sc->mfi_dev,
2776 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2780 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2781 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2782 &sc->mfi_kbuff_arr_dmamap[i])) {
2783 device_printf(sc->mfi_dev,
2784 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
/* Resolve the bounce buffer's bus address via mfi_addr_cb. */
2788 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2789 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2790 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2791 &sc->mfi_kbuff_arr_busaddr[i], 0);
2793 if (!sc->kbuff_arr[i]) {
2794 device_printf(sc->mfi_dev,
2795 "Could not allocate memory for kbuff_arr info\n");
2798 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2799 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
/* Mirror the SGE into the frame's own scatter list (64- or 32-bit). */
2801 if (sizeof(bus_addr_t) == 8) {
2802 cm->cm_frame->stp.sgl.sg64[i].addr =
2803 kern_sge[i].phys_addr;
2804 cm->cm_frame->stp.sgl.sg64[i].len =
2805 ioc->mfi_sgl[i].iov_len;
2807 cm->cm_frame->stp.sgl.sg32[i].addr =
2808 kern_sge[i].phys_addr;
2809 cm->cm_frame->stp.sgl.sg32[i].len =
2810 ioc->mfi_sgl[i].iov_len;
/* Copy the user payload into the kernel bounce buffer. */
2813 error = copyin(ioc->mfi_sgl[i].iov_base,
2815 ioc->mfi_sgl[i].iov_len);
2817 device_printf(sc->mfi_dev, "Copy in failed\n");
2822 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * mfi_user_command: execute a userland MFIIO_PASSTHRU DCMD.  Copies in the
 * optional data buffer, takes the config lock if the opcode needs it,
 * grabs a free command (sleeping on the function address as the wakeup
 * channel until one is available), runs the pre/post configuration checks
 * around mfi_wait_command(), copies the frame and data back out, and
 * frees everything.
 */
2827 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2829 struct mfi_command *cm;
2830 struct mfi_dcmd_frame *dcmd;
2831 void *ioc_buf = NULL;
2833 int error = 0, locked;
2836 if (ioc->buf_size > 0) {
2837 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2838 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2840 device_printf(sc->mfi_dev, "failed to copyin\n");
2841 kfree(ioc_buf, M_MFIBUF);
2846 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2848 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2849 while ((cm = mfi_dequeue_free(sc)) == NULL)
2850 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2852 /* Save context for later */
2853 context = cm->cm_frame->header.context;
2855 dcmd = &cm->cm_frame->dcmd;
/* Overlay the user's DCMD frame onto the driver's frame... */
2856 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2858 cm->cm_sg = &dcmd->sgl;
2859 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2860 cm->cm_data = ioc_buf;
2861 cm->cm_len = ioc->buf_size;
2863 /* restore context */
/* ...but keep the driver-assigned context word, which the bcopy clobbered. */
2864 cm->cm_frame->header.context = context;
2866 /* Cheat since we don't know if we're writing or reading */
2867 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2869 error = mfi_check_command_pre(sc, cm);
2873 error = mfi_wait_command(sc, cm);
2875 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the completed frame (status etc.) to the caller's struct. */
2878 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2879 mfi_check_command_post(sc, cm);
2881 mfi_release_command(cm);
2882 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2883 mfi_config_unlock(sc, locked);
2884 if (ioc->buf_size > 0)
2885 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2887 kfree(ioc_buf, M_MFIBUF);
/* Convert a user-supplied integer-encoded pointer back to a kernel void *
 * (used by the 32-bit Linux ioctl compatibility shims below). */
2891 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * mfi_ioctl: main character-device ioctl dispatcher.  Handles driver
 * statistics queries, MFIIO_QUERY_DISK, the raw frame pass-through
 * (copies a user-built MFI frame in, allocates a contiguous staging
 * buffer for the SGL data, runs the command, copies data/sense/status
 * back out), AEN registration, the Linux-compat shims (which re-dispatch
 * to mfi_linux_ioctl_int on the addressed adapter), and MFIIO_PASSTHRU.
 * NOTE(review): many lines (returns, breaks, error labels) are elided
 * from this view; control-flow comments below are best-effort.
 */
2894 mfi_ioctl(struct dev_ioctl_args *ap)
2896 cdev_t dev = ap->a_head.a_dev;
2897 u_long cmd = ap->a_cmd;
2898 int flag = ap->a_fflag;
2899 caddr_t arg = ap->a_data;
2900 struct mfi_softc *sc;
2901 union mfi_statrequest *ms;
2902 struct mfi_ioc_packet *ioc;
2903 struct mfi_ioc_aen *aen;
2904 struct mfi_command *cm = NULL;
2906 union mfi_sense_ptr sense_ptr;
2907 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2910 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
/* Reject ioctls while the adapter is in a critical/resetting state. */
2919 if (sc->hw_crit_error)
2922 if (sc->issuepend_done == 0)
/* Statistics request: copy the requested queue stats out. */
2927 ms = (union mfi_statrequest *)arg;
2928 switch (ms->ms_item) {
2933 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2934 sizeof(struct mfi_qstat));
2941 case MFIIO_QUERY_DISK:
2943 struct mfi_query_disk *qd;
2944 struct mfi_disk *ld;
2946 qd = (struct mfi_query_disk *)arg;
2947 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2948 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2949 if (ld->ld_id == qd->array_id)
2954 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2958 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2960 bzero(qd->devname, SPECNAMELEN + 1);
2961 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2962 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Raw frame pass-through: possibly redirect to another adapter's softc. */
2967 devclass_t devclass;
2968 ioc = (struct mfi_ioc_packet *)arg;
2971 adapter = ioc->mfi_adapter_no;
2972 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2973 devclass = devclass_find("mfi");
2974 sc = devclass_get_softc(devclass, adapter);
2976 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2977 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2978 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2981 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2985 * save off original context since copying from user
2986 * will clobber some data
2988 context = cm->cm_frame->header.context;
2989 cm->cm_frame->header.context = cm->cm_index;
2991 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2992 2 * MEGAMFI_FRAME_SIZE);
2993 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2994 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2995 cm->cm_frame->header.scsi_status = 0;
2996 cm->cm_frame->header.pad0 = 0;
2997 if (ioc->mfi_sge_count) {
2999 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Derive driver flags from the user frame's direction bits. */
3002 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3003 cm->cm_flags |= MFI_CMD_DATAIN;
3004 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3005 cm->cm_flags |= MFI_CMD_DATAOUT;
3006 /* Legacy app shim */
3007 if (cm->cm_flags == 0)
3008 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3009 cm->cm_len = cm->cm_frame->header.data_len;
/* STP commands carry extra payload described by the first iovec. */
3010 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3011 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3012 cm->cm_len += cm->cm_stp_len;
3015 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
/* Single contiguous kernel staging buffer for all SGL data. */
3016 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3022 /* restore header context */
3023 cm->cm_frame->header.context = context;
/* STP uses its own per-SGE bounce-buffer setup (mfi_stp_cmd). */
3025 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3026 res = mfi_stp_cmd(sc, cm, arg);
/* Gather user iovecs into the staging buffer for outbound data. */
3031 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3032 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3033 for (i = 0; i < ioc->mfi_sge_count; i++) {
3034 addr = ioc->mfi_sgl[i].iov_base;
3035 len = ioc->mfi_sgl[i].iov_len;
3036 error = copyin(addr, temp, len);
3038 device_printf(sc->mfi_dev,
3039 "Copy in failed\n");
3047 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3048 locked = mfi_config_lock(sc,
3049 cm->cm_frame->dcmd.opcode);
/* Point pass-through sense at the command's preallocated sense buffer. */
3051 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3052 cm->cm_frame->pass.sense_addr_lo =
3053 (uint32_t)cm->cm_sense_busaddr;
3054 cm->cm_frame->pass.sense_addr_hi =
3055 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3057 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* SSCD (CacheCade) targets skip the pre/post config checks. */
3058 skip_pre_post = mfi_check_for_sscd(sc, cm);
3059 if (!skip_pre_post) {
3060 error = mfi_check_command_pre(sc, cm);
3062 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3067 if ((error = mfi_wait_command(sc, cm)) != 0) {
3068 device_printf(sc->mfi_dev,
3069 "Controller polled failed\n");
3070 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3075 mfi_check_command_post(sc, cm);
3076 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3078 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
/* Scatter the staging buffer back to the user iovecs for inbound data. */
3080 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3081 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3082 for (i = 0; i < ioc->mfi_sge_count; i++) {
3083 addr = ioc->mfi_sgl[i].iov_base;
3084 len = ioc->mfi_sgl[i].iov_len;
3085 error = copyout(temp, addr, len);
3087 device_printf(sc->mfi_dev,
3088 "Copy out failed\n");
3096 if (ioc->mfi_sense_len) {
3097 /* get user-space sense ptr then copy out sense */
3098 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3099 &sense_ptr.sense_ptr_data[0],
3100 sizeof(sense_ptr.sense_ptr_data));
3101 error = copyout(cm->cm_sense, sense_ptr.user_space,
3102 ioc->mfi_sense_len);
3104 device_printf(sc->mfi_dev,
3105 "Copy out failed\n");
3110 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3112 mfi_config_unlock(sc, locked);
3114 kfree(data, M_MFIBUF);
/* Tear down the per-SGE DMA resources created by mfi_stp_cmd(). */
3115 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3116 for (i = 0; i < 2; i++) {
3117 if (sc->kbuff_arr[i]) {
3118 if (sc->mfi_kbuff_arr_busaddr != 0)
3120 sc->mfi_kbuff_arr_dmat[i],
3121 sc->mfi_kbuff_arr_dmamap[i]
3123 if (sc->kbuff_arr[i] != NULL)
3125 sc->mfi_kbuff_arr_dmat[i],
3127 sc->mfi_kbuff_arr_dmamap[i]
3129 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3130 bus_dma_tag_destroy(
3131 sc->mfi_kbuff_arr_dmat[i]);
3136 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3137 mfi_release_command(cm);
3138 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* AEN registration on behalf of the calling process. */
3144 aen = (struct mfi_ioc_aen *)arg;
3145 error = mfi_aen_register(sc, aen->aen_seq_num,
3146 aen->aen_class_locale);
3149 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3151 devclass_t devclass;
3152 struct mfi_linux_ioc_packet l_ioc;
3155 devclass = devclass_find("mfi");
3156 if (devclass == NULL)
3159 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3162 adapter = l_ioc.lioc_adapter_no;
3163 sc = devclass_get_softc(devclass, adapter);
3166 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3170 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3172 devclass_t devclass;
3173 struct mfi_linux_ioc_aen l_aen;
3176 devclass = devclass_find("mfi");
3177 if (devclass == NULL)
3180 error = copyin(arg, &l_aen, sizeof(l_aen));
3183 adapter = l_aen.laen_adapter_no;
3184 sc = devclass_get_softc(devclass, adapter);
3187 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3191 case MFIIO_PASSTHRU:
3192 error = mfi_user_command(sc, iop);
3195 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_linux_ioctl_int: Linux-compatible ioctl handler (32-bit ABI shim).
 * Mirrors the native raw-frame path in mfi_ioctl: copies in the Linux
 * ioc packet, stages user SGL data through a contiguous kernel buffer,
 * runs the command with pre/post config checks, then copies data, sense
 * (with the high address word zeroed for 32-bit Linux) and cmd_status
 * back to userland.  Also handles Linux AEN registration.
 * NOTE(review): returns/breaks and error labels are elided from view.
 */
3204 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
3206 struct mfi_softc *sc;
3207 struct mfi_linux_ioc_packet l_ioc;
3208 struct mfi_linux_ioc_aen l_aen;
3209 struct mfi_command *cm = NULL;
3210 struct mfi_aen *mfi_aen_entry;
3211 union mfi_sense_ptr sense_ptr;
3213 uint8_t *data = NULL, *temp;
3220 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3221 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the user-supplied SGE count before trusting it. */
3225 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3229 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3230 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3231 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3234 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3238 * save off original context since copying from user
3239 * will clobber some data
3241 context = cm->cm_frame->header.context;
3243 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3244 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3245 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3246 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3247 cm->cm_frame->header.scsi_status = 0;
3248 cm->cm_frame->header.pad0 = 0;
3249 if (l_ioc.lioc_sge_count)
3251 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
/* Derive driver flags from the user frame's direction bits. */
3253 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3254 cm->cm_flags |= MFI_CMD_DATAIN;
3255 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3256 cm->cm_flags |= MFI_CMD_DATAOUT;
3257 cm->cm_len = cm->cm_frame->header.data_len;
3259 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3260 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3266 /* restore header context */
3267 cm->cm_frame->header.context = context;
/* Gather the user iovecs (PTRIN: 32-bit ptrs) into the staging buffer. */
3270 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3271 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3272 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3274 l_ioc.lioc_sgl[i].iov_len);
3276 device_printf(sc->mfi_dev,
3277 "Copy in failed\n");
3280 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3284 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3285 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Point pass-through sense at the command's preallocated sense buffer. */
3287 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3288 cm->cm_frame->pass.sense_addr_lo =
3289 (uint32_t)cm->cm_sense_busaddr;
3290 cm->cm_frame->pass.sense_addr_hi =
3291 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3294 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3295 error = mfi_check_command_pre(sc, cm);
3297 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3301 if ((error = mfi_wait_command(sc, cm)) != 0) {
3302 device_printf(sc->mfi_dev,
3303 "Controller polled failed\n");
3304 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3308 mfi_check_command_post(sc, cm);
3309 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter the staging buffer back to the user iovecs for inbound data. */
3312 if (cm->cm_flags & MFI_CMD_DATAIN) {
3313 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3314 error = copyout(temp,
3315 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3316 l_ioc.lioc_sgl[i].iov_len);
3318 device_printf(sc->mfi_dev,
3319 "Copy out failed\n");
3322 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3326 if (l_ioc.lioc_sense_len) {
3327 /* get user-space sense ptr then copy out sense */
3328 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3329 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3330 &sense_ptr.sense_ptr_data[0],
3331 sizeof(sense_ptr.sense_ptr_data));
3334 * only 32bit Linux support so zero out any
3335 * address over 32bit
3337 sense_ptr.addr.high = 0;
3339 error = copyout(cm->cm_sense, sense_ptr.user_space,
3340 l_ioc.lioc_sense_len);
3342 device_printf(sc->mfi_dev,
3343 "Copy out failed\n");
/* Return the firmware status byte directly into the user's packet. */
3348 error = copyout(&cm->cm_frame->header.cmd_status,
3349 &((struct mfi_linux_ioc_packet*)arg)
3350 ->lioc_frame.hdr.cmd_status,
3353 device_printf(sc->mfi_dev,
3354 "Copy out failed\n");
3359 mfi_config_unlock(sc, locked);
3361 kfree(data, M_MFIBUF);
3363 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3364 mfi_release_command(cm);
3365 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3369 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3370 error = copyin(arg, &l_aen, sizeof(l_aen));
3373 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3374 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3376 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Track the registering process so close() can clean up its entry. */
3377 if (mfi_aen_entry != NULL) {
3378 mfi_aen_entry->p = curproc;
3379 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3382 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3383 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
3386 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3388 kfree(mfi_aen_entry, M_MFIBUF);
3390 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3394 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_kqfilter: kqueue attach entry point.  Installs the read or write
 * filterops with the softc as the knote hook, rejects other filters
 * with EOPNOTSUPP, and inserts the knote on the device's klist.
 */
3403 mfi_kqfilter(struct dev_kqfilter_args *ap)
3405 cdev_t dev = ap->a_head.a_dev;
3406 struct knote *kn = ap->a_kn;
3407 struct mfi_softc *sc;
3408 struct klist *klist;
3413 switch (kn->kn_filter) {
3415 kn->kn_fop = &mfi_read_filterops;
3416 kn->kn_hook = (caddr_t)sc;
3419 kn->kn_fop = &mfi_write_filterops;
3420 kn->kn_hook = (caddr_t)sc;
3423 ap->a_result = EOPNOTSUPP;
3427 klist = &sc->mfi_kq.ki_note;
3428 knote_insert(klist, kn);
/*
 * mfi_filter_detach: kqueue detach — remove the knote from the device
 * klist recovered from the knote's hook pointer.
 */
3434 mfi_filter_detach(struct knote *kn)
3436 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3437 struct klist *klist = &sc->mfi_kq.ki_note;
3439 knote_remove(klist, kn);
/*
 * mfi_filter_read: EVFILT_READ predicate.  Reports the knote ready when
 * an AEN has fired (consuming the trigger flag); if no AEN is pending
 * and no AEN command is outstanding, flags the event as an error;
 * otherwise records that a poller is waiting.
 * NOTE(review): return statements are elided from this view.
 */
3443 mfi_filter_read(struct knote *kn, long hint)
3445 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3448 if (sc->mfi_aen_triggered != 0) {
/* Consume the trigger: one readiness report per AEN. */
3450 sc->mfi_aen_triggered = 0;
3452 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3453 kn->kn_flags |= EV_ERROR;
3456 sc->mfi_poll_waiting = 1;
/*
 * mfi_filter_write: EVFILT_WRITE predicate.  Body entirely elided from
 * this view — presumably a trivial always-/never-ready stub; confirm
 * against full source.
 */
3462 mfi_filter_write(struct knote *kn, long hint)
/*
 * NOTE(review): the function header above is elided from this view —
 * presumably mfi_dump_all (a debugger/diagnostic dump); confirm.
 * Walks every attached mfi softc by unit number, prints a banner, and
 * under the io lock reports each busy command whose timestamp is older
 * than mfi_cmd_timeout seconds.
 */
3470 struct mfi_softc *sc;
3471 struct mfi_command *cm;
3477 dc = devclass_find("mfi");
3479 kprintf("No mfi dev class\n");
/* Iterate adapters until devclass_get_softc() returns no more units. */
3483 for (i = 0; ; i++) {
3484 sc = devclass_get_softc(dc, i);
3487 device_printf(sc->mfi_dev, "Dumping\n\n");
3489 deadline = time_uptime - mfi_cmd_timeout;
3490 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3491 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3492 if (cm->cm_timestamp < deadline) {
3493 device_printf(sc->mfi_dev,
3494 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3495 cm, (int)(time_uptime - cm->cm_timestamp));
3506 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3513 mfi_timeout(void *data)
3515 struct mfi_softc *sc = (struct mfi_softc *)data;
3516 struct mfi_command *cm;
3520 deadline = time_uptime - mfi_cmd_timeout;
3521 if (sc->adpreset == 0) {
3522 if (!mfi_tbolt_reset(sc)) {
3523 callout_reset(&sc->mfi_watchdog_callout,
3524 mfi_cmd_timeout * hz, mfi_timeout, sc);
3528 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3529 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3530 if (sc->mfi_aen_cm == cm)
3532 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3533 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3534 cm->cm_timestamp = time_uptime;
3536 device_printf(sc->mfi_dev,
3537 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3538 cm, (int)(time_uptime - cm->cm_timestamp));
3540 MFI_VALIDATE_CMD(sc, cm);
3551 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3553 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,