2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
53 * FreeBSD projects/head_mfi/ r233016
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
64 #include <sys/eventhandler.h>
66 #include <sys/bus_dma.h>
70 #include <sys/signalvar.h>
71 #include <sys/device.h>
72 #include <sys/mplock2.h>
73 #include <sys/taskqueue.h>
75 #include <bus/cam/scsi/scsi_all.h>
77 #include <bus/pci/pcivar.h>
79 #include <dev/raid/mfi/mfireg.h>
80 #include <dev/raid/mfi/mfi_ioctl.h>
81 #include <dev/raid/mfi/mfivar.h>
83 static int mfi_alloc_commands(struct mfi_softc *);
84 static int mfi_comms_init(struct mfi_softc *);
85 static int mfi_get_controller_info(struct mfi_softc *);
86 static int mfi_get_log_state(struct mfi_softc *,
87 struct mfi_evt_log_state **);
88 static int mfi_parse_entries(struct mfi_softc *, int, int);
89 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
90 uint32_t, void **, size_t);
91 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
92 static void mfi_startup(void *arg);
93 static void mfi_intr(void *arg);
94 static void mfi_ldprobe(struct mfi_softc *sc);
95 static void mfi_syspdprobe(struct mfi_softc *sc);
96 static void mfi_handle_evt(void *context, int pending);
97 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
98 static void mfi_aen_complete(struct mfi_command *);
99 static int mfi_add_ld(struct mfi_softc *sc, int);
100 static void mfi_add_ld_complete(struct mfi_command *);
101 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
102 static void mfi_add_sys_pd_complete(struct mfi_command *);
103 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
104 static void mfi_bio_complete(struct mfi_command *);
105 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
106 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
107 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
108 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
109 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
110 static void mfi_timeout(void *);
111 static int mfi_user_command(struct mfi_softc *,
112 struct mfi_ioc_passthru *);
113 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
114 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
115 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
116 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
117 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
118 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
119 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
121 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
123 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
124 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
125 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
126 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
127 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
129 static void mfi_filter_detach(struct knote *);
130 static int mfi_filter_read(struct knote *, long);
131 static int mfi_filter_write(struct knote *, long);
/*
 * Driver-wide tunables, the management cdev plumbing, and kqueue filter
 * glue.  NOTE(review): this chunk is a gapped extraction; several of the
 * initializers below (e.g. the dev_ops struct) are missing interior lines.
 */
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
/* Locale filter used when registering for asynchronous event notifications. */
134 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
137 0, "event message locale");
/* Minimum event class (severity) reported. */
139 static int mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
142 0, "event message class");
/* Upper bound on the driver command pool; mfi_alloc_commands() clamps it
 * against the firmware's own maximum. */
144 static int mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
149 static int mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
152 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
/* Watchdog period; also used to re-arm the callout in mfi_attach(). */
154 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
155 TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RW, &mfi_cmd_timeout,
157 0, "Command timeout (in seconds)");
159 /* Management interface */
160 static d_open_t mfi_open;
161 static d_close_t mfi_close;
162 static d_ioctl_t mfi_ioctl;
163 static d_kqfilter_t mfi_kqfilter;
165 static struct dev_ops mfi_ops = {
168 .d_close = mfi_close,
169 .d_ioctl = mfi_ioctl,
170 .d_kqfilter = mfi_kqfilter,
/* kqueue filters backing the management device's EVFILT_READ/WRITE. */
173 static struct filterops mfi_read_filterops =
174 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
175 static struct filterops mfi_write_filterops =
176 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
178 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
180 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/* Global DMA bookkeeping shared by Skinny-class adapters. */
181 struct mfi_skinny_dma_info mfi_skinny;
/*
 * Enable interrupt delivery on xscale-class (1064R) controllers by
 * writing the outbound interrupt mask register.
 */
184 mfi_enable_intr_xscale(struct mfi_softc *sc)
186 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on ppc-class controllers.  Each family (1078, GEN2,
 * Skinny) uses a different doorbell-clear/mask sequence; an adapter that
 * matches none of the known flags is a fatal configuration error.
 */
190 mfi_enable_intr_ppc(struct mfi_softc *sc)
192 if (sc->mfi_flags & MFI_FLAGS_1078) {
/* Ack any pending doorbell bits, then unmask the 1078 interrupt. */
193 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
194 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
195 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
196 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
197 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
/* Skinny has no doorbell-clear register; just unmask bit 0. */
199 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
201 panic("unknown adapter type");
/* Return the raw firmware status word (xscale: outbound message reg 0). */
206 mfi_read_fw_status_xscale(struct mfi_softc *sc)
208 return MFI_READ4(sc, MFI_OMSG0);
/* Return the raw firmware status word (ppc: outbound scratchpad reg 0). */
212 mfi_read_fw_status_ppc(struct mfi_softc *sc)
214 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether the xscale controller raised a valid interrupt; if so,
 * acknowledge it by writing the status back to the outbound status reg.
 * Returns nonzero when the interrupt was not ours (missing return lines
 * fall outside this extraction).
 */
218 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
222 status = MFI_READ4(sc, MFI_OSTS);
223 if ((status & MFI_OSTS_INTR_VALID) == 0)
/* Ack by echoing the status bits back. */
226 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check/acknowledge an interrupt on ppc-class controllers.  The reply
 * message bit tested depends on the adapter family; the ack register
 * differs too (Skinny acks via OSTS, others via doorbell-clear ODCR0).
 */
231 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
235 status = MFI_READ4(sc, MFI_OSTS);
/* Not our interrupt if the family-specific reply bit is clear. */
236 if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
237 ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
238 ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
241 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
242 MFI_WRITE4(sc, MFI_OSTS, status);
244 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to an xscale controller: the inbound queue port takes
 * the frame bus address shifted right 3 bits OR'd with the frame count.
 */
249 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
251 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
/*
 * Post a command to a ppc-class controller.  The address word encodes
 * (frame_cnt << 1) | 1 in the low bits; Skinny uses a 64-bit inbound
 * queue port (low then high word), others a single 32-bit port.
 */
255 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
257 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
258 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
259 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
261 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
/*
 * Drive the firmware state machine until it reaches MFI_FWSTATE_READY,
 * issuing the appropriate inbound-doorbell kick for each intermediate
 * state and polling the status register with a bounded wait.  Returns 0
 * on READY, nonzero if the firmware faults or gets stuck.  (Several
 * switch/`break`/sleep lines are missing from this extraction.)
 */
266 mfi_transition_firmware(struct mfi_softc *sc)
268 uint32_t fw_state, cur_state;
270 uint32_t cur_abs_reg_val = 0;
271 uint32_t prev_abs_reg_val = 0;
273 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
274 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
275 while (fw_state != MFI_FWSTATE_READY) {
277 device_printf(sc->mfi_dev, "Waiting for firmware to "
279 cur_state = fw_state;
281 case MFI_FWSTATE_FAULT:
282 device_printf(sc->mfi_dev, "Firmware fault\n");
/* Skinny/ThunderBolt adapters take doorbell writes via SKINNY_IDB. */
284 case MFI_FWSTATE_WAIT_HANDSHAKE:
285 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
286 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
288 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
289 max_wait = MFI_RESET_WAIT_TIME;
291 case MFI_FWSTATE_OPERATIONAL:
292 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
293 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
295 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
296 max_wait = MFI_RESET_WAIT_TIME;
/* Transitional boot states: nothing to kick, just wait them out. */
298 case MFI_FWSTATE_UNDEFINED:
299 case MFI_FWSTATE_BB_INIT:
300 max_wait = MFI_RESET_WAIT_TIME;
302 case MFI_FWSTATE_FW_INIT_2:
303 max_wait = MFI_RESET_WAIT_TIME;
305 case MFI_FWSTATE_FW_INIT:
306 case MFI_FWSTATE_FLUSH_CACHE:
307 max_wait = MFI_RESET_WAIT_TIME;
309 case MFI_FWSTATE_DEVICE_SCAN:
310 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
/* Remember the full status word so scan progress can be detected below. */
311 prev_abs_reg_val = cur_abs_reg_val;
313 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
314 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
315 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
317 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
318 max_wait = MFI_RESET_WAIT_TIME;
321 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
/* Poll (10 samples per wait unit) for the state to change. */
325 for (i = 0; i < (max_wait * 10); i++) {
326 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
327 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
328 if (fw_state == cur_state)
/* A changing status word during DEVICE_SCAN means progress, not a hang. */
333 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
334 /* Check the device scanning progress */
335 if (prev_abs_reg_val != cur_abs_reg_val)
338 if (fw_state == cur_state) {
339 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load() callback: store the (single) segment's bus address
 * into the bus_addr_t pointed to by arg.
 */
348 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
353 *addr = segs[0].ds_addr;
/*
 * Main attach path: initialize locks/queues, pick the per-family register
 * ops, wait for firmware READY, create and load all DMA tags/buffers
 * (version buffer, ThunderBolt pools, data, comms, frames, sense),
 * initialize comms with the firmware, hook the interrupt, and create the
 * management cdev plus sysctl nodes.  (Error-path `goto`s and `return`s
 * are among the lines missing from this extraction.)
 */
357 mfi_attach(struct mfi_softc *sc)
360 int error, commsz, framessz, sensesz;
361 int frames, unit, max_fw_sge;
362 uint32_t tb_mem_size = 0;
367 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
370 lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
371 lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
372 TAILQ_INIT(&sc->mfi_ld_tqh);
373 TAILQ_INIT(&sc->mfi_syspd_tqh);
374 TAILQ_INIT(&sc->mfi_evt_queue);
375 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
376 TAILQ_INIT(&sc->mfi_aen_pids);
377 TAILQ_INIT(&sc->mfi_cam_ccbq);
385 sc->last_seq_num = 0;
/* Conservative default until the controller reports its own setting. */
386 sc->disableOnlineCtrlReset = 1;
387 sc->issuepend_done = 1;
388 sc->hw_crit_error = 0;
/* Select register-access ops by adapter family. */
390 if (sc->mfi_flags & MFI_FLAGS_1064R) {
391 sc->mfi_enable_intr = mfi_enable_intr_xscale;
392 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
393 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
394 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
395 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
396 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
397 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
398 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
399 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
400 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
401 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
403 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
405 sc->mfi_enable_intr = mfi_enable_intr_ppc;
406 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
407 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
408 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
412 /* Before we get too far, see if the firmware is working */
413 if ((error = mfi_transition_firmware(sc)) != 0) {
414 device_printf(sc->mfi_dev, "Firmware not in READY state, "
415 "error %d\n", error);
/* Version buffer DMA allocation (LSI change LSIP200113393). */
419 /* Start: LSIP200113393 */
420 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
421 1, 0, /* algnmnt, boundary */
422 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
423 BUS_SPACE_MAXADDR, /* highaddr */
424 NULL, NULL, /* filter, filterarg */
425 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
427 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
429 &sc->verbuf_h_dmat)) {
430 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
433 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
434 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
435 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
438 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
439 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
440 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
441 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
442 /* End: LSIP200113393 */
445 * Get information needed for sizing the contiguous memory for the
446 * frame pool. Size down the sgl parameter since we know that
447 * we will never need more than what's required for MAXPHYS.
448 * It would be nice if these constants were available at runtime
449 * instead of compile time.
451 status = sc->mfi_read_fw_status(sc);
452 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
453 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
454 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
456 /* ThunderBolt Support get the contiguous memory */
458 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
459 mfi_tbolt_init_globals(sc);
460 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
461 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
462 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
464 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
465 1, 0, /* algnmnt, boundary */
466 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
467 BUS_SPACE_MAXADDR, /* highaddr */
468 NULL, NULL, /* filter, filterarg */
469 tb_mem_size, /* maxsize */
471 tb_mem_size, /* maxsegsize */
474 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
477 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
478 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
479 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
482 bzero(sc->request_message_pool, tb_mem_size);
483 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
484 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
486 /* For ThunderBolt memory init */
487 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
488 0x100, 0, /* alignmnt, boundary */
489 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 BUS_SPACE_MAXADDR, /* highaddr */
491 NULL, NULL, /* filter, filterarg */
492 MFI_FRAME_SIZE, /* maxsize */
494 MFI_FRAME_SIZE, /* maxsegsize */
496 &sc->mfi_tb_init_dmat)) {
497 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
500 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
501 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
502 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
505 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
506 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
507 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
508 &sc->mfi_tb_init_busaddr, 0);
509 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
511 device_printf(sc->mfi_dev,
512 "Thunderbolt pool preparation error\n");
517 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
518 we are taking it diffrent from what we have allocated for Request
519 and reply descriptors to avoid confusion later
521 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
522 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
523 1, 0, /* algnmnt, boundary */
524 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
525 BUS_SPACE_MAXADDR, /* highaddr */
526 NULL, NULL, /* filter, filterarg */
527 tb_mem_size, /* maxsize */
529 tb_mem_size, /* maxsegsize */
531 &sc->mfi_tb_ioc_init_dmat)) {
532 device_printf(sc->mfi_dev,
533 "Cannot allocate comms DMA tag\n");
536 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
537 (void **)&sc->mfi_tb_ioc_init_desc,
538 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
539 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
542 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
543 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
544 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
545 &sc->mfi_tb_ioc_init_busaddr, 0);
548 * Create the dma tag for data buffers. Used both for block I/O
549 * and for various internal data queries.
551 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
552 1, 0, /* algnmnt, boundary */
553 BUS_SPACE_MAXADDR, /* lowaddr */
554 BUS_SPACE_MAXADDR, /* highaddr */
555 NULL, NULL, /* filter, filterarg */
556 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
557 sc->mfi_max_sge, /* nsegments */
558 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
559 BUS_DMA_ALLOCNOW, /* flags */
560 &sc->mfi_buffer_dmat)) {
561 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
566 * Allocate DMA memory for the comms queues. Keep it under 4GB for
567 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
568 * entry, so the calculated size here will be will be 1 more than
569 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
571 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
572 sizeof(struct mfi_hwcomms);
573 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
574 1, 0, /* algnmnt, boundary */
575 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
576 BUS_SPACE_MAXADDR, /* highaddr */
577 NULL, NULL, /* filter, filterarg */
578 commsz, /* maxsize */
580 commsz, /* maxsegsize */
582 &sc->mfi_comms_dmat)) {
583 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
586 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
587 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
588 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
591 bzero(sc->mfi_comms, commsz);
592 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
593 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
595 * Allocate DMA memory for the command frames. Keep them in the
596 * lower 4GB for efficiency. Calculate the size of the commands at
597 * the same time; each command is one 64 byte frame plus a set of
598 * additional frames for holding sg lists or other data.
599 * The assumption here is that the SG list will start at the second
600 * frame and not use the unused bytes in the first frame. While this
601 * isn't technically correct, it simplifies the calculation and allows
602 * for command frames that might be larger than an mfi_io_frame.
604 if (sizeof(bus_addr_t) == 8) {
605 sc->mfi_sge_size = sizeof(struct mfi_sg64);
606 sc->mfi_flags |= MFI_FLAGS_SG64;
608 sc->mfi_sge_size = sizeof(struct mfi_sg32);
610 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
611 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
612 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
613 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
614 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
615 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
616 64, 0, /* algnmnt, boundary */
617 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
618 BUS_SPACE_MAXADDR, /* highaddr */
619 NULL, NULL, /* filter, filterarg */
620 framessz, /* maxsize */
622 framessz, /* maxsegsize */
624 &sc->mfi_frames_dmat)) {
625 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
628 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
629 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
630 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
633 bzero(sc->mfi_frames, framessz);
634 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
635 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
637 * Allocate DMA memory for the frame sense data. Keep them in the
638 * lower 4GB for efficiency
640 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
641 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
642 4, 0, /* algnmnt, boundary */
643 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
644 BUS_SPACE_MAXADDR, /* highaddr */
645 NULL, NULL, /* filter, filterarg */
646 sensesz, /* maxsize */
648 sensesz, /* maxsegsize */
650 &sc->mfi_sense_dmat)) {
651 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
654 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
655 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
656 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
659 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
660 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
661 if ((error = mfi_alloc_commands(sc)) != 0)
665 * Before moving the FW to operational state, check whether
666 * hostmemory is required by the FW or not
669 /* ThunderBolt MFI_IOC2 INIT */
670 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
671 sc->mfi_disable_intr(sc);
672 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
673 device_printf(sc->mfi_dev,
674 "TB Init has failed with error %d\n",error);
678 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
680 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
681 mfi_intr_tbolt, sc, &sc->mfi_intr, NULL)) {
682 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
685 sc->mfi_enable_intr(sc);
688 if ((error = mfi_comms_init(sc)) != 0)
691 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
692 mfi_intr, sc, &sc->mfi_intr, NULL)) {
693 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
696 sc->mfi_enable_intr(sc);
698 if ((error = mfi_get_controller_info(sc)) != 0)
700 sc->disableOnlineCtrlReset = 0;
702 /* Register a config hook to probe the bus for arrays */
703 sc->mfi_ich.ich_func = mfi_startup;
704 sc->mfi_ich.ich_arg = sc;
705 sc->mfi_ich.ich_desc = "mfi";
706 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
707 device_printf(sc->mfi_dev, "Cannot establish configuration "
/*
 * NOTE(review): the comma operator below makes the tested expression
 * `(error = mfi_aen_setup(sc, 0), 0)`, which always evaluates to 0 --
 * so this branch can never fire and mfi_aen_setup() failures are
 * silently ignored.  Likely intended: `(error = mfi_aen_setup(sc, 0)) != 0`.
 */
711 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
712 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
717 * Register a shutdown handler.
719 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
720 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
721 device_printf(sc->mfi_dev, "Warning: shutdown event "
722 "registration failed\n");
726 * Create the control device for doing management
728 unit = device_get_unit(sc->mfi_dev);
729 sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
730 0640, "mfi%d", unit);
732 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
733 if (sc->mfi_cdev != NULL)
734 sc->mfi_cdev->si_drv1 = sc;
735 sysctl_ctx_init(&sc->mfi_sysctl_ctx);
736 sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
737 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
738 device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
739 if (sc->mfi_sysctl_tree == NULL) {
740 device_printf(sc->mfi_dev, "can't add sysctl node\n");
743 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
744 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
745 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
746 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
747 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
748 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
749 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
750 &sc->mfi_keep_deleted_volumes, 0,
751 "Don't detach the mfid device for a busy volume that is deleted");
753 device_add_child(sc->mfi_dev, "mfip", -1);
754 bus_generic_attach(sc->mfi_dev);
756 /* Start the timeout watchdog */
757 callout_init_mp(&sc->mfi_watchdog_callout);
758 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
/*
 * Carve the preallocated frame/sense DMA regions into per-command
 * mfi_command structures and create a buffer DMA map for each; commands
 * whose map creation succeeds are released onto the free queue.
 */
765 mfi_alloc_commands(struct mfi_softc *sc)
767 struct mfi_command *cm;
771 * XXX Should we allocate all the commands up front, or allocate on
772 * demand later like 'aac' does?
774 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
776 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
777 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
779 sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
782 for (i = 0; i < ncmds; i++) {
783 cm = &sc->mfi_commands[i];
/* Each command owns one mfi_cmd_size slice of the frame pool... */
784 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
785 sc->mfi_cmd_size * i);
786 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
787 sc->mfi_cmd_size * i;
/* ...with the pool index stored as the frame context for completion lookup. */
788 cm->cm_frame->header.context = i;
789 cm->cm_sense = &sc->mfi_sense[i];
790 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
793 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
794 &cm->cm_dmamap) == 0) {
/* mfi_release_command() requires the io lock; take it around the call. */
795 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
796 mfi_release_command(cm);
797 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
801 sc->mfi_total_cmds++;
/*
 * Return a command to the free pool: scrub the frame header (preserving
 * the context word at offset 2/3), clear the first S/G entry, reset the
 * per-command bookkeeping, and enqueue on the free list.  Caller must
 * hold the io lock.
 */
808 mfi_release_command(struct mfi_command *cm)
810 struct mfi_frame_header *hdr;
813 mfi_lockassert(&cm->cm_sc->mfi_io_lock);
816 * Zero out the important fields of the frame, but make sure the
817 * context field is preserved. For efficiency, handle the fields
818 * as 32 bit words. Clear out the first S/G entry too for safety.
820 hdr = &cm->cm_frame->header;
821 if (cm->cm_data != NULL && hdr->sg_count) {
822 cm->cm_sg->sg32[0].len = 0;
823 cm->cm_sg->sg32[0].addr = 0;
/* Words 2/3 (context) are deliberately skipped. */
826 hdr_data = (uint32_t *)cm->cm_frame;
827 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
828 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
829 hdr_data[4] = 0; /* flags, timeout */
830 hdr_data[5] = 0; /* data_len */
832 cm->cm_extra_frames = 0;
834 cm->cm_complete = NULL;
835 cm->cm_private = NULL;
838 cm->cm_total_frame_size = 0;
839 cm->retry_for_fw_reset = 0;
841 mfi_enqueue_free(cm);
/*
 * Build a DCMD command frame for `opcode`.  Optionally allocates a data
 * buffer of `bufsize` bytes (returned via *bufp if the caller didn't
 * supply one) and wires it into the frame as cm_private/cm_len.  On
 * success *cmp holds the prepared command.  Caller must hold the io lock.
 */
845 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
846 uint32_t opcode, void **bufp, size_t bufsize)
848 struct mfi_command *cm;
849 struct mfi_dcmd_frame *dcmd;
851 uint32_t context = 0;
853 mfi_lockassert(&sc->mfi_io_lock);
855 cm = mfi_dequeue_free(sc);
859 /* Zero out the MFI frame */
860 context = cm->cm_frame->header.context;
861 bzero(cm->cm_frame, sizeof(union mfi_frame));
862 cm->cm_frame->header.context = context;
/* Allocate a data buffer only when the caller wants one and didn't pass it. */
864 if ((bufsize > 0) && (bufp != NULL)) {
866 buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
868 mfi_release_command(cm);
877 dcmd = &cm->cm_frame->dcmd;
878 bzero(dcmd->mbox, MFI_MBOX_SIZE);
879 dcmd->header.cmd = MFI_CMD_DCMD;
880 dcmd->header.timeout = 0;
881 dcmd->header.flags = 0;
882 dcmd->header.data_len = bufsize;
883 dcmd->header.scsi_status = 0;
884 dcmd->opcode = opcode;
885 cm->cm_sg = &dcmd->sgl;
886 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
889 cm->cm_private = buf;
890 cm->cm_len = bufsize;
/* Hand the freshly-allocated buffer back to the caller. */
893 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI INIT command that tells the firmware where the reply
 * queue and producer/consumer indices live.  The init_qinfo structure is
 * placed in the second frame of the command (the S/G area) and addressed
 * via qinfo_new_addr_lo.  Issued polled, before interrupts are enabled.
 */
899 mfi_comms_init(struct mfi_softc *sc)
901 struct mfi_command *cm;
902 struct mfi_init_frame *init;
903 struct mfi_init_qinfo *qinfo;
905 uint32_t context = 0;
907 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
908 if ((cm = mfi_dequeue_free(sc)) == NULL) {
909 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
913 /* Zero out the MFI frame */
914 context = cm->cm_frame->header.context;
915 bzero(cm->cm_frame, sizeof(union mfi_frame));
916 cm->cm_frame->header.context = context;
919 * Abuse the SG list area of the frame to hold the init_qinfo
922 init = &cm->cm_frame->init;
923 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
925 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One extra reply slot beyond max_fw_cmds, as sized in mfi_attach(). */
926 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
927 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
928 offsetof(struct mfi_hwcomms, hw_reply_q);
929 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
930 offsetof(struct mfi_hwcomms, hw_pi);
931 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
932 offsetof(struct mfi_hwcomms, hw_ci);
934 init->header.cmd = MFI_CMD_INIT;
935 init->header.data_len = sizeof(struct mfi_init_qinfo);
936 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
938 cm->cm_flags = MFI_CMD_POLLED;
940 if ((error = mfi_mapcmd(sc, cm)) != 0) {
941 device_printf(sc->mfi_dev, "failed to send init command\n");
942 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
945 mfi_release_command(cm);
946 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Issue a polled MFI_DCMD_CTRL_GETINFO and derive sc->mfi_max_io from the
 * reported stripe/request limits.  On command failure a conservative
 * fallback based on mfi_max_sge is used instead.  Also latches the
 * controller's disableOnlineCtrlReset property.
 */
952 mfi_get_controller_info(struct mfi_softc *sc)
954 struct mfi_command *cm = NULL;
955 struct mfi_ctrl_info *ci = NULL;
956 uint32_t max_sectors_1, max_sectors_2;
959 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
960 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
961 (void **)&ci, sizeof(*ci));
964 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
966 if ((error = mfi_mapcmd(sc, cm)) != 0) {
967 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback I/O size estimate when the firmware query fails. */
968 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
974 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
975 BUS_DMASYNC_POSTREAD);
976 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Max I/O is the smaller of stripe-derived and firmware-reported limits. */
978 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
979 max_sectors_2 = ci->max_request_size;
980 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
981 sc->disableOnlineCtrlReset =
982 ci->properties.OnOffProperties.disableOnlineCtrlReset;
988 mfi_release_command(cm);
989 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch the controller's event-log state (sequence numbers) via a polled
 * MFI_DCMD_CTRL_EVENT_GETINFO.  The result buffer is allocated by
 * mfi_dcmd_command() and returned through *log_state; the caller owns
 * and must kfree() it.
 */
994 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
996 struct mfi_command *cm = NULL;
999 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1000 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1001 (void **)log_state, sizeof(**log_state));
1004 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1006 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1007 device_printf(sc->mfi_dev, "Failed to get log state\n");
1011 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1012 BUS_DMASYNC_POSTREAD);
1013 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1017 mfi_release_command(cm);
1018 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Set up asynchronous event notification: on a cold start (seq_start ==
 * 0), read the firmware's log state, replay events logged since the last
 * shutdown, and register for AENs from the newest sequence number with
 * the configured locale/class filter.
 */
1024 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1026 struct mfi_evt_log_state *log_state = NULL;
1027 union mfi_evt class_locale;
1031 class_locale.members.reserved = 0;
1032 class_locale.members.locale = mfi_event_locale;
1033 class_locale.members.evt_class = mfi_event_class;
1035 if (seq_start == 0) {
1036 error = mfi_get_log_state(sc, &log_state);
/*
 * NOTE(review): log_state is dereferenced on the very next visible line
 * with no check of `error` or of log_state != NULL between them; if
 * mfi_get_log_state() failed before allocating the buffer this is a
 * NULL dereference -- confirm against the missing lines / callee.
 */
1037 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1040 kfree(log_state, M_MFIBUF);
1045 * Walk through any events that fired since the last
1048 mfi_parse_entries(sc, log_state->shutdown_seq_num,
1049 log_state->newest_seq_num);
1050 seq = log_state->newest_seq_num;
1053 mfi_aen_register(sc, seq, class_locale.word);
1054 if (log_state != NULL)
1055 kfree(log_state, M_MFIBUF);
/*
 * Queue a command and sleep on it until the interrupt path marks it
 * MFI_CMD_COMPLETED, returning cm->cm_error.  A DCMD opcode of 0 (as
 * issued by MegaCli) is completed immediately with MFI_STAT_OK and never
 * sent to the firmware.  Caller must hold the io lock.
 */
1061 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1064 mfi_lockassert(&sc->mfi_io_lock);
1065 cm->cm_complete = NULL;
1069 * MegaCli can issue a DCMD of 0. In this case do nothing
1070 * and return 0 to it as status
1072 if (cm->cm_frame->dcmd.opcode == 0) {
1073 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1075 return (cm->cm_error);
1077 mfi_enqueue_ready(cm);
/* lksleep() drops and reacquires the io lock while waiting for wakeup. */
1079 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1080 lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
1081 return (cm->cm_error);
/*
 * Tear down all softc resources on detach/attach-failure.  Each DMA
 * area is released in unload -> free -> tag-destroy order, with each
 * step guarded so that a partially-initialized softc can be freed
 * safely.  ThunderBolt-only allocations are released only when
 * MFI_FLAGS_TBOLT is set.
 */
1085 mfi_free(struct mfi_softc *sc)
1087 struct mfi_command *cm;
/* Stop the watchdog before pulling resources out from under it. */
1090 callout_stop_sync(&sc->mfi_watchdog_callout);
1092 if (sc->mfi_cdev != NULL)
1093 destroy_dev(sc->mfi_cdev);
1094 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
1096 if (sc->mfi_total_cmds != 0) {
1097 for (i = 0; i < sc->mfi_total_cmds; i++) {
1098 cm = &sc->mfi_commands[i];
1099 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1101 kfree(sc->mfi_commands, M_MFIBUF);
1105 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1106 if (sc->mfi_irq != NULL)
1107 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer DMA area. */
1110 if (sc->mfi_sense_busaddr != 0)
1111 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1112 if (sc->mfi_sense != NULL)
1113 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1114 sc->mfi_sense_dmamap);
1115 if (sc->mfi_sense_dmat != NULL)
1116 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Command frame DMA area. */
1118 if (sc->mfi_frames_busaddr != 0)
1119 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1120 if (sc->mfi_frames != NULL)
1121 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1122 sc->mfi_frames_dmamap);
1123 if (sc->mfi_frames_dmat != NULL)
1124 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Reply queue / comms DMA area. */
1126 if (sc->mfi_comms_busaddr != 0)
1127 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1128 if (sc->mfi_comms != NULL)
1129 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1130 sc->mfi_comms_dmamap);
1131 if (sc->mfi_comms_dmat != NULL)
1132 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1134 /* ThunderBolt contiguous memory free here */
1135 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1136 if (sc->mfi_tb_busaddr != 0)
1137 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1138 if (sc->request_message_pool != NULL)
1139 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1141 if (sc->mfi_tb_dmat != NULL)
1142 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1144 /* Version buffer memory free */
1145 /* Start LSIP200113393 */
1146 if (sc->verbuf_h_busaddr != 0)
1147 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1148 if (sc->verbuf != NULL)
1149 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1150 sc->verbuf_h_dmamap);
1151 if (sc->verbuf_h_dmat != NULL)
1152 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1154 /* End LSIP200113393 */
1155 /* ThunderBolt INIT packet memory Free */
1156 if (sc->mfi_tb_init_busaddr != 0)
1157 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1158 if (sc->mfi_tb_init != NULL)
1159 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1160 sc->mfi_tb_init_dmamap);
1161 if (sc->mfi_tb_init_dmat != NULL)
1162 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1164 /* ThunderBolt IOC Init Desc memory free here */
1165 if (sc->mfi_tb_ioc_init_busaddr != 0)
1166 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1167 sc->mfi_tb_ioc_init_dmamap);
1168 if (sc->mfi_tb_ioc_init_desc != NULL)
1169 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1170 sc->mfi_tb_ioc_init_desc,
1171 sc->mfi_tb_ioc_init_dmamap);
1172 if (sc->mfi_tb_ioc_init_dmat != NULL)
1173 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Per-command ThunderBolt pool entries, then the pool array itself. */
1174 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1175 if (sc->mfi_cmd_pool_tbolt != NULL) {
1176 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1177 kfree(sc->mfi_cmd_pool_tbolt[i],
1179 sc->mfi_cmd_pool_tbolt[i] = NULL;
1183 if (sc->mfi_cmd_pool_tbolt != NULL) {
1184 kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1185 sc->mfi_cmd_pool_tbolt = NULL;
1187 if (sc->request_desc_pool != NULL) {
1188 kfree(sc->request_desc_pool, M_MFIBUF);
1189 sc->request_desc_pool = NULL;
1192 if (sc->mfi_buffer_dmat != NULL)
1193 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1194 if (sc->mfi_parent_dmat != NULL)
1195 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1197 if (sc->mfi_sysctl_tree != NULL)
1198 sysctl_ctx_free(&sc->mfi_sysctl_ctx);
1200 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1201 if (mtx_initialized(&sc->mfi_io_lock))
1204 lockuninit(&sc->mfi_io_lock);
1205 lockuninit(&sc->mfi_config_lock);
/*
 * Deferred attach hook (config_intrhook): runs once interrupts are
 * live.  Enables controller interrupts and probes logical drives /
 * system PDs under both the config and io locks.
 */
1212 mfi_startup(void *arg)
1214 struct mfi_softc *sc;
1216 sc = (struct mfi_softc *)arg;
/* One-shot: the hook is no longer needed once we have run. */
1218 config_intrhook_disestablish(&sc->mfi_ich);
1220 sc->mfi_enable_intr(sc);
1221 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1222 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1224 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1226 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1227 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Interrupt service path (function header above this sampled span;
 * presumably mfi_intr).  Drains the hardware reply queue between the
 * producer (hw_pi) and consumer (hw_ci) indexes, completing each
 * command whose context is valid, then unfreezes deferred I/O.
 */
1233 struct mfi_softc *sc;
1234 struct mfi_command *cm;
1235 uint32_t pi, ci, context;
1237 sc = (struct mfi_softc *)arg;
/* Not our interrupt (or nothing pending): bail early. */
1239 if (sc->mfi_check_clear_intr(sc))
1243 pi = sc->mfi_comms->hw_pi;
1244 ci = sc->mfi_comms->hw_ci;
1245 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1247 context = sc->mfi_comms->hw_reply_q[ci];
1248 if (context < sc->mfi_max_fw_cmds) {
1249 cm = &sc->mfi_commands[context];
1250 mfi_remove_busy(cm);
1252 mfi_complete(sc, cm);
/* The reply queue is circular: wrap the consumer index. */
1254 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1259 sc->mfi_comms->hw_ci = ci;
1261 /* Give deferred I/O a chance to run */
1262 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1263 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1265 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1268 * Dummy read to flush the bus; this ensures that the indexes are up
1269 * to date. Restart processing if more commands have come in.
1271 (void)sc->mfi_read_fw_status(sc);
1272 if (pi != sc->mfi_comms->hw_pi)
/*
 * Send a polled MFI_DCMD_CTRL_SHUTDOWN to the controller, first
 * aborting any outstanding AEN and map-update commands so nothing is
 * left in flight when the firmware quiesces.
 */
1279 mfi_shutdown(struct mfi_softc *sc)
1281 struct mfi_dcmd_frame *dcmd;
1282 struct mfi_command *cm;
1285 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1286 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1288 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Cancel the long-lived async commands before shutting down. */
1292 if (sc->mfi_aen_cm != NULL)
1293 mfi_abort(sc, sc->mfi_aen_cm);
1295 if (sc->map_update_cmd != NULL)
1296 mfi_abort(sc, sc->map_update_cmd);
1298 dcmd = &cm->cm_frame->dcmd;
1299 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1300 cm->cm_flags = MFI_CMD_POLLED;
1303 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1304 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1307 mfi_release_command(cm);
1308 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Synchronize the driver's SYSTEM PD (JBOD) child devices with the
 * controller's view: query the exposed-PD list with a polled DCMD,
 * attach children for new PDs, and detach children whose PDs are gone.
 * Caller must hold both mfi_config_lock and mfi_io_lock (asserted).
 */
1313 mfi_syspdprobe(struct mfi_softc *sc)
1315 struct mfi_frame_header *hdr;
1316 struct mfi_command *cm = NULL;
1317 struct mfi_pd_list *pdlist = NULL;
1318 struct mfi_system_pd *syspd, *tmp;
1319 int error, i, found;
1321 mfi_lockassert(&sc->mfi_config_lock);
1322 mfi_lockassert(&sc->mfi_io_lock);
1323 /* Add SYSTEM PD's */
1324 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1325 (void **)&pdlist, sizeof(*pdlist));
1327 device_printf(sc->mfi_dev,
1328 "Error while forming SYSTEM PD list\n");
1332 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
/* Only PDs exposed to the host are of interest. */
1333 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1334 cm->cm_frame->dcmd.mbox[1] = 0;
1335 if (mfi_mapcmd(sc, cm) != 0) {
1336 device_printf(sc->mfi_dev,
1337 "Failed to get syspd device listing\n");
1340 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1341 BUS_DMASYNC_POSTREAD);
1342 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1343 hdr = &cm->cm_frame->header;
1344 if (hdr->cmd_status != MFI_STAT_OK) {
1345 device_printf(sc->mfi_dev,
1346 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1349 /* Get each PD and add it to the system */
1350 for (i = 0; i < pdlist->count; i++) {
/* Skip enclosure entries (device id equals its enclosure id). */
1351 if (pdlist->addr[i].device_id ==
1352 pdlist->addr[i].encl_device_id)
1355 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1356 if (syspd->pd_id == pdlist->addr[i].device_id)
1360 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1362 /* Delete SYSPD's whose state has been changed */
1363 TAILQ_FOREACH_MUTABLE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1365 for (i = 0; i < pdlist->count; i++) {
1366 if (syspd->pd_id == pdlist->addr[i].device_id)
1370 kprintf("DELETE\n");
/* Drop the io lock across device_delete_child() to avoid deadlock. */
1371 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1373 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1375 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1380 kfree(pdlist, M_MFIBUF);
1382 mfi_release_command(cm);
/*
 * Enumerate the controller's logical drives (MFI_DCMD_LD_GET_LIST,
 * sleeping via mfi_wait_command) and attach an mfid child for any
 * target id not already present.  Caller must hold both the config
 * and io locks (asserted).
 */
1386 mfi_ldprobe(struct mfi_softc *sc)
1388 struct mfi_frame_header *hdr;
1389 struct mfi_command *cm = NULL;
1390 struct mfi_ld_list *list = NULL;
1391 struct mfi_disk *ld;
1394 mfi_lockassert(&sc->mfi_config_lock);
1395 mfi_lockassert(&sc->mfi_io_lock);
1397 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1398 (void **)&list, sizeof(*list));
1402 cm->cm_flags = MFI_CMD_DATAIN;
1403 if (mfi_wait_command(sc, cm) != 0) {
1404 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1408 hdr = &cm->cm_frame->header;
1409 if (hdr->cmd_status != MFI_STAT_OK) {
1410 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Add only LDs we do not already track. */
1415 for (i = 0; i < list->ld_count; i++) {
1416 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1417 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1420 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1425 kfree(list, M_MFIBUF);
1427 mfi_release_command(cm);
1433 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1434 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp for logging.  Returns a pointer to a
 * static buffer, so the result is only valid until the next call
 * (fine for the single-threaded event-print path it serves).
 */
1438 format_timestamp(uint32_t timestamp)
1440 static char buffer[32];
1442 if ((timestamp & 0xff000000) == 0xff000000)
1443 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1446 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a short human-readable name.
 * Unknown codes are rendered numerically into a static buffer
 * (not reentrant, same caveat as format_timestamp()).
 */
1451 format_class(int8_t class)
1453 static char buffer[6];
1456 case MFI_EVT_CLASS_DEBUG:
1458 case MFI_EVT_CLASS_PROGRESS:
1459 return ("progress");
1460 case MFI_EVT_CLASS_INFO:
1462 case MFI_EVT_CLASS_WARNING:
1464 case MFI_EVT_CLASS_CRITICAL:
1466 case MFI_EVT_CLASS_FATAL:
1468 case MFI_EVT_CLASS_DEAD:
/* Fall-through default: print the raw class value. */
1471 ksnprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log one firmware event and react to the hot-plug relevant ones:
 * host-bus rescan requests, logical-drive state changes, and physical
 * drive insert/remove (JBOD).  Events older than the boot sequence
 * number, or arriving during detach, are logged but otherwise ignored.
 */
1477 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1479 struct mfi_system_pd *syspd = NULL;
1481 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1482 format_timestamp(detail->time), detail->evt_class.members.locale,
1483 format_class(detail->evt_class.members.evt_class),
1484 detail->description);
1486 /* Don't act on old AEN's or while shutting down */
1487 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1490 switch (detail->arg_type) {
1491 case MR_EVT_ARGS_NONE:
1492 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1493 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1494 if (mfi_detect_jbod_change) {
1496 * Probe for new SYSPD's and Delete
1499 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1500 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1502 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1503 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1507 case MR_EVT_ARGS_LD_STATE:
1509 * During load time driver reads all the events starting
1510 * from the one that has been logged after shutdown. Avoid
1513 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1515 struct mfi_disk *ld;
1516 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1518 detail->args.ld_state.ld.target_id)
1522 Fix: for kernel panics when SSCD is removed
1523 KASSERT(ld != NULL, ("volume dissappeared"));
1527 device_delete_child(sc->mfi_dev, ld->ld_dev);
1532 case MR_EVT_ARGS_PD:
1533 if (detail->code == MR_EVT_PD_REMOVED) {
1534 if (mfi_detect_jbod_change) {
1536 * If the removed device is a SYSPD then
1539 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1542 detail->args.pd.device_id) {
1544 device_delete_child(
1553 if (detail->code == MR_EVT_PD_INSERTED) {
1554 if (mfi_detect_jbod_change) {
1555 /* Probe for new SYSPD's */
1556 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1557 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1559 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1560 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Copy an event detail onto the softc's event queue and kick the
 * swi taskqueue to process it outside interrupt context.  Uses
 * M_NOWAIT because this can run from the completion path; on
 * allocation failure the event is dropped.  Caller holds mfi_io_lock.
 */
1568 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1570 struct mfi_evt_queue_elm *elm;
1572 mfi_lockassert(&sc->mfi_io_lock);
1573 elm = kmalloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
1576 memcpy(&elm->detail, detail, sizeof(*detail));
1577 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1578 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: splice the softc's pending event list onto a
 * local queue under mfi_io_lock, then decode and free each entry
 * with the lock dropped (mfi_decode_evt may attach/detach children).
 */
1582 mfi_handle_evt(void *context, int pending)
1584 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1585 struct mfi_softc *sc;
1586 struct mfi_evt_queue_elm *elm;
1590 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1591 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1592 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1593 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1594 TAILQ_REMOVE(&queue, elm, link);
1595 mfi_decode_evt(sc, &elm->detail);
1596 kfree(elm, M_MFIBUF);
/*
 * Register an MFI_DCMD_CTRL_EVENT_WAIT command that the firmware
 * completes when a matching event occurs.  If an AEN is already
 * pending, its class/locale filter is compared with the request: a
 * superset filter is left alone, otherwise the filters are merged and
 * the old AEN is aborted so it can be re-issued with the wider filter.
 */
1601 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1603 struct mfi_command *cm;
1604 struct mfi_dcmd_frame *dcmd;
1605 union mfi_evt current_aen, prior_aen;
1606 struct mfi_evt_detail *ed = NULL;
1609 current_aen.word = locale;
1610 if (sc->mfi_aen_cm != NULL) {
/* mbox[1] of the outstanding AEN holds its class/locale word. */
1612 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1613 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1614 !((prior_aen.members.locale & current_aen.members.locale)
1615 ^current_aen.members.locale)) {
/* Merge filters: widest locale, lowest (most verbose) class. */
1618 prior_aen.members.locale |= current_aen.members.locale;
1619 if (prior_aen.members.evt_class
1620 < current_aen.members.evt_class)
1621 current_aen.members.evt_class =
1622 prior_aen.members.evt_class;
1623 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1624 mfi_abort(sc, sc->mfi_aen_cm);
1625 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1629 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1630 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1631 (void **)&ed, sizeof(*ed));
1632 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1637 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale filter. */
1638 ((uint32_t *)&dcmd->mbox)[0] = seq;
1639 ((uint32_t *)&dcmd->mbox)[1] = locale;
1640 cm->cm_flags = MFI_CMD_DATAIN;
1641 cm->cm_complete = mfi_aen_complete;
1643 sc->last_seq_num = seq;
1644 sc->mfi_aen_cm = cm;
1646 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1647 mfi_enqueue_ready(cm);
1649 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Completion handler for the outstanding AEN command.  Unless the
 * command was aborted, the received event is queued for decoding,
 * kqueue waiters and registered AEN processes are notified, and a
 * fresh AEN is re-armed at the next sequence number so the driver
 * keeps receiving events.  Runs with mfi_io_lock held (asserted).
 */
1656 mfi_aen_complete(struct mfi_command *cm)
1658 struct mfi_frame_header *hdr;
1659 struct mfi_softc *sc;
1660 struct mfi_evt_detail *detail;
1661 struct mfi_aen *mfi_aen_entry, *tmp;
1663 int seq = 0, aborted = 0;
1666 mfi_lockassert(&sc->mfi_io_lock);
1668 hdr = &cm->cm_frame->header;
1670 if (sc->mfi_aen_cm == NULL)
/* An aborted or invalid-status AEN is not treated as a real event. */
1673 if (sc->mfi_aen_cm->cm_aen_abort ||
1674 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1675 sc->mfi_aen_cm->cm_aen_abort = 0;
1678 sc->mfi_aen_triggered = 1;
1679 if (sc->mfi_poll_waiting) {
1680 sc->mfi_poll_waiting = 0;
1681 KNOTE(&sc->mfi_kq.ki_note, 0);
1683 detail = cm->cm_data;
1684 mfi_queue_evt(sc, detail);
/* Next registration resumes just past the event we consumed. */
1685 seq = detail->seq + 1;
1686 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
1688 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1690 p = mfi_aen_entry->p;
1694 kfree(mfi_aen_entry, M_MFIBUF);
1698 kfree(cm->cm_data, M_MFIBUF);
1699 sc->mfi_aen_cm = NULL;
/* Wake anyone (e.g. mfi_abort) waiting for the AEN slot to clear. */
1700 wakeup(&sc->mfi_aen_cm);
1701 mfi_release_command(cm);
1703 /* set it up again so the driver can catch more events */
1705 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1706 mfi_aen_setup(sc, seq);
1707 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Events fetched per MFI_DCMD_CTRL_EVENT_GET batch. */
1711 #define MAX_EVENTS 15
/*
 * Replay log entries from start_seq up to (but not including) stop_seq,
 * fetching them in MAX_EVENTS-sized batches with polled DCMDs and
 * queuing each for decoding.  The firmware log is a circular buffer,
 * so the stop condition handles stop_seq wrapping below start_seq.
 */
1714 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1716 struct mfi_command *cm;
1717 struct mfi_dcmd_frame *dcmd;
1718 struct mfi_evt_list *el;
1719 union mfi_evt class_locale;
1720 int error, i, seq, size;
1722 class_locale.members.reserved = 0;
1723 class_locale.members.locale = mfi_event_locale;
1724 class_locale.members.evt_class = mfi_event_class;
1726 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1728 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1732 for (seq = start_seq;;) {
1733 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1734 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1735 kfree(el, M_MFIBUF);
1736 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1740 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Hand-build the EVENT_GET DCMD frame for this batch. */
1742 dcmd = &cm->cm_frame->dcmd;
1743 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1744 dcmd->header.cmd = MFI_CMD_DCMD;
1745 dcmd->header.timeout = 0;
1746 dcmd->header.data_len = size;
1747 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1748 ((uint32_t *)&dcmd->mbox)[0] = seq;
1749 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1750 cm->cm_sg = &dcmd->sgl;
1751 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1752 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1756 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1757 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1758 device_printf(sc->mfi_dev,
1759 "Failed to get controller entries\n");
1760 mfi_release_command(cm);
1761 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1765 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1766 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1767 BUS_DMASYNC_POSTREAD);
1768 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND: no entries at/after seq — the replay is finished. */
1770 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1771 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1772 mfi_release_command(cm);
1773 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1776 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1777 device_printf(sc->mfi_dev,
1778 "Error %d fetching controller entries\n",
1779 dcmd->header.cmd_status);
1780 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1781 mfi_release_command(cm);
1782 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1785 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1786 mfi_release_command(cm);
1787 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1789 for (i = 0; i < el->count; i++) {
1791 * If this event is newer than 'stop_seq' then
1792 * break out of the loop. Note that the log
1793 * is a circular buffer so we have to handle
1794 * the case that our stop point is earlier in
1795 * the buffer than our start point.
1797 if (el->event[i].seq >= stop_seq) {
1798 if (start_seq <= stop_seq)
1800 else if (el->event[i].seq < start_seq)
1803 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1804 mfi_queue_evt(sc, &el->event[i]);
1805 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Next batch resumes after the last event we received. */
1807 seq = el->event[el->count - 1].seq + 1;
1810 kfree(el, M_MFIBUF);
/*
 * Fetch MFI_DCMD_LD_GET_INFO for logical drive 'id' and, unless the
 * LD is an SSCD (CacheCade) volume, attach a child device via
 * mfi_add_ld_complete().  Caller holds mfi_io_lock (asserted).
 */
1815 mfi_add_ld(struct mfi_softc *sc, int id)
1817 struct mfi_command *cm;
1818 struct mfi_dcmd_frame *dcmd = NULL;
1819 struct mfi_ld_info *ld_info = NULL;
1822 mfi_lockassert(&sc->mfi_io_lock);
1824 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1825 (void **)&ld_info, sizeof(*ld_info));
1827 device_printf(sc->mfi_dev,
1828 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1830 kfree(ld_info, M_MFIBUF);
1833 cm->cm_flags = MFI_CMD_DATAIN;
1834 dcmd = &cm->cm_frame->dcmd;
1836 if (mfi_wait_command(sc, cm) != 0) {
1837 device_printf(sc->mfi_dev,
1838 "Failed to get logical drive: %d\n", id);
1839 kfree(ld_info, M_MFIBUF);
/* SSCD volumes get no child device; their ld_info is freed below. */
1842 if (ld_info->ld_config.params.isSSCD != 1) {
1843 mfi_add_ld_complete(cm);
1845 mfi_release_command(cm);
1846 if (ld_info) /* SSCD drives ld_info free here */
1847 kfree(ld_info, M_MFIBUF);
/*
 * Second half of mfi_add_ld(): on a successful LD_GET_INFO, add an
 * "mfid" child carrying ld_info as its ivars and attach it.  The io
 * lock is dropped around the newbus calls.  On any failure path the
 * ld_info buffer is freed here.
 */
1853 mfi_add_ld_complete(struct mfi_command *cm)
1855 struct mfi_frame_header *hdr;
1856 struct mfi_ld_info *ld_info;
1857 struct mfi_softc *sc;
1861 hdr = &cm->cm_frame->header;
1862 ld_info = cm->cm_private;
1864 if (hdr->cmd_status != MFI_STAT_OK) {
1865 kfree(ld_info, M_MFIBUF);
1866 mfi_release_command(cm);
1869 mfi_release_command(cm);
1871 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1873 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1874 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1875 kfree(ld_info, M_MFIBUF);
1877 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* ld_info ownership passes to the child via its ivars. */
1881 device_set_ivars(child, ld_info);
1882 device_set_desc(child, "MFI Logical Disk");
1883 bus_generic_attach(sc->mfi_dev);
1885 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Fetch MFI_DCMD_PD_GET_INFO for physical drive 'id' with a polled
 * command and hand the result to mfi_add_sys_pd_complete() to attach
 * an mfisyspd child.  Caller holds mfi_io_lock (asserted).
 */
1889 mfi_add_sys_pd(struct mfi_softc *sc, int id)
1891 struct mfi_command *cm;
1892 struct mfi_dcmd_frame *dcmd = NULL;
1893 struct mfi_pd_info *pd_info = NULL;
1896 mfi_lockassert(&sc->mfi_io_lock);
1898 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1899 (void **)&pd_info, sizeof(*pd_info));
1901 device_printf(sc->mfi_dev,
1902 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1905 kfree(pd_info, M_MFIBUF);
1908 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1909 dcmd = &cm->cm_frame->dcmd;
1911 dcmd->header.scsi_status = 0;
1912 dcmd->header.pad0 = 0;
1913 if (mfi_mapcmd(sc, cm) != 0) {
1914 device_printf(sc->mfi_dev,
1915 "Failed to get physical drive info %d\n", id);
1916 kfree(pd_info, M_MFIBUF);
/* Sync/unload before the completion handler reads pd_info. */
1919 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1920 BUS_DMASYNC_POSTREAD);
1921 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1922 mfi_add_sys_pd_complete(cm);
/*
 * Second half of mfi_add_sys_pd(): verify the PD is in SYSTEM (JBOD)
 * state, then add and attach an "mfisyspd" child carrying pd_info as
 * its ivars.  The io lock is dropped around the newbus calls; pd_info
 * is freed on every failure path.
 */
1927 mfi_add_sys_pd_complete(struct mfi_command *cm)
1929 struct mfi_frame_header *hdr;
1930 struct mfi_pd_info *pd_info;
1931 struct mfi_softc *sc;
1935 hdr = &cm->cm_frame->header;
1936 pd_info = cm->cm_private;
1938 if (hdr->cmd_status != MFI_STAT_OK) {
1939 kfree(pd_info, M_MFIBUF);
1940 mfi_release_command(cm);
/* Only PDs in SYSTEM state become JBOD children. */
1943 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1944 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1945 pd_info->ref.v.device_id);
1946 kfree(pd_info, M_MFIBUF);
1947 mfi_release_command(cm);
1950 mfi_release_command(cm);
1952 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1954 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1955 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1956 kfree(pd_info, M_MFIBUF);
1958 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* pd_info ownership passes to the child via its ivars. */
1962 device_set_ivars(child, pd_info);
1963 device_set_desc(child, "MFI System PD");
1964 bus_generic_attach(sc->mfi_dev);
1966 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Pull the next bio off the queue and build an MFI command for it:
 * a pass-through frame for SYSPD (JBOD) targets, an LD I/O frame
 * otherwise.  Returns NULL (and re-queues the bio if one was taken)
 * when resources are short.
 */
1969 static struct mfi_command *
1970 mfi_bio_command(struct mfi_softc *sc)
1973 struct mfi_command *cm = NULL;
1974 struct mfi_disk *mfid;
1976 /* reserving two commands to avoid starvation for IOCTL */
1977 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1979 if ((bio = mfi_dequeue_bio(sc)) == NULL)
1981 mfid = bio->bio_driver_info;
1982 if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1983 cm = mfi_build_syspdio(sc, bio);
1985 cm = mfi_build_ldio(sc, bio);
/* Builder failed (no free command): put the bio back for later. */
1987 mfi_enqueue_bio(sc, bio);
/*
 * Build a SCSI pass-through (READ_10/WRITE_10) frame for a bio aimed
 * at a system PD (JBOD).  The CDB LBA/length fields are filled from
 * bio_offset in MFI_SECTOR_LEN units.  Returns NULL if no free
 * command is available.
 */
1991 static struct mfi_command *
1992 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1994 struct mfi_command *cm;
1996 struct mfi_system_pd *disk;
1997 struct mfi_pass_frame *pass;
1998 int flags = 0, blkcount = 0;
1999 uint32_t context = 0;
2001 if ((cm = mfi_dequeue_free(sc)) == NULL)
2004 /* Zero out the MFI frame */
2005 context = cm->cm_frame->header.context;
2006 bzero(cm->cm_frame, sizeof(union mfi_frame));
2007 cm->cm_frame->header.context = context;
2009 pass = &cm->cm_frame->pass;
2010 bzero(pass->cdb, 16);
2011 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2012 switch (bp->b_cmd & 0x03) {
2014 pass->cdb[0] = READ_10;
2015 flags = MFI_CMD_DATAIN;
2018 pass->cdb[0] = WRITE_10;
2019 flags = MFI_CMD_DATAOUT;
2022 panic("Invalid bio command");
2025 /* Cheat with the sector length to avoid a non-constant division */
2026 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2027 disk = bio->bio_driver_info;
2028 /* Fill the LBA and Transfer length in CDB */
2029 pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
2030 pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
2031 pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
2032 pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
2033 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2034 pass->cdb[8] = (blkcount & 0x00ff);
2035 pass->header.target_id = disk->pd_id;
2036 pass->header.timeout = 0;
2037 pass->header.flags = 0;
2038 pass->header.scsi_status = 0;
2039 pass->header.sense_len = MFI_SENSE_LEN;
2040 pass->header.data_len = bp->b_bcount;
2041 pass->header.cdb_len = 10;
/* Sense buffer bus address, split into lo/hi 32-bit halves. */
2042 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2043 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2044 cm->cm_complete = mfi_bio_complete;
2045 cm->cm_private = bio;
2046 cm->cm_data = bp->b_data;
2047 cm->cm_len = bp->b_bcount;
2048 cm->cm_sg = &pass->sgl;
2049 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2050 cm->cm_flags = flags;
/*
 * Build an LD_READ/LD_WRITE I/O frame for a bio aimed at a logical
 * drive.  data_len is in MFI_SECTOR_LEN blocks and the 64-bit LBA is
 * split across lba_hi/lba_lo.  Returns NULL if no free command is
 * available.
 */
2054 static struct mfi_command *
2055 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2057 struct mfi_io_frame *io;
2059 struct mfi_disk *disk;
2060 struct mfi_command *cm;
2061 int flags, blkcount;
2062 uint32_t context = 0;
2064 if ((cm = mfi_dequeue_free(sc)) == NULL)
2067 /* Zero out the MFI frame */
2068 context = cm->cm_frame->header.context;
2069 bzero(cm->cm_frame, sizeof(union mfi_frame));
2070 cm->cm_frame->header.context = context;
2072 io = &cm->cm_frame->io;
2073 switch (bp->b_cmd & 0x03) {
2075 io->header.cmd = MFI_CMD_LD_READ;
2076 flags = MFI_CMD_DATAIN;
2079 io->header.cmd = MFI_CMD_LD_WRITE;
2080 flags = MFI_CMD_DATAOUT;
2083 panic("Invalid bio command");
2086 /* Cheat with the sector length to avoid a non-constant division */
2087 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2088 disk = bio->bio_driver_info;
2089 io->header.target_id = disk->ld_id;
2090 io->header.timeout = 0;
2091 io->header.flags = 0;
2092 io->header.scsi_status = 0;
2093 io->header.sense_len = MFI_SENSE_LEN;
2094 io->header.data_len = blkcount;
/* Sense buffer bus address, split into lo/hi 32-bit halves. */
2095 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2096 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2097 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
2098 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
2099 cm->cm_complete = mfi_bio_complete;
2100 cm->cm_private = bio;
2101 cm->cm_data = bp->b_data;
2102 cm->cm_len = bp->b_bcount;
2103 cm->cm_sg = &io->sgl;
2104 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2105 cm->cm_flags = flags;
/*
 * Completion handler for bio-backed commands: translate firmware /
 * SCSI / driver errors into B_ERROR on the buf, release the command,
 * and finish the bio via mfi_disk_complete().
 */
2110 mfi_bio_complete(struct mfi_command *cm)
2114 struct mfi_frame_header *hdr;
2115 struct mfi_softc *sc;
2117 bio = cm->cm_private;
2119 hdr = &cm->cm_frame->header;
2122 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2123 bp->b_flags |= B_ERROR;
2125 device_printf(sc->mfi_dev, "I/O error, status= %d "
2126 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2127 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2128 } else if (cm->cm_error != 0) {
2129 bp->b_flags |= B_ERROR;
/* Release before completing so the command is reusable immediately. */
2132 mfi_release_command(cm);
2133 mfi_disk_complete(bio);
/*
 * Issue as much queued work as possible, in priority order: already
 * prepared (ready-queue) commands, then CAM pass-through CCBs, then
 * block-layer bios.  Stops when the controller is frozen, no work
 * remains, or mapping a command fails (which requeues it).
 */
2137 mfi_startio(struct mfi_softc *sc)
2139 struct mfi_command *cm;
2140 struct ccb_hdr *ccbh;
2143 /* Don't bother if we're short on resources */
2144 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2147 /* Try a command that has already been prepared */
2148 cm = mfi_dequeue_ready(sc);
2151 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2152 cm = sc->mfi_cam_start(ccbh);
2155 /* Nope, so look for work on the bioq */
2157 cm = mfi_bio_command(sc);
2159 /* No work available, so exit */
2163 /* Send the command to the controller */
2164 if (mfi_mapcmd(sc, cm) != 0) {
2165 mfi_requeue_ready(cm);
/*
 * Map a command's data buffer for DMA (the callback finishes frame
 * setup and sends it), or send data-less commands directly.  For
 * polled commands the load must not defer (BUS_DMA_NOWAIT); a
 * deferred load freezes the queue until resources free up.
 * Caller holds mfi_io_lock (asserted).
 */
2172 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2176 mfi_lockassert(&sc->mfi_io_lock);
2178 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2179 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2180 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2181 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2182 if (error == EINPROGRESS) {
2183 sc->mfi_flags |= MFI_FLAGS_QFRZN;
/* No data: dispatch directly (ThunderBolt vs. legacy path). */
2187 if (sc->MFA_enabled)
2188 error = mfi_tbolt_send_frame(sc, cm);
2190 error = mfi_send_frame(sc, cm);
/*
 * bus_dmamap_load() callback: fill the command's scatter/gather list
 * from the DMA segments (IEEE-style SGLs for I/O on SKINNY parts,
 * sg32/sg64 otherwise, with special first-segment handling for STP
 * frames), sync the buffer for the transfer direction, account for
 * the extra frames the SGL consumes, and send the command.
 */
2197 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2199 struct mfi_frame_header *hdr;
2200 struct mfi_command *cm;
2202 struct mfi_softc *sc;
2203 int i, j, first, dir;
2205 cm = (struct mfi_command *)arg;
2207 hdr = &cm->cm_frame->header;
/* Map failure: record the error and complete the command as failed. */
2211 kprintf("error %d in callback\n", error);
2212 cm->cm_error = error;
2213 mfi_complete(sc, cm);
2217 /* Use IEEE sgl only for IO's on a SKINNY controller
2218 * For other commands on a SKINNY controller use either
2219 * sg32 or sg64 based on the sizeof(bus_addr_t).
2220 * Also calculate the total frame size based on the type
2223 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2224 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2225 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2226 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2227 for (i = 0; i < nsegs; i++) {
2228 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2229 sgl->sg_skinny[i].len = segs[i].ds_len;
2230 sgl->sg_skinny[i].flag = 0;
2232 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2233 hdr->sg_count = nsegs;
/* STP: the first cm_stp_len bytes form their own leading SG entry. */
2236 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2237 first = cm->cm_stp_len;
2238 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2239 sgl->sg32[j].addr = segs[0].ds_addr;
2240 sgl->sg32[j++].len = first;
2242 sgl->sg64[j].addr = segs[0].ds_addr;
2243 sgl->sg64[j++].len = first;
2247 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2248 for (i = 0; i < nsegs; i++) {
2249 sgl->sg32[j].addr = segs[i].ds_addr + first;
2250 sgl->sg32[j++].len = segs[i].ds_len - first;
2254 for (i = 0; i < nsegs; i++) {
2255 sgl->sg64[j].addr = segs[i].ds_addr + first;
2256 sgl->sg64[j++].len = segs[i].ds_len - first;
2259 hdr->flags |= MFI_FRAME_SGL64;
/* Sync the buffer for the direction(s) this command transfers. */
2265 if (cm->cm_flags & MFI_CMD_DATAIN) {
2266 dir |= BUS_DMASYNC_PREREAD;
2267 hdr->flags |= MFI_FRAME_DIR_READ;
2269 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2270 dir |= BUS_DMASYNC_PREWRITE;
2271 hdr->flags |= MFI_FRAME_DIR_WRITE;
2273 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2274 cm->cm_flags |= MFI_CMD_MAPPED;
2277 * Instead of calculating the total number of frames in the
2278 * compound frame, it's already assumed that there will be at
2279 * least 1 frame, so don't compensate for the modulo of the
2280 * following division.
2282 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2283 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2285 if (sc->MFA_enabled)
2286 mfi_tbolt_send_frame(sc, cm);
2288 mfi_send_frame(sc, cm);
/*
 * Hand a fully-built frame to the controller.  Non-polled commands go
 * onto the busy queue and complete via interrupt; polled commands are
 * busy-waited here up to MFI_POLL_TIMEOUT_SECS, using an
 * INVALID_STATUS sentinel the hardware overwrites on completion.
 */
2292 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2294 struct mfi_frame_header *hdr;
2295 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2297 hdr = &cm->cm_frame->header;
2299 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2300 cm->cm_timestamp = time_uptime;
2301 mfi_enqueue_busy(cm);
/* Polled: sentinel status, and keep it out of the reply queue. */
2303 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2304 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2308 * The bus address of the command is aligned on a 64 byte boundary,
2309 * leaving the least 6 bits as zero. For whatever reason, the
2310 * hardware wants the address shifted right by three, leaving just
2311 * 3 zero bits. These three bits are then used as a prefetching
2312 * hint for the hardware to predict how many frames need to be
2313 * fetched across the bus. If a command has more than 8 frames
2314 * then the 3 bits are set to 0x7 and the firmware uses other
2315 * information in the command to determine the total amount to fetch.
2316 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2317 * is enough for both 32bit and 64bit systems.
2319 if (cm->cm_extra_frames > 7)
2320 cm->cm_extra_frames = 7;
2322 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2324 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2327 /* This is a polled command, so busy-wait for it to complete. */
2328 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2335 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2336 device_printf(sc->mfi_dev, "Frame %p timed out "
2337 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Common completion path: undo the DMA mapping (syncing for the
 * direction(s) used), mark the command completed, and either invoke
 * its completion callback or wake a mfi_wait_command() sleeper.
 */
2345 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2349 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2351 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2352 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2353 dir |= BUS_DMASYNC_POSTREAD;
2354 if (cm->cm_flags & MFI_CMD_DATAOUT)
2355 dir |= BUS_DMASYNC_POSTWRITE;
2357 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2358 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2359 cm->cm_flags &= ~MFI_CMD_MAPPED;
2362 cm->cm_flags |= MFI_CMD_COMPLETED;
2364 if (cm->cm_complete != NULL)
2365 cm->cm_complete(cm);
/*
 * Ask the firmware to abort an outstanding command (cm_abort).
 *
 * Builds a polled MFI_CMD_ABORT frame that identifies the victim by its
 * frame context and bus address, then (in the AEN case) waits briefly
 * for the AEN command to drain.  Caller must hold mfi_io_lock.
 *
 * NOTE(review): interior source lines are elided in this view.
 */
2371 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2373 struct mfi_command *cm;
2374 struct mfi_abort_frame *abort;
2376 uint32_t context = 0;
2378 mfi_lockassert(&sc->mfi_io_lock);
2380 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2384 /* Zero out the MFI frame */
/* Preserve the pre-assigned context across the bzero of the frame. */
2385 context = cm->cm_frame->header.context;
2386 bzero(cm->cm_frame, sizeof(union mfi_frame));
2387 cm->cm_frame->header.context = context;
2389 abort = &cm->cm_frame->abort;
2390 abort->header.cmd = MFI_CMD_ABORT;
2391 abort->header.flags = 0;
2392 abort->header.scsi_status = 0;
/* Identify the command to abort by context plus 64-bit frame bus address. */
2393 abort->abort_context = cm_abort->cm_frame->header.context;
2394 abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2395 abort->abort_mfi_addr_hi =
2396 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2398 cm->cm_flags = MFI_CMD_POLLED;
/* Flag the outstanding AEN command as being aborted. */
2401 sc->mfi_aen_cm->cm_aen_abort = 1;
2403 mfi_release_command(cm);
/* Wait up to 5 iterations for the AEN command to complete/clear. */
2405 while (i < 5 && sc->mfi_aen_cm != NULL) {
2406 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
/*
 * Write 'len' bytes at 'virt' to logical disk 'id' starting at sector
 * 'lba', using a polled MFI_CMD_LD_WRITE frame.  Used for kernel crash
 * dumps, where interrupts may not be available.
 *
 * NOTE(review): interior source lines are elided in this view.
 */
2415 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2418 struct mfi_command *cm;
2419 struct mfi_io_frame *io;
2421 uint32_t context = 0;
2423 if ((cm = mfi_dequeue_free(sc)) == NULL)
2426 /* Zero out the MFI frame */
2427 context = cm->cm_frame->header.context;
2428 bzero(cm->cm_frame, sizeof(union mfi_frame));
2429 cm->cm_frame->header.context = context;
2431 io = &cm->cm_frame->io;
2432 io->header.cmd = MFI_CMD_LD_WRITE;
2433 io->header.target_id = id;
2434 io->header.timeout = 0;
2435 io->header.flags = 0;
2436 io->header.scsi_status = 0;
2437 io->header.sense_len = MFI_SENSE_LEN;
/* data_len for LD I/O is in sectors, rounded up from the byte length. */
2438 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2439 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2440 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
/* Split the 64-bit LBA into the frame's hi/lo halves. */
2441 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2442 io->lba_lo = lba & 0xffffffff;
2445 cm->cm_sg = &io->sgl;
2446 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
/* Polled write: safe in dump context with interrupts disabled. */
2447 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2449 error = mfi_mapcmd(sc, cm);
2450 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2451 BUS_DMASYNC_POSTWRITE);
2452 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2453 mfi_release_command(cm);
/*
 * Write dump data to a system (pass-through) physical disk via a polled
 * SCSI WRITE(10) pass-through frame.  Companion of mfi_dump_blocks()
 * for JBOD/syspd targets.
 *
 * NOTE(review): interior source lines are elided in this view.  The
 * WRITE(10) CDB limits the LBA to 32 bits — presumably acceptable for
 * dump partitions; confirm against callers.
 */
2459 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2462 struct mfi_command *cm;
2463 struct mfi_pass_frame *pass;
2467 if ((cm = mfi_dequeue_free(sc)) == NULL)
2470 pass = &cm->cm_frame->pass;
2471 bzero(pass->cdb, 16);
2472 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Build a WRITE(10) CDB: big-endian 32-bit LBA in bytes 2-5. */
2473 pass->cdb[0] = WRITE_10;
2474 pass->cdb[2] = (lba & 0xff000000) >> 24;
2475 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2476 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2477 pass->cdb[5] = (lba & 0x000000ff);
/* Transfer length in sectors (big-endian 16-bit, CDB bytes 7-8). */
2478 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2479 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2480 pass->cdb[8] = (blkcount & 0x00ff);
2481 pass->header.target_id = id;
2482 pass->header.timeout = 0;
2483 pass->header.flags = 0;
2484 pass->header.scsi_status = 0;
2485 pass->header.sense_len = MFI_SENSE_LEN;
/* Unlike LD I/O, pass-through data_len is in bytes. */
2486 pass->header.data_len = len;
2487 pass->header.cdb_len = 10;
2488 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2489 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2492 cm->cm_sg = &pass->sgl;
2493 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2494 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2496 error = mfi_mapcmd(sc, cm);
2497 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2498 BUS_DMASYNC_POSTWRITE);
2499 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2500 mfi_release_command(cm);
/*
 * Character-device open handler for /dev/mfiN.
 * Refuses new opens while the driver is detaching, otherwise marks the
 * softc as open (MFI_FLAGS_OPEN) under mfi_io_lock.
 *
 * NOTE(review): interior source lines are elided in this view.
 */
2506 mfi_open(struct dev_open_args *ap)
2508 cdev_t dev = ap->a_head.a_dev;
2509 struct mfi_softc *sc;
2514 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Detach in progress: the elided branch presumably rejects the open. */
2515 if (sc->mfi_detaching)
2518 sc->mfi_flags |= MFI_FLAGS_OPEN;
2521 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Character-device close handler.
 * Clears MFI_FLAGS_OPEN and removes any AEN registrations owned by the
 * closing process from mfi_aen_pids, all under mfi_io_lock.
 *
 * NOTE(review): interior source lines are elided in this view.
 */
2527 mfi_close(struct dev_close_args *ap)
2529 cdev_t dev = ap->a_head.a_dev;
2530 struct mfi_softc *sc;
2531 struct mfi_aen *mfi_aen_entry, *tmp;
2535 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2536 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* MUTABLE iteration: entries may be removed while walking the list. */
2538 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2539 if (mfi_aen_entry->p == curproc) {
2540 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2542 kfree(mfi_aen_entry, M_MFIBUF);
2545 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Take the configuration lock for DCMD opcodes that modify the array
 * configuration (LD delete, config add/clear, foreign import).
 * Returns a "locked" token consumed by mfi_config_unlock(); opcodes not
 * listed presumably fall through without locking (lines elided).
 */
2550 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2554 case MFI_DCMD_LD_DELETE:
2555 case MFI_DCMD_CFG_ADD:
2556 case MFI_DCMD_CFG_CLEAR:
2557 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2558 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
/*
 * Release the configuration lock if mfi_config_lock() reported that it
 * was taken ('locked' non-zero per the visible release call).
 */
2566 mfi_config_unlock(struct mfi_softc *sc, int locked)
2570 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2574 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * Before a userland-originated command is issued: disable the affected
 * mfid disk device(s) so the kernel side cannot race a destructive
 * firmware operation (LD delete, config clear, PD state change).
 * Caller holds mfi_io_lock.  NOTE(review): interior lines are elided.
 */
2578 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2580 struct mfi_disk *ld, *ld2;
2582 struct mfi_system_pd *syspd = NULL;
2586 mfi_lockassert(&sc->mfi_io_lock);
2588 switch (cm->cm_frame->dcmd.opcode) {
2589 case MFI_DCMD_LD_DELETE:
/* Locate the logical disk named in mbox[0] and disable it. */
2590 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2591 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2597 error = mfi_disk_disable(ld);
2599 case MFI_DCMD_CFG_CLEAR:
/* Config clear affects every LD: disable all of them. */
2600 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2601 error = mfi_disk_disable(ld);
/* On failure, the second loop re-enables disks disabled so far. */
2606 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2609 mfi_disk_enable(ld2);
2613 case MFI_DCMD_PD_STATE_SET:
/* mbox layout: presumably pd_id in mbox[0..1], new state in mbox[2]. */
2614 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2616 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2617 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2618 if (syspd->pd_id == syspd_id)
2625 error = mfi_syspd_disable(syspd);
2633 /* Perform post-issue checks on commands from userland. */
/*
 * After a userland command completes: on success, delete the child
 * mfid device(s) the firmware operation removed (or rescan on add);
 * on failure, re-enable the disks that mfi_check_command_pre()
 * disabled.  Note the visible lock-drop/re-acquire around
 * device_delete_child(), which must not run under mfi_io_lock.
 * NOTE(review): interior lines are elided in this view.
 */
2635 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2637 struct mfi_disk *ld, *ldn;
2638 struct mfi_system_pd *syspd = NULL;
2642 switch (cm->cm_frame->dcmd.opcode) {
2643 case MFI_DCMD_LD_DELETE:
2644 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2645 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2648 KASSERT(ld != NULL, ("volume dissappeared"));
2649 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* Drop mfi_io_lock across the device teardown. */
2650 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2652 device_delete_child(sc->mfi_dev, ld->ld_dev);
2654 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Delete failed in firmware: restore the disk we disabled pre-issue. */
2656 mfi_disk_enable(ld);
2658 case MFI_DCMD_CFG_CLEAR:
2659 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2660 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2662 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2663 device_delete_child(sc->mfi_dev, ld->ld_dev);
2666 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2668 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2669 mfi_disk_enable(ld);
2672 case MFI_DCMD_CFG_ADD:
2673 case MFI_DCMD_CFG_FOREIGN_IMPORT:
/* Successful add/import: elided code presumably triggers a rescan. */
2674 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK)
2677 case MFI_DCMD_PD_STATE_SET:
2678 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2680 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2681 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2682 if (syspd->pd_id == syspd_id)
2688 /* If the transition fails then enable the syspd again */
2689 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2690 mfi_syspd_enable(syspd);
/*
 * Detect whether a CFG_ADD or LD_DELETE command targets a CacheCade /
 * SSC disk (isSSCD); for LD_DELETE this issues a synchronous
 * MFI_DCMD_LD_GET_INFO to inspect the target LD's parameters.
 * Callers use the result to skip the usual pre/post checks.
 *
 * NOTE(review): interior source lines are elided in this view.
 */
2696 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2698 struct mfi_config_data *conf_data = cm->cm_data;
2699 struct mfi_command *ld_cm = NULL;
2700 struct mfi_ld_info *ld_info = NULL;
/* CFG_ADD case: the new LD's parameters are in the user's config data. */
2703 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2704 (conf_data->ld[0].params.isSSCD == 1)) {
2706 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
/* LD_DELETE: fetch the target LD's info from the firmware first. */
2707 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2708 (void **)&ld_info, sizeof(*ld_info));
2710 device_printf(sc->mfi_dev, "Failed to allocate"
2711 "MFI_DCMD_LD_GET_INFO %d", error);
2713 kfree(ld_info, M_MFIBUF);
2716 ld_cm->cm_flags = MFI_CMD_DATAIN;
/* Target LD id is carried in mbox[0] of the original delete command. */
2717 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2718 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2719 if (mfi_wait_command(sc, ld_cm) != 0) {
2720 device_printf(sc->mfi_dev, "failed to get log drv\n");
2721 mfi_release_command(ld_cm);
2722 kfree(ld_info, M_MFIBUF);
2726 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2727 kfree(ld_info, M_MFIBUF);
2728 mfi_release_command(ld_cm);
2731 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2734 if (ld_info->ld_config.params.isSSCD == 1)
2737 mfi_release_command(ld_cm);
2738 kfree(ld_info, M_MFIBUF);
/*
 * Prepare the kernel-side buffers for an MFI_CMD_STP (SATA pass-through)
 * ioctl: for each user SGL entry, create a dedicated DMA tag/map/buffer
 * (kbuff_arr[i]), copy the user data in, and fill both the megasas-style
 * SGE array inside the frame and the frame's own sg32/sg64 list.
 * Cleanup of these per-command resources happens in the ioctl path.
 *
 * NOTE(review): interior source lines (including error-path returns)
 * are elided in this view.
 */
2744 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2747 struct mfi_ioc_packet *ioc;
2748 ioc = (struct mfi_ioc_packet *)arg;
2749 int sge_size, error;
2750 struct megasas_sge *kern_sge;
2752 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* SGE array lives inside the frame at the user-specified offset. */
2753 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2754 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
/* 64-bit bus addresses need SGL64 frames; otherwise size from the frame. */
2756 if (sizeof(bus_addr_t) == 8) {
2757 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2758 cm->cm_extra_frames = 2;
2759 sge_size = sizeof(struct mfi_sg64);
2761 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2762 sge_size = sizeof(struct mfi_sg32);
2765 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2766 for (i = 0; i < ioc->mfi_sge_count; i++) {
/* One DMA tag per SGL entry, sized exactly to that entry's length. */
2767 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2768 1, 0, /* algnmnt, boundary */
2769 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2770 BUS_SPACE_MAXADDR, /* highaddr */
2771 NULL, NULL, /* filter, filterarg */
2772 ioc->mfi_sgl[i].iov_len,/* maxsize */
2774 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2775 BUS_DMA_ALLOCNOW, /* flags */
2776 &sc->mfi_kbuff_arr_dmat[i])) {
2777 device_printf(sc->mfi_dev,
2778 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2782 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2783 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2784 &sc->mfi_kbuff_arr_dmamap[i])) {
2785 device_printf(sc->mfi_dev,
2786 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
/* mfi_addr_cb stores the mapped bus address into kbuff_arr_busaddr[i]. */
2790 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2791 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2792 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2793 &sc->mfi_kbuff_arr_busaddr[i], 0);
2795 if (!sc->kbuff_arr[i]) {
2796 device_printf(sc->mfi_dev,
2797 "Could not allocate memory for kbuff_arr info\n");
2800 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2801 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
/* Mirror the same entry into the frame's native sg64/sg32 list. */
2803 if (sizeof(bus_addr_t) == 8) {
2804 cm->cm_frame->stp.sgl.sg64[i].addr =
2805 kern_sge[i].phys_addr;
2806 cm->cm_frame->stp.sgl.sg64[i].len =
2807 ioc->mfi_sgl[i].iov_len;
2809 cm->cm_frame->stp.sgl.sg32[i].addr =
2810 kern_sge[i].phys_addr;
2811 cm->cm_frame->stp.sgl.sg32[i].len =
2812 ioc->mfi_sgl[i].iov_len;
/* Copy the user payload into the freshly mapped kernel buffer. */
2815 error = copyin(ioc->mfi_sgl[i].iov_base,
2817 ioc->mfi_sgl[i].iov_len);
2819 device_printf(sc->mfi_dev, "Copy in failed\n");
2824 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * Execute a userland pass-through DCMD (MFIIO_PASSTHRU): copy in the
 * optional data buffer, build a DCMD frame from ioc->ioc_frame, run it
 * synchronously, then copy the frame and data back out.
 *
 * NOTE(review): interior source lines are elided in this view.
 */
2829 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2831 struct mfi_command *cm;
2832 struct mfi_dcmd_frame *dcmd;
2833 void *ioc_buf = NULL;
2835 int error = 0, locked;
2838 if (ioc->buf_size > 0) {
2839 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2840 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2842 device_printf(sc->mfi_dev, "failed to copyin\n");
2843 kfree(ioc_buf, M_MFIBUF);
/* Take the config lock first if this opcode modifies configuration. */
2848 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2850 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Sleep on the function address as a wait channel until a command frees. */
2851 while ((cm = mfi_dequeue_free(sc)) == NULL)
2852 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2854 /* Save context for later */
2855 context = cm->cm_frame->header.context;
2857 dcmd = &cm->cm_frame->dcmd;
/* The user-supplied frame overwrites ours, including its context field. */
2858 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2860 cm->cm_sg = &dcmd->sgl;
2861 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2862 cm->cm_data = ioc_buf;
2863 cm->cm_len = ioc->buf_size;
2865 /* restore context */
2866 cm->cm_frame->header.context = context;
2868 /* Cheat since we don't know if we're writing or reading */
2869 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2871 error = mfi_check_command_pre(sc, cm);
2875 error = mfi_wait_command(sc, cm);
2877 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the completed frame (status, mbox) to the caller's struct. */
2880 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2881 mfi_check_command_post(sc, cm);
2883 mfi_release_command(cm);
2884 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2885 mfi_config_unlock(sc, locked);
2886 if (ioc->buf_size > 0)
2887 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2889 kfree(ioc_buf, M_MFIBUF);
/* Convert a (possibly 32-bit) user pointer value to a kernel void *. */
2893 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * Main ioctl dispatcher for /dev/mfiN.  Handles statistics queries,
 * disk queries, the native MFI command packet interface (including
 * MFI_CMD_STP via mfi_stp_cmd()), AEN registration, the Linux
 * management-tool shims, and MFIIO_PASSTHRU.
 *
 * NOTE(review): large parts of this function (case labels, error paths,
 * braces) are elided in this view; comments describe visible code only.
 */
2896 mfi_ioctl(struct dev_ioctl_args *ap)
2898 cdev_t dev = ap->a_head.a_dev;
2899 u_long cmd = ap->a_cmd;
2900 int flag = ap->a_fflag;
2901 caddr_t arg = ap->a_data;
2902 struct mfi_softc *sc;
2903 union mfi_statrequest *ms;
2904 struct mfi_ioc_packet *ioc;
2905 struct mfi_ioc_aen *aen;
2906 struct mfi_command *cm = NULL;
2908 union mfi_sense_ptr sense_ptr;
2909 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2912 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
/* Refuse service while the adapter is in a critical/reset state. */
2921 if (sc->hw_crit_error)
2924 if (sc->issuepend_done == 0)
/* Statistics request: copy out the selected queue-stat structure. */
2929 ms = (union mfi_statrequest *)arg;
2930 switch (ms->ms_item) {
2935 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2936 sizeof(struct mfi_qstat));
2943 case MFIIO_QUERY_DISK:
2945 struct mfi_query_disk *qd;
2946 struct mfi_disk *ld;
2948 qd = (struct mfi_query_disk *)arg;
2949 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2950 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2951 if (ld->ld_id == qd->array_id)
2956 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2960 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2962 bzero(qd->devname, SPECNAMELEN + 1);
2963 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2964 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Native MFI packet: may target another adapter via its unit number. */
2969 devclass_t devclass;
2970 ioc = (struct mfi_ioc_packet *)arg;
2973 adapter = ioc->mfi_adapter_no;
2974 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2975 devclass = devclass_find("mfi");
2976 sc = devclass_get_softc(devclass, adapter);
2978 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2979 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2980 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2983 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2987 * save off original context since copying from user
2988 * will clobber some data
2990 context = cm->cm_frame->header.context;
2991 cm->cm_frame->header.context = cm->cm_index;
2993 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2994 2 * MEGAMFI_FRAME_SIZE);
2995 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2996 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2997 cm->cm_frame->header.scsi_status = 0;
2998 cm->cm_frame->header.pad0 = 0;
2999 if (ioc->mfi_sge_count) {
3001 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Derive data direction from the user frame's flag bits. */
3004 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3005 cm->cm_flags |= MFI_CMD_DATAIN;
3006 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3007 cm->cm_flags |= MFI_CMD_DATAOUT;
3008 /* Legacy app shim */
3009 if (cm->cm_flags == 0)
3010 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3011 cm->cm_len = cm->cm_frame->header.data_len;
/* STP commands carry extra payload described by the first SGL entry. */
3012 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3013 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3014 cm->cm_len += cm->cm_stp_len;
3017 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3018 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3024 /* restore header context */
3025 cm->cm_frame->header.context = context;
/* STP path builds its own per-SGE DMA buffers in mfi_stp_cmd(). */
3027 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3028 res = mfi_stp_cmd(sc, cm, arg);
/* Gather the user SGL into the contiguous kernel bounce buffer. */
3033 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3034 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3035 for (i = 0; i < ioc->mfi_sge_count; i++) {
3036 addr = ioc->mfi_sgl[i].iov_base;
3037 len = ioc->mfi_sgl[i].iov_len;
3038 error = copyin(addr, temp, len);
3040 device_printf(sc->mfi_dev,
3041 "Copy in failed\n");
3049 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3050 locked = mfi_config_lock(sc,
3051 cm->cm_frame->dcmd.opcode);
/* PD SCSI I/O: point the frame's sense buffer at our DMA-able sense area. */
3053 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3054 cm->cm_frame->pass.sense_addr_lo =
3055 (uint32_t)cm->cm_sense_busaddr;
3056 cm->cm_frame->pass.sense_addr_hi =
3057 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3059 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* CacheCade targets skip the pre/post disable/enable dance. */
3060 skip_pre_post = mfi_check_for_sscd(sc, cm);
3061 if (!skip_pre_post) {
3062 error = mfi_check_command_pre(sc, cm);
3064 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3069 if ((error = mfi_wait_command(sc, cm)) != 0) {
3070 device_printf(sc->mfi_dev,
3071 "Controller polled failed\n");
3072 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3077 mfi_check_command_post(sc, cm);
3078 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter the bounce buffer back out to the user's SGL on DATAIN. */
3080 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3082 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3083 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3084 for (i = 0; i < ioc->mfi_sge_count; i++) {
3085 addr = ioc->mfi_sgl[i].iov_base;
3086 len = ioc->mfi_sgl[i].iov_len;
3087 error = copyout(temp, addr, len);
3089 device_printf(sc->mfi_dev,
3090 "Copy out failed\n");
3098 if (ioc->mfi_sense_len) {
3099 /* get user-space sense ptr then copy out sense */
3100 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3101 &sense_ptr.sense_ptr_data[0],
3102 sizeof(sense_ptr.sense_ptr_data));
3103 error = copyout(cm->cm_sense, sense_ptr.user_space,
3104 ioc->mfi_sense_len);
3106 device_printf(sc->mfi_dev,
3107 "Copy out failed\n");
/* Reflect the firmware's completion status back into the user packet. */
3112 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3114 mfi_config_unlock(sc, locked);
3116 kfree(data, M_MFIBUF);
/* STP cleanup: unload/free/destroy the per-SGE DMA resources. */
3117 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3118 for (i = 0; i < 2; i++) {
3119 if (sc->kbuff_arr[i]) {
/* NOTE(review): tests the array address, not element [i] — looks
 * suspicious but interior lines are elided; verify upstream. */
3120 if (sc->mfi_kbuff_arr_busaddr != 0)
3122 sc->mfi_kbuff_arr_dmat[i],
3123 sc->mfi_kbuff_arr_dmamap[i]
3125 if (sc->kbuff_arr[i] != NULL)
3127 sc->mfi_kbuff_arr_dmat[i],
3129 sc->mfi_kbuff_arr_dmamap[i]
3131 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3132 bus_dma_tag_destroy(
3133 sc->mfi_kbuff_arr_dmat[i]);
3138 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3139 mfi_release_command(cm);
3140 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* AEN registration from native management tools. */
3146 aen = (struct mfi_ioc_aen *)arg;
3147 error = mfi_aen_register(sc, aen->aen_seq_num,
3148 aen->aen_class_locale);
3151 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3153 devclass_t devclass;
3154 struct mfi_linux_ioc_packet l_ioc;
3157 devclass = devclass_find("mfi");
3158 if (devclass == NULL)
3161 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3164 adapter = l_ioc.lioc_adapter_no;
3165 sc = devclass_get_softc(devclass, adapter);
/* Re-dispatch to the Linux-format handler for the selected adapter. */
3168 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3172 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3174 devclass_t devclass;
3175 struct mfi_linux_ioc_aen l_aen;
3178 devclass = devclass_find("mfi");
3179 if (devclass == NULL)
3182 error = copyin(arg, &l_aen, sizeof(l_aen));
3185 adapter = l_aen.laen_adapter_no;
3186 sc = devclass_get_softc(devclass, adapter);
3189 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3193 case MFIIO_PASSTHRU:
3194 error = mfi_user_command(sc, iop);
3197 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Handle ioctls in the Linux megaraid_sas tool format (32-bit layout):
 * copy in the Linux packet, rebuild a native MFI frame/SGL from it,
 * execute it synchronously, then copy data, sense, and status back in
 * the Linux layout.  Also implements the Linux AEN registration shim.
 *
 * NOTE(review): interior source lines are elided in this view.
 */
3206 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
3208 struct mfi_softc *sc;
3209 struct mfi_linux_ioc_packet l_ioc;
3210 struct mfi_linux_ioc_aen l_aen;
3211 struct mfi_command *cm = NULL;
3212 struct mfi_aen *mfi_aen_entry;
3213 union mfi_sense_ptr sense_ptr;
3215 uint8_t *data = NULL, *temp;
3222 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3223 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the SGE count before trusting the user-supplied packet. */
3227 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3231 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3232 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3233 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3236 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3240 * save off original context since copying from user
3241 * will clobber some data
3243 context = cm->cm_frame->header.context;
3245 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3246 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3247 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3248 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3249 cm->cm_frame->header.scsi_status = 0;
3250 cm->cm_frame->header.pad0 = 0;
3251 if (l_ioc.lioc_sge_count)
3253 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
/* Direction from the user frame's flag bits, as in the native path. */
3255 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3256 cm->cm_flags |= MFI_CMD_DATAIN;
3257 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3258 cm->cm_flags |= MFI_CMD_DATAOUT;
3259 cm->cm_len = cm->cm_frame->header.data_len;
3261 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3262 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3268 /* restore header context */
3269 cm->cm_frame->header.context = context;
/* Gather user SGL entries into the contiguous kernel buffer. */
3272 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3273 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3274 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3276 l_ioc.lioc_sgl[i].iov_len);
3278 device_printf(sc->mfi_dev,
3279 "Copy in failed\n");
3282 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3286 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3287 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3289 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3290 cm->cm_frame->pass.sense_addr_lo =
3291 (uint32_t)cm->cm_sense_busaddr;
3292 cm->cm_frame->pass.sense_addr_hi =
3293 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3296 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3297 error = mfi_check_command_pre(sc, cm);
3299 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3303 if ((error = mfi_wait_command(sc, cm)) != 0) {
3304 device_printf(sc->mfi_dev,
3305 "Controller polled failed\n");
3306 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3310 mfi_check_command_post(sc, cm);
3311 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter results back to the user's SGL on DATAIN. */
3314 if (cm->cm_flags & MFI_CMD_DATAIN) {
3315 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3316 error = copyout(temp,
3317 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3318 l_ioc.lioc_sgl[i].iov_len);
3320 device_printf(sc->mfi_dev,
3321 "Copy out failed\n");
3324 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3328 if (l_ioc.lioc_sense_len) {
3329 /* get user-space sense ptr then copy out sense */
3330 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3331 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3332 &sense_ptr.sense_ptr_data[0],
3333 sizeof(sense_ptr.sense_ptr_data));
3336 * only 32bit Linux support so zero out any
3337 * address over 32bit
3339 sense_ptr.addr.high = 0;
3341 error = copyout(cm->cm_sense, sense_ptr.user_space,
3342 l_ioc.lioc_sense_len);
3344 device_printf(sc->mfi_dev,
3345 "Copy out failed\n");
/* Write the completion status directly into the user's packet. */
3350 error = copyout(&cm->cm_frame->header.cmd_status,
3351 &((struct mfi_linux_ioc_packet*)arg)
3352 ->lioc_frame.hdr.cmd_status,
3355 device_printf(sc->mfi_dev,
3356 "Copy out failed\n");
3361 mfi_config_unlock(sc, locked);
3363 kfree(data, M_MFIBUF);
3365 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3366 mfi_release_command(cm);
3367 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3371 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3372 error = copyin(arg, &l_aen, sizeof(l_aen));
3375 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3376 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3378 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Track the registering process so mfi_close() can unregister it. */
3379 if (mfi_aen_entry != NULL) {
3380 mfi_aen_entry->p = curproc;
3381 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3384 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3385 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
3388 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3390 kfree(mfi_aen_entry, M_MFIBUF);
3392 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3396 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * kqueue filter attach for /dev/mfiN: wire EVFILT read/write filters to
 * the softc and insert the knote into the device's knote list.
 *
 * NOTE(review): interior source lines are elided in this view.
 */
3405 mfi_kqfilter(struct dev_kqfilter_args *ap)
3407 cdev_t dev = ap->a_head.a_dev;
3408 struct knote *kn = ap->a_kn;
3409 struct mfi_softc *sc;
3410 struct klist *klist;
3415 switch (kn->kn_filter) {
3417 kn->kn_fop = &mfi_read_filterops;
/* Stash the softc so the filter callbacks can reach driver state. */
3418 kn->kn_hook = (caddr_t)sc;
3421 kn->kn_fop = &mfi_write_filterops;
3422 kn->kn_hook = (caddr_t)sc;
3425 ap->a_result = EOPNOTSUPP;
3429 klist = &sc->mfi_kq.ki_note;
3430 knote_insert(klist, kn);
/* kqueue filter detach: remove the knote from the device's knote list. */
3436 mfi_filter_detach(struct knote *kn)
3438 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3439 struct klist *klist = &sc->mfi_kq.ki_note;
3441 knote_remove(klist, kn);
/*
 * EVFILT_READ filter: report readable when an AEN has fired
 * (mfi_aen_triggered), consuming the flag; flag EV_ERROR when no AEN is
 * pending and no AEN command is outstanding; otherwise note that a
 * poller is waiting.  NOTE(review): interior lines are elided.
 */
3445 mfi_filter_read(struct knote *kn, long hint)
3447 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3450 if (sc->mfi_aen_triggered != 0) {
/* One-shot: consume the trigger so the next poll blocks again. */
3452 sc->mfi_aen_triggered = 0;
3454 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3455 kn->kn_flags |= EV_ERROR;
3458 sc->mfi_poll_waiting = 1;
/*
 * EVFILT_WRITE filter.  NOTE(review): the entire body is elided in this
 * view; behavior cannot be documented from here.
 */
3464 mfi_filter_write(struct knote *kn, long hint)
/*
 * NOTE(review): the enclosing function's signature line is elided from
 * this view.  The visible body walks every mfi softc registered in the
 * "mfi" devclass and prints any busy command older than mfi_cmd_timeout
 * seconds — presumably a debug "dump all timed-out commands" helper.
 */
3472 struct mfi_softc *sc;
3473 struct mfi_command *cm;
3479 dc = devclass_find("mfi");
3481 kprintf("No mfi dev class\n");
/* Iterate controller units until devclass_get_softc() runs out. */
3485 for (i = 0; ; i++) {
3486 sc = devclass_get_softc(dc, i);
3489 device_printf(sc->mfi_dev, "Dumping\n\n");
/* Commands timestamped before 'deadline' have exceeded the timeout. */
3491 deadline = time_uptime - mfi_cmd_timeout;
3492 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3493 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3494 if (cm->cm_timestamp < deadline) {
3495 device_printf(sc->mfi_dev,
3496 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3497 cm, (int)(time_uptime - cm->cm_timestamp));
3508 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3515 mfi_timeout(void *data)
3517 struct mfi_softc *sc = (struct mfi_softc *)data;
3518 struct mfi_command *cm;
3522 deadline = time_uptime - mfi_cmd_timeout;
3523 if (sc->adpreset == 0) {
3524 if (!mfi_tbolt_reset(sc)) {
3525 callout_reset(&sc->mfi_watchdog_callout,
3526 mfi_cmd_timeout * hz, mfi_timeout, sc);
3530 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3531 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3532 if (sc->mfi_aen_cm == cm)
3534 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3535 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3536 cm->cm_timestamp = time_uptime;
3538 device_printf(sc->mfi_dev,
3539 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3540 cm, (int)(time_uptime - cm->cm_timestamp));
3542 MFI_VALIDATE_CMD(sc, cm);
3553 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3555 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,