2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
53 * FreeBSD projects/head_mfi/ r233016
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
64 #include <sys/eventhandler.h>
66 #include <sys/bus_dma.h>
70 #include <sys/signalvar.h>
71 #include <sys/device.h>
72 #include <sys/mplock2.h>
73 #include <sys/taskqueue.h>
75 #include <bus/cam/scsi/scsi_all.h>
77 #include <bus/pci/pcivar.h>
79 #include <dev/raid/mfi/mfireg.h>
80 #include <dev/raid/mfi/mfi_ioctl.h>
81 #include <dev/raid/mfi/mfivar.h>
83 static int mfi_alloc_commands(struct mfi_softc *);
84 static int mfi_comms_init(struct mfi_softc *);
85 static int mfi_get_controller_info(struct mfi_softc *);
86 static int mfi_get_log_state(struct mfi_softc *,
87 struct mfi_evt_log_state **);
88 static int mfi_parse_entries(struct mfi_softc *, int, int);
89 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
90 uint32_t, void **, size_t);
91 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
92 static void mfi_startup(void *arg);
93 static void mfi_intr(void *arg);
94 static void mfi_ldprobe(struct mfi_softc *sc);
95 static void mfi_syspdprobe(struct mfi_softc *sc);
96 static void mfi_handle_evt(void *context, int pending);
97 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
98 static void mfi_aen_complete(struct mfi_command *);
99 static int mfi_add_ld(struct mfi_softc *sc, int);
100 static void mfi_add_ld_complete(struct mfi_command *);
101 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
102 static void mfi_add_sys_pd_complete(struct mfi_command *);
103 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
104 static void mfi_bio_complete(struct mfi_command *);
105 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
106 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
107 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
108 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
109 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
110 static void mfi_timeout(void *);
111 static int mfi_user_command(struct mfi_softc *,
112 struct mfi_ioc_passthru *);
113 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
114 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
115 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
116 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
117 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
118 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
119 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
121 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
123 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
124 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
125 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
126 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
127 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
129 static void mfi_filter_detach(struct knote *);
130 static int mfi_filter_read(struct knote *, long);
131 static int mfi_filter_write(struct knote *, long);
/*
 * Driver-global tunables, sysctl knobs, the management-device entry points
 * and malloc type.  NOTE(review): this file appears to be a corrupted
 * extraction — original line numbers are embedded at the start of each line
 * and some interior lines are missing (e.g. the dev_ops .d_open entry and
 * closing braces are absent from this view).  Comments below describe only
 * what is visible.
 */
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
/* Event locale filter for AEN registration; settable via loader tunable. */
134 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
137 0, "event message locale");
/* Minimum event class (severity) reported to the log. */
139 static int mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
142 0, "event message class");
/* Upper bound on driver command pool size; clamped by FW max in
 * mfi_alloc_commands().  Read-only at runtime (CTLFLAG_RD). */
144 static int mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
149 static int mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
152 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
/* Per-command timeout in seconds, used by the watchdog callout. */
154 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
155 TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RW, &mfi_cmd_timeout,
157 0, "Command timeout (in seconds)");
159 /* Management interface */
160 static d_open_t mfi_open;
161 static d_close_t mfi_close;
162 static d_ioctl_t mfi_ioctl;
163 static d_kqfilter_t mfi_kqfilter;
/* cdev switch for /dev/mfi%d; D_MPSAFE: entry points take their own locks. */
165 static struct dev_ops mfi_ops = {
166 { "mfi", 0, D_MPSAFE },
168 .d_close = mfi_close,
169 .d_ioctl = mfi_ioctl,
170 .d_kqfilter = mfi_kqfilter,
/* kqueue filter ops backing mfi_kqfilter (AEN read/write readiness). */
173 static struct filterops mfi_read_filterops =
174 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
175 static struct filterops mfi_write_filterops =
176 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
178 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
180 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/* Global DMA bookkeeping for "skinny" (SAS2008-class) controllers. */
181 struct mfi_skinny_dma_info mfi_skinny;
/*
 * Enable interrupt delivery on XScale (1064R) controllers by writing the
 * outbound interrupt mask register.  NOTE(review): interior lines (return
 * type, braces) are missing from this corrupted view of the file.
 */
184 mfi_enable_intr_xscale(struct mfi_softc *sc)
186 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupt delivery on PPC-style controllers (1078 / GEN2 / skinny):
 * clear any pending doorbell status, then unmask all but the chip-specific
 * enable-interrupt-mask bit.  Unknown adapter types panic — this should be
 * unreachable for any flag combination set at attach time.
 * NOTE(review): braces and the closing of the if/else chain are missing from
 * this corrupted view.
 */
190 mfi_enable_intr_ppc(struct mfi_softc *sc)
192 if (sc->mfi_flags & MFI_FLAGS_1078) {
193 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
194 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
195 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
196 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
197 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
/* Skinny parts only unmask bit 0; no doorbell-clear write is visible here. */
199 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
201 panic("unknown adapter type");
/*
 * Read the raw firmware status word from the XScale outbound message
 * register 0.  Callers mask with MFI_FWSTATE_MASK to get the state.
 */
206 mfi_read_fw_status_xscale(struct mfi_softc *sc)
208 return MFI_READ4(sc, MFI_OMSG0);
/*
 * Read the raw firmware status word from the PPC-style outbound scratch
 * pad register 0.
 */
212 mfi_read_fw_status_ppc(struct mfi_softc *sc)
214 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether the XScale controller has a valid pending interrupt and,
 * if so, acknowledge it by writing the status back.  NOTE(review): the
 * declaration of `status` and the return statements are among the lines
 * missing from this corrupted view; presumably returns nonzero when no
 * valid interrupt was pending — confirm against the original source.
 */
218 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
222 status = MFI_READ4(sc, MFI_OSTS);
223 if ((status & MFI_OSTS_INTR_VALID) == 0)
226 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check/acknowledge a pending interrupt on PPC-style controllers.  The
 * reply-message bit tested depends on the chip generation; skinny parts
 * ack via the status register, others via doorbell-clear register 0.
 * NOTE(review): `status` declaration, the early-return, and the `else`
 * line between the two acknowledge writes are missing from this view.
 */
231 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
235 status = MFI_READ4(sc, MFI_OSTS);
236 if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
237 ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
238 ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
241 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
242 MFI_WRITE4(sc, MFI_OSTS, status);
244 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to XScale firmware: the inbound queue port takes the
 * frame bus address shifted right 3 bits OR'd with the extra-frame count.
 */
249 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
251 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
/*
 * Post a command to PPC-style firmware.  Skinny controllers use a 64-bit
 * inbound queue port (low word carries address | frame count << 1 | 1,
 * high word zeroed); other chips use the single 32-bit port with the same
 * low-word encoding.  NOTE(review): the `else` line between the two paths
 * is missing from this corrupted view.
 */
255 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
257 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
258 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
259 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
261 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
/*
 * Drive the adapter firmware to the READY state.  Reads the FW status
 * register in a loop; for each intermediate state, issues the appropriate
 * doorbell write (clear handshake, ready, hotplug ack) and then polls up
 * to max_wait seconds (sampled 10x/sec) for the state to change.  For
 * DEVICE_SCAN, forward progress in the raw status value is also accepted.
 * Skinny/ThunderBolt chips use MFI_SKINNY_IDB for doorbell writes, others
 * MFI_IDB.  NOTE(review): this view of the file is missing many interior
 * lines — the switch statement itself, `break`s, `return` statements, the
 * declarations of `i` and `max_wait`, and sleep calls are absent.
 * Presumably returns 0 on READY and an errno on fault/timeout — confirm
 * against the original source.
 */
266 mfi_transition_firmware(struct mfi_softc *sc)
268 uint32_t fw_state, cur_state;
270 uint32_t cur_abs_reg_val = 0;
271 uint32_t prev_abs_reg_val = 0;
273 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
274 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
275 while (fw_state != MFI_FWSTATE_READY) {
277 device_printf(sc->mfi_dev, "Waiting for firmware to "
279 cur_state = fw_state;
281 case MFI_FWSTATE_FAULT:
282 device_printf(sc->mfi_dev, "Firmware fault\n");
284 case MFI_FWSTATE_WAIT_HANDSHAKE:
285 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
286 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
288 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
289 max_wait = MFI_RESET_WAIT_TIME;
291 case MFI_FWSTATE_OPERATIONAL:
292 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
/* Magic value 7 written to the skinny doorbell to force a FW reset to
 * READY; non-skinny chips use the symbolic MFI_FWINIT_READY instead. */
293 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
295 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
296 max_wait = MFI_RESET_WAIT_TIME;
298 case MFI_FWSTATE_UNDEFINED:
299 case MFI_FWSTATE_BB_INIT:
300 max_wait = MFI_RESET_WAIT_TIME;
302 case MFI_FWSTATE_FW_INIT_2:
303 max_wait = MFI_RESET_WAIT_TIME;
305 case MFI_FWSTATE_FW_INIT:
306 case MFI_FWSTATE_FLUSH_CACHE:
307 max_wait = MFI_RESET_WAIT_TIME;
309 case MFI_FWSTATE_DEVICE_SCAN:
310 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
/* Remember the raw value so progress during the scan can be detected. */
311 prev_abs_reg_val = cur_abs_reg_val;
313 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
314 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
315 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
317 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
318 max_wait = MFI_RESET_WAIT_TIME;
321 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
/* Poll 10 times per second for up to max_wait seconds. */
325 for (i = 0; i < (max_wait * 10); i++) {
326 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
327 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
328 if (fw_state == cur_state)
333 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
334 /* Check the device scanning progress */
335 if (prev_abs_reg_val != cur_abs_reg_val)
338 if (fw_state == cur_state) {
339 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load() callback: stash the single segment's bus address into
 * the bus_addr_t pointed to by `arg`.  Used by all the static DMA
 * allocations in mfi_attach().  NOTE(review): the cast of `arg` to
 * bus_addr_t * is among the lines missing from this corrupted view.
 */
348 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
353 *addr = segs[0].ds_addr;
/*
 * Main attach routine.  Initializes locks and queues, selects the per-chip
 * register access methods, brings firmware to READY, creates all static
 * DMA allocations (version buffer, ThunderBolt pools, comms queues, command
 * frames, sense buffers), allocates the command pool, performs the MFI/MPI2
 * init handshake, hooks the interrupt, and exposes the management cdev,
 * sysctls and watchdog.
 *
 * NOTE(review): this view of the file is corrupted — many interior lines
 * (error-handling `return`/`goto` paths, closing braces, `else` lines,
 * several declarations such as `status`) are missing, so the comments below
 * only annotate the lines that are visible.
 */
357 mfi_attach(struct mfi_softc *sc)
360 int error, commsz, framessz, sensesz;
361 int frames, unit, max_fw_sge;
362 uint32_t tb_mem_size = 0;
367 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
/* LK_CANRECURSE: both locks may be re-entered on the same thread. */
370 lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
371 lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
372 TAILQ_INIT(&sc->mfi_ld_tqh);
373 TAILQ_INIT(&sc->mfi_syspd_tqh);
374 TAILQ_INIT(&sc->mfi_evt_queue);
375 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
376 TAILQ_INIT(&sc->mfi_aen_pids);
377 TAILQ_INIT(&sc->mfi_cam_ccbq);
385 sc->last_seq_num = 0;
/* Assume OCR is disabled until controller info says otherwise (see below). */
386 sc->disableOnlineCtrlReset = 1;
387 sc->issuepend_done = 1;
388 sc->hw_crit_error = 0;
/* Bind the register-access method pointers to the detected chip family. */
390 if (sc->mfi_flags & MFI_FLAGS_1064R) {
391 sc->mfi_enable_intr = mfi_enable_intr_xscale;
392 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
393 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
394 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
395 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
396 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
397 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
398 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
399 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
400 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
401 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
403 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
405 sc->mfi_enable_intr = mfi_enable_intr_ppc;
406 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
407 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
408 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
412 /* Before we get too far, see if the firmware is working */
413 if ((error = mfi_transition_firmware(sc)) != 0) {
414 device_printf(sc->mfi_dev, "Firmware not in READY state, "
415 "error %d\n", error);
419 /* Start: LSIP200113393 */
/* Version buffer: small DMA area the FW uses to report driver version. */
420 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
421 1, 0, /* algnmnt, boundary */
422 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
423 BUS_SPACE_MAXADDR, /* highaddr */
424 NULL, NULL, /* filter, filterarg */
425 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
427 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
429 &sc->verbuf_h_dmat)) {
430 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
433 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
434 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
435 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
438 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
439 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
440 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
441 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
442 /* End: LSIP200113393 */
445 * Get information needed for sizing the contiguous memory for the
446 * frame pool. Size down the sgl parameter since we know that
447 * we will never need more than what's required for MAXPHYS.
448 * It would be nice if these constants were available at runtime
449 * instead of compile time.
451 status = sc->mfi_read_fw_status(sc);
452 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
453 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
454 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
456 /* ThunderBolt Support get the contiguous memory */
458 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
459 mfi_tbolt_init_globals(sc);
460 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
461 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
462 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
464 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
465 1, 0, /* algnmnt, boundary */
466 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
467 BUS_SPACE_MAXADDR, /* highaddr */
468 NULL, NULL, /* filter, filterarg */
469 tb_mem_size, /* maxsize */
471 tb_mem_size, /* maxsegsize */
474 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
477 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
478 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
479 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
482 bzero(sc->request_message_pool, tb_mem_size);
483 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
484 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
486 /* For ThunderBolt memory init */
/* 0x100 alignment is required for the TB init frame. */
487 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
488 0x100, 0, /* alignmnt, boundary */
489 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 BUS_SPACE_MAXADDR, /* highaddr */
491 NULL, NULL, /* filter, filterarg */
492 MFI_FRAME_SIZE, /* maxsize */
494 MFI_FRAME_SIZE, /* maxsegsize */
496 &sc->mfi_tb_init_dmat)) {
497 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
500 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
501 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
502 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
505 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
506 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
507 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
508 &sc->mfi_tb_init_busaddr, 0);
509 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
511 device_printf(sc->mfi_dev,
512 "Thunderbolt pool preparation error\n");
517 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
518 we are taking it diffrent from what we have allocated for Request
519 and reply descriptors to avoid confusion later
521 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
522 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
523 1, 0, /* algnmnt, boundary */
524 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
525 BUS_SPACE_MAXADDR, /* highaddr */
526 NULL, NULL, /* filter, filterarg */
527 tb_mem_size, /* maxsize */
529 tb_mem_size, /* maxsegsize */
531 &sc->mfi_tb_ioc_init_dmat)) {
532 device_printf(sc->mfi_dev,
533 "Cannot allocate comms DMA tag\n");
536 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
537 (void **)&sc->mfi_tb_ioc_init_desc,
538 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
539 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
542 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
543 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
544 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
545 &sc->mfi_tb_ioc_init_busaddr, 0);
548 * Create the dma tag for data buffers. Used both for block I/O
549 * and for various internal data queries.
551 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
552 1, 0, /* algnmnt, boundary */
553 BUS_SPACE_MAXADDR, /* lowaddr */
554 BUS_SPACE_MAXADDR, /* highaddr */
555 NULL, NULL, /* filter, filterarg */
556 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
557 sc->mfi_max_sge, /* nsegments */
558 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
559 BUS_DMA_ALLOCNOW, /* flags */
560 &sc->mfi_buffer_dmat)) {
561 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
566 * Allocate DMA memory for the comms queues. Keep it under 4GB for
567 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
568 * entry, so the calculated size here will be will be 1 more than
569 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
571 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
572 sizeof(struct mfi_hwcomms);
573 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
574 1, 0, /* algnmnt, boundary */
575 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
576 BUS_SPACE_MAXADDR, /* highaddr */
577 NULL, NULL, /* filter, filterarg */
578 commsz, /* maxsize */
580 commsz, /* maxsegsize */
582 &sc->mfi_comms_dmat)) {
583 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
586 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
587 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
588 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
591 bzero(sc->mfi_comms, commsz);
592 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
593 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
595 * Allocate DMA memory for the command frames. Keep them in the
596 * lower 4GB for efficiency. Calculate the size of the commands at
597 * the same time; each command is one 64 byte frame plus a set of
598 * additional frames for holding sg lists or other data.
599 * The assumption here is that the SG list will start at the second
600 * frame and not use the unused bytes in the first frame. While this
601 * isn't technically correct, it simplifies the calculation and allows
602 * for command frames that might be larger than an mfi_io_frame.
604 if (sizeof(bus_addr_t) == 8) {
605 sc->mfi_sge_size = sizeof(struct mfi_sg64);
606 sc->mfi_flags |= MFI_FLAGS_SG64;
608 sc->mfi_sge_size = sizeof(struct mfi_sg32);
610 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
611 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
612 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
613 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
614 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
615 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
616 64, 0, /* algnmnt, boundary */
617 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
618 BUS_SPACE_MAXADDR, /* highaddr */
619 NULL, NULL, /* filter, filterarg */
620 framessz, /* maxsize */
622 framessz, /* maxsegsize */
624 &sc->mfi_frames_dmat)) {
625 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
628 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
629 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
630 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
633 bzero(sc->mfi_frames, framessz);
634 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
635 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
637 * Allocate DMA memory for the frame sense data. Keep them in the
638 * lower 4GB for efficiency
640 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
641 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
642 4, 0, /* algnmnt, boundary */
643 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
644 BUS_SPACE_MAXADDR, /* highaddr */
645 NULL, NULL, /* filter, filterarg */
646 sensesz, /* maxsize */
648 sensesz, /* maxsegsize */
650 &sc->mfi_sense_dmat)) {
651 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
654 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
655 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
656 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
659 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
660 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
661 if ((error = mfi_alloc_commands(sc)) != 0)
665 * Before moving the FW to operational state, check whether
666 * hostmemory is required by the FW or not
669 /* ThunderBolt MFI_IOC2 INIT */
670 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
671 sc->mfi_disable_intr(sc);
672 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
673 device_printf(sc->mfi_dev,
674 "TB Init has failed with error %d\n",error);
678 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
680 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
681 mfi_intr_tbolt, sc, &sc->mfi_intr, NULL)) {
682 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
685 sc->mfi_enable_intr(sc);
/* Non-ThunderBolt: classic MFI init handshake, then the legacy ISR. */
688 if ((error = mfi_comms_init(sc)) != 0)
691 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
692 mfi_intr, sc, &sc->mfi_intr, NULL)) {
693 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
696 sc->mfi_enable_intr(sc);
698 if ((error = mfi_get_controller_info(sc)) != 0)
700 sc->disableOnlineCtrlReset = 0;
702 /* Register a config hook to probe the bus for arrays */
703 sc->mfi_ich.ich_func = mfi_startup;
704 sc->mfi_ich.ich_arg = sc;
705 sc->mfi_ich.ich_desc = "mfi";
706 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
707 device_printf(sc->mfi_dev, "Cannot establish configuration "
/* NOTE(review): the comma operator makes this condition compare the
 * constant 0, so mfi_aen_setup()'s error is effectively ignored and the
 * branch never taken.  Looks deliberate (error still assigned) but verify
 * against the upstream source. */
711 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
712 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
717 * Register a shutdown handler.
719 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
720 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
/* Non-fatal: attach continues without a shutdown hook. */
721 device_printf(sc->mfi_dev, "Warning: shutdown event "
722 "registration failed\n");
726 * Create the control device for doing management
728 unit = device_get_unit(sc->mfi_dev);
729 sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
730 0640, "mfi%d", unit);
732 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
733 if (sc->mfi_cdev != NULL)
734 sc->mfi_cdev->si_drv1 = sc;
735 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
736 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
737 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
738 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
739 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
740 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
741 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
742 &sc->mfi_keep_deleted_volumes, 0,
743 "Don't detach the mfid device for a busy volume that is deleted");
745 device_add_child(sc->mfi_dev, "mfip", -1);
746 bus_generic_attach(sc->mfi_dev);
748 /* Start the timeout watchdog */
749 callout_init_mp(&sc->mfi_watchdog_callout);
750 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
/*
 * Carve the pre-allocated frame and sense DMA areas into an array of
 * mfi_command structures, sized as min(hw.mfi.max_cmds, FW maximum).
 * Each command gets its frame virtual/bus address pair, its context (the
 * array index, used to find the command on completion), its sense slot,
 * and a DMA map; successfully initialized commands are pushed onto the
 * free queue via mfi_release_command().  NOTE(review): declarations of
 * `i`/`ncmds`, kmalloc flags, braces and the return statement are missing
 * from this corrupted view.
 */
757 mfi_alloc_commands(struct mfi_softc *sc)
759 struct mfi_command *cm;
763 * XXX Should we allocate all the commands up front, or allocate on
764 * demand later like 'aac' does?
766 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
768 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
769 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
771 sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
774 for (i = 0; i < ncmds; i++) {
775 cm = &sc->mfi_commands[i];
776 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
777 sc->mfi_cmd_size * i);
778 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
779 sc->mfi_cmd_size * i;
/* Context identifies the command when firmware completes it. */
780 cm->cm_frame->header.context = i;
781 cm->cm_sense = &sc->mfi_sense[i];
782 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
785 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
786 &cm->cm_dmamap) == 0) {
/* mfi_release_command() requires the io lock; it puts cm on the free q. */
787 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
788 mfi_release_command(cm);
789 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
793 sc->mfi_total_cmds++;
/*
 * Return a command to the free pool.  Scrubs the frame header fields
 * (as raw 32-bit words, preserving the context word at index 2/3) and the
 * first S/G entry, clears the per-command bookkeeping, and enqueues the
 * command on the free list.  Caller must hold the softc's io lock.
 * NOTE(review): the `hdr_data` declaration, several field resets and the
 * closing brace are missing from this corrupted view.
 */
800 mfi_release_command(struct mfi_command *cm)
802 struct mfi_frame_header *hdr;
805 mfi_lockassert(&cm->cm_sc->mfi_io_lock);
808 * Zero out the important fields of the frame, but make sure the
809 * context field is preserved. For efficiency, handle the fields
810 * as 32 bit words. Clear out the first S/G entry too for safety.
812 hdr = &cm->cm_frame->header;
813 if (cm->cm_data != NULL && hdr->sg_count) {
814 cm->cm_sg->sg32[0].len = 0;
815 cm->cm_sg->sg32[0].addr = 0;
818 hdr_data = (uint32_t *)cm->cm_frame;
819 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
820 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
821 hdr_data[4] = 0; /* flags, timeout */
822 hdr_data[5] = 0; /* data_len */
824 cm->cm_extra_frames = 0;
826 cm->cm_complete = NULL;
827 cm->cm_private = NULL;
830 cm->cm_total_frame_size = 0;
831 cm->retry_for_fw_reset = 0;
833 mfi_enqueue_free(cm);
/*
 * Build a DCMD (direct command) frame.  Dequeues a free command, zeroes
 * its frame while preserving the context word, optionally allocates a
 * zeroed data buffer of `bufsize` bytes (returned through *bufp when the
 * caller did not supply one), and fills in the DCMD header and opcode.
 * Caller must hold the io lock; *cmp receives the prepared command.
 * NOTE(review): the `buf` declaration, error returns (e.g. on free-queue
 * or kmalloc failure) and the *cmp/*bufp assignments are among the lines
 * missing from this corrupted view.
 */
837 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
838 uint32_t opcode, void **bufp, size_t bufsize)
840 struct mfi_command *cm;
841 struct mfi_dcmd_frame *dcmd;
843 uint32_t context = 0;
845 mfi_lockassert(&sc->mfi_io_lock);
847 cm = mfi_dequeue_free(sc);
851 /* Zero out the MFI frame */
852 context = cm->cm_frame->header.context;
853 bzero(cm->cm_frame, sizeof(union mfi_frame));
854 cm->cm_frame->header.context = context;
856 if ((bufsize > 0) && (bufp != NULL)) {
858 buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
860 mfi_release_command(cm);
869 dcmd = &cm->cm_frame->dcmd;
870 bzero(dcmd->mbox, MFI_MBOX_SIZE);
871 dcmd->header.cmd = MFI_CMD_DCMD;
872 dcmd->header.timeout = 0;
873 dcmd->header.flags = 0;
874 dcmd->header.data_len = bufsize;
875 dcmd->header.scsi_status = 0;
876 dcmd->opcode = opcode;
877 cm->cm_sg = &dcmd->sgl;
878 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
881 cm->cm_private = buf;
882 cm->cm_len = bufsize;
885 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Perform the classic MFI INIT handshake: describe the host reply queue
 * (producer/consumer indices and reply ring inside the mfi_hwcomms DMA
 * area) in an mfi_init_qinfo placed one frame past the init frame, then
 * send the MFI_CMD_INIT frame polled.  Takes and releases the io lock.
 * NOTE(review): the `error` declaration, several returns and closing
 * braces are missing from this corrupted view.
 */
891 mfi_comms_init(struct mfi_softc *sc)
893 struct mfi_command *cm;
894 struct mfi_init_frame *init;
895 struct mfi_init_qinfo *qinfo;
897 uint32_t context = 0;
899 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
900 if ((cm = mfi_dequeue_free(sc)) == NULL) {
901 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
905 /* Zero out the MFI frame */
906 context = cm->cm_frame->header.context;
907 bzero(cm->cm_frame, sizeof(union mfi_frame));
908 cm->cm_frame->header.context = context;
911 * Abuse the SG list area of the frame to hold the init_qinfo
914 init = &cm->cm_frame->init;
915 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
917 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One extra entry beyond the FW command count; hardware requirement. */
918 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
919 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
920 offsetof(struct mfi_hwcomms, hw_reply_q);
921 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
922 offsetof(struct mfi_hwcomms, hw_pi);
923 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
924 offsetof(struct mfi_hwcomms, hw_ci);
926 init->header.cmd = MFI_CMD_INIT;
927 init->header.data_len = sizeof(struct mfi_init_qinfo);
/* qinfo lives one frame past the init frame in the same DMA area. */
928 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
930 cm->cm_flags = MFI_CMD_POLLED;
932 if ((error = mfi_mapcmd(sc, cm)) != 0) {
933 device_printf(sc->mfi_dev, "failed to send init command\n");
934 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
937 mfi_release_command(cm);
938 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Issue MFI_DCMD_CTRL_GETINFO (polled, data-in) and derive the maximum
 * I/O size from the reported stripe/strip limits and max request size;
 * also latches the firmware's disableOnlineCtrlReset property.  On
 * failure, falls back to an SGE-based estimate of mfi_max_io.
 * NOTE(review): the `error` declaration, the divisor completing the
 * fallback computation at line 960, gotos/returns and closing braces are
 * missing from this corrupted view.
 */
944 mfi_get_controller_info(struct mfi_softc *sc)
946 struct mfi_command *cm = NULL;
947 struct mfi_ctrl_info *ci = NULL;
948 uint32_t max_sectors_1, max_sectors_2;
951 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
952 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
953 (void **)&ci, sizeof(*ci));
956 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
958 if ((error = mfi_mapcmd(sc, cm)) != 0) {
959 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback estimate when the DCMD fails. */
960 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
966 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
967 BUS_DMASYNC_POSTREAD);
968 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
970 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
971 max_sectors_2 = ci->max_request_size;
972 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
973 sc->disableOnlineCtrlReset =
974 ci->properties.OnOffProperties.disableOnlineCtrlReset;
980 mfi_release_command(cm);
981 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch the firmware event-log state (boot/shutdown/newest sequence
 * numbers) via MFI_DCMD_CTRL_EVENT_GETINFO, polled data-in.  The result
 * buffer is allocated by mfi_dcmd_command() and returned through
 * *log_state; the caller owns and must kfree() it.  NOTE(review): the
 * `error` declaration, goto/return paths and closing braces are missing
 * from this corrupted view.
 */
986 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
988 struct mfi_command *cm = NULL;
991 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
992 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
993 (void **)log_state, sizeof(**log_state));
996 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
998 if ((error = mfi_mapcmd(sc, cm)) != 0) {
999 device_printf(sc->mfi_dev, "Failed to get log state\n");
1003 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1004 BUS_DMASYNC_POSTREAD);
1005 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1009 mfi_release_command(cm);
1010 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Set up asynchronous event notification (AEN).  When starting from
 * sequence 0, queries the firmware's log state, replays any events logged
 * between the last shutdown and now via mfi_parse_entries(), and resumes
 * from the newest sequence number; then registers for future events with
 * the configured locale/class filters.  Frees the log-state buffer
 * allocated by mfi_get_log_state().  NOTE(review): declarations of
 * `error`/`seq`, error-path braces and the return are missing from this
 * corrupted view.
 */
1016 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1018 struct mfi_evt_log_state *log_state = NULL;
1019 union mfi_evt class_locale;
1023 class_locale.members.reserved = 0;
1024 class_locale.members.locale = mfi_event_locale;
1025 class_locale.members.evt_class = mfi_event_class;
1027 if (seq_start == 0) {
1028 error = mfi_get_log_state(sc, &log_state);
1029 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1032 kfree(log_state, M_MFIBUF);
1037 * Walk through any events that fired since the last
1040 mfi_parse_entries(sc, log_state->shutdown_seq_num,
1041 log_state->newest_seq_num);
1042 seq = log_state->newest_seq_num;
1045 mfi_aen_register(sc, seq, class_locale.word);
/* kfree(NULL) would also be safe; guard kept as written. */
1046 if (log_state != NULL)
1047 kfree(log_state, M_MFIBUF);
/*
 * Submit a command and sleep until it completes, returning cm->cm_error.
 * A DCMD opcode of 0 (issued by MegaCli) is short-circuited to success
 * without touching the hardware.  Requires the io lock; lksleep() drops
 * and reacquires it while waiting for the completion wakeup.
 * NOTE(review): the single `if` guarding the lksleep suggests a missing
 * loop or additional completion handling in the elided lines — this view
 * of the file is missing interior lines, so confirm against the original.
 */
1053 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1056 mfi_lockassert(&sc->mfi_io_lock);
1057 cm->cm_complete = NULL;
1061 * MegaCli can issue a DCMD of 0. In this case do nothing
1062 * and return 0 to it as status
1064 if (cm->cm_frame->dcmd.opcode == 0) {
1065 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1067 return (cm->cm_error);
1069 mfi_enqueue_ready(cm);
1071 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1072 lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
1073 return (cm->cm_error);
/*
 * mfi_free: detach-time teardown.  Stops the watchdog, destroys the cdev,
 * then releases every DMA resource in reverse order of allocation
 * (per-command maps, sense buffers, frame pool, comms area, ThunderBolt
 * pools, version buffer, init packets) and finally the lock objects.
 * Each resource is guarded by a NULL/0 check so this is safe to call from
 * a partially failed attach.  NOTE(review): elided listing — closing braces
 * and some intermediate lines are not shown.
 */
1077 mfi_free(struct mfi_softc *sc)
1079 struct mfi_command *cm;
1082 callout_stop_sync(&sc->mfi_watchdog_callout);
1084 if (sc->mfi_cdev != NULL)
1085 destroy_dev(sc->mfi_cdev);
1086 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
/* Per-command DMA maps, then the command array itself */
1088 if (sc->mfi_total_cmds != 0) {
1089 for (i = 0; i < sc->mfi_total_cmds; i++) {
1090 cm = &sc->mfi_commands[i];
1091 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1093 kfree(sc->mfi_commands, M_MFIBUF);
/* Interrupt handler and IRQ resource */
1097 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1098 if (sc->mfi_irq != NULL)
1099 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer DMA: unload map, free memory, destroy tag — in that order */
1102 if (sc->mfi_sense_busaddr != 0)
1103 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1104 if (sc->mfi_sense != NULL)
1105 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1106 sc->mfi_sense_dmamap);
1107 if (sc->mfi_sense_dmat != NULL)
1108 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Frame pool DMA */
1110 if (sc->mfi_frames_busaddr != 0)
1111 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1112 if (sc->mfi_frames != NULL)
1113 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1114 sc->mfi_frames_dmamap);
1115 if (sc->mfi_frames_dmat != NULL)
1116 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Comms (reply queue) area DMA */
1118 if (sc->mfi_comms_busaddr != 0)
1119 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1120 if (sc->mfi_comms != NULL)
1121 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1122 sc->mfi_comms_dmamap);
1123 if (sc->mfi_comms_dmat != NULL)
1124 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1126 /* ThunderBolt contiguous memory free here */
1127 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1128 if (sc->mfi_tb_busaddr != 0)
1129 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1130 if (sc->request_message_pool != NULL)
1131 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1133 if (sc->mfi_tb_dmat != NULL)
1134 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1136 /* Version buffer memory free */
1137 /* Start LSIP200113393 */
1138 if (sc->verbuf_h_busaddr != 0)
1139 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1140 if (sc->verbuf != NULL)
1141 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1142 sc->verbuf_h_dmamap);
1143 if (sc->verbuf_h_dmat != NULL)
1144 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1146 /* End LSIP200113393 */
1147 /* ThunderBolt INIT packet memory Free */
1148 if (sc->mfi_tb_init_busaddr != 0)
1149 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1150 if (sc->mfi_tb_init != NULL)
1151 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1152 sc->mfi_tb_init_dmamap);
1153 if (sc->mfi_tb_init_dmat != NULL)
1154 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1156 /* ThunderBolt IOC Init Desc memory free here */
1157 if (sc->mfi_tb_ioc_init_busaddr != 0)
1158 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1159 sc->mfi_tb_ioc_init_dmamap);
1160 if (sc->mfi_tb_ioc_init_desc != NULL)
1161 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1162 sc->mfi_tb_ioc_init_desc,
1163 sc->mfi_tb_ioc_init_dmamap);
1164 if (sc->mfi_tb_ioc_init_dmat != NULL)
1165 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Per-command ThunderBolt pool entries, then the pool pointer array */
1166 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1167 if (sc->mfi_cmd_pool_tbolt != NULL) {
1168 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1169 kfree(sc->mfi_cmd_pool_tbolt[i],
1170 M_MFIBUF); /* NOTE(review): malloc type assumed from context */
1171 sc->mfi_cmd_pool_tbolt[i] = NULL;
1175 if (sc->mfi_cmd_pool_tbolt != NULL) {
1176 kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1177 sc->mfi_cmd_pool_tbolt = NULL;
1179 if (sc->request_desc_pool != NULL) {
1180 kfree(sc->request_desc_pool, M_MFIBUF);
1181 sc->request_desc_pool = NULL;
/* Generic buffer/parent DMA tags last — children must already be gone */
1184 if (sc->mfi_buffer_dmat != NULL)
1185 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1186 if (sc->mfi_parent_dmat != NULL)
1187 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1189 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1190 if (mtx_initialized(&sc->mfi_io_lock))
1193 lockuninit(&sc->mfi_io_lock);
1194 lockuninit(&sc->mfi_config_lock);
/*
 * mfi_startup: config_intrhook callback run once interrupts are available.
 * Tears down the hook, enables controller interrupts, and probes logical
 * drives (and, per the SKINNY check, presumably system PDs) under both
 * config and io locks.  NOTE(review): the probe calls themselves fall in
 * elided lines — confirm against full source.
 */
1201 mfi_startup(void *arg)
1203 struct mfi_softc *sc;
1205 sc = (struct mfi_softc *)arg;
/* One-shot: remove ourselves from the intrhook list before doing work */
1207 config_intrhook_disestablish(&sc->mfi_ich);
1209 sc->mfi_enable_intr(sc);
/* Lock order: config lock before io lock (matches rest of driver) */
1210 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1211 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1213 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1215 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1216 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Interrupt handler body (function signature elided from this listing;
 * presumably mfi_intr(void *arg) — confirm against full source).
 * Drains the hardware reply queue between producer index (hw_pi) and
 * consumer index (hw_ci), completing each command, then unfreezes the
 * deferred-I/O queue and re-checks for new completions after a flushing
 * read of the FW status register.
 */
1222 struct mfi_softc *sc;
1223 struct mfi_command *cm;
1224 uint32_t pi, ci, context;
1226 sc = (struct mfi_softc *)arg;
/* Shared-interrupt filter: bail if this device didn't interrupt */
1228 if (sc->mfi_check_clear_intr(sc))
1232 pi = sc->mfi_comms->hw_pi;
1233 ci = sc->mfi_comms->hw_ci;
1234 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1236 context = sc->mfi_comms->hw_reply_q[ci];
/* context indexes mfi_commands[]; out-of-range entries are skipped */
1237 if (context < sc->mfi_max_fw_cmds) {
1238 cm = &sc->mfi_commands[context];
1239 mfi_remove_busy(cm);
1241 mfi_complete(sc, cm);
/* Reply queue is circular with mfi_max_fw_cmds + 1 slots */
1243 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1248 sc->mfi_comms->hw_ci = ci;
1250 /* Give defered I/O a chance to run */
1251 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1252 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1254 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1257 * Dummy read to flush the bus; this ensures that the indexes are up
1258 * to date. Restart processing if more commands have come it.
1260 (void)sc->mfi_read_fw_status(sc);
1261 if (pi != sc->mfi_comms->hw_pi)
/*
 * mfi_shutdown: send the polled MFI_DCMD_CTRL_SHUTDOWN DCMD so the
 * controller flushes its cache before power-off.  Any outstanding AEN or
 * map-update command is aborted first so the firmware can quiesce.
 * NOTE(review): elided listing — error-path lines between numbered lines
 * are not shown.
 */
1268 mfi_shutdown(struct mfi_softc *sc)
1270 struct mfi_dcmd_frame *dcmd;
1271 struct mfi_command *cm;
1274 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* No data phase: shutdown DCMD carries no buffer */
1275 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1277 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Cancel the long-lived AEN wait and any in-flight map update */
1281 if (sc->mfi_aen_cm != NULL)
1282 mfi_abort(sc, sc->mfi_aen_cm);
1284 if (sc->map_update_cmd != NULL)
1285 mfi_abort(sc, sc->map_update_cmd);
1287 dcmd = &cm->cm_frame->dcmd;
1288 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1289 cm->cm_flags = MFI_CMD_POLLED;
1292 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1293 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1296 mfi_release_command(cm);
1297 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_syspdprobe: reconcile the driver's list of SYSTEM (JBOD) physical
 * drives with the firmware's view.  Queries MFI_DCMD_PD_LIST_QUERY
 * (exposed-to-host PDs, polled), attaches children for new PDs, and
 * detaches children for PDs the firmware no longer reports.
 * Caller must hold both mfi_config_lock and mfi_io_lock.
 * NOTE(review): elided listing — loop braces/continue statements between
 * numbered lines are not shown.
 */
1302 mfi_syspdprobe(struct mfi_softc *sc)
1304 struct mfi_frame_header *hdr;
1305 struct mfi_command *cm = NULL;
1306 struct mfi_pd_list *pdlist = NULL;
1307 struct mfi_system_pd *syspd, *tmp;
1308 int error, i, found;
1310 mfi_lockassert(&sc->mfi_config_lock);
1311 mfi_lockassert(&sc->mfi_io_lock);
1312 /* Add SYSTEM PD's */
1313 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1314 (void **)&pdlist, sizeof(*pdlist));
1316 device_printf(sc->mfi_dev,
1317 "Error while forming SYSTEM PD list\n");
1321 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
/* mbox[0]: restrict the query to PDs exposed to the host */
1322 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1323 cm->cm_frame->dcmd.mbox[1] = 0;
1324 if (mfi_mapcmd(sc, cm) != 0) {
1325 device_printf(sc->mfi_dev,
1326 "Failed to get syspd device listing\n");
1329 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1330 BUS_DMASYNC_POSTREAD);
1331 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1332 hdr = &cm->cm_frame->header;
1333 if (hdr->cmd_status != MFI_STAT_OK) {
1334 device_printf(sc->mfi_dev,
1335 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1338 /* Get each PD and add it to the system */
1339 for (i = 0; i < pdlist->count; i++) {
/* device_id == encl_device_id marks an enclosure entry, not a disk */
1340 if (pdlist->addr[i].device_id ==
1341 pdlist->addr[i].encl_device_id)
1344 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1345 if (syspd->pd_id == pdlist->addr[i].device_id)
1349 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1351 /* Delete SYSPD's whose state has been changed */
1352 TAILQ_FOREACH_MUTABLE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1354 for (i = 0; i < pdlist->count; i++) {
1355 if (syspd->pd_id == pdlist->addr[i].device_id)
1359 kprintf("DELETE\n");
/* io lock dropped around device_delete_child: newbus may sleep */
1360 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1362 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1364 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1369 kfree(pdlist, M_MFIBUF);
1371 mfi_release_command(cm);
/*
 * mfi_ldprobe: enumerate logical drives via MFI_DCMD_LD_GET_LIST (sleeping
 * DATAIN command) and attach a child device for each LD not already in
 * mfi_ld_tqh.  Caller must hold both mfi_config_lock and mfi_io_lock.
 * NOTE(review): elided listing — error-path lines are not shown.
 */
1375 mfi_ldprobe(struct mfi_softc *sc)
1377 struct mfi_frame_header *hdr;
1378 struct mfi_command *cm = NULL;
1379 struct mfi_ld_list *list = NULL;
1380 struct mfi_disk *ld;
1383 mfi_lockassert(&sc->mfi_config_lock);
1384 mfi_lockassert(&sc->mfi_io_lock);
1386 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1387 (void **)&list, sizeof(*list));
/* Not polled: mfi_wait_command sleeps until the interrupt completes it */
1391 cm->cm_flags = MFI_CMD_DATAIN;
1392 if (mfi_wait_command(sc, cm) != 0) {
1393 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1397 hdr = &cm->cm_frame->header;
1398 if (hdr->cmd_status != MFI_STAT_OK) {
1399 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Only add targets we do not already track */
1404 for (i = 0; i < list->ld_count; i++) {
1405 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1406 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1409 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1414 kfree(list, M_MFIBUF);
1416 mfi_release_command(cm);
1422 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1423 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * format_timestamp: render an event timestamp into a static buffer.
 * NOT thread-safe/reentrant (static buffer) — callers use it inline in a
 * single printf.
 */
1427 format_timestamp(uint32_t timestamp)
1429 static char buffer[32];
/* 0xff in the top byte flags a "seconds since boot" timestamp */
1431 if ((timestamp & 0xff000000) == 0xff000000)
1432 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1435 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * format_class: map an MFI event class code to a short name; unknown codes
 * are formatted numerically into a static buffer (not reentrant).
 * NOTE(review): the return statements for several cases fall on elided
 * lines of this listing.
 */
1440 format_class(int8_t class)
1442 static char buffer[6];
1445 case MFI_EVT_CLASS_DEBUG:
1447 case MFI_EVT_CLASS_PROGRESS:
1448 return ("progress");
1449 case MFI_EVT_CLASS_INFO:
1451 case MFI_EVT_CLASS_WARNING:
1453 case MFI_EVT_CLASS_CRITICAL:
1455 case MFI_EVT_CLASS_FATAL:
1457 case MFI_EVT_CLASS_DEAD:
/* default: unknown class — print the raw number */
1460 ksnprintf(buffer, sizeof(buffer), "%d", class);
/*
 * mfi_decode_evt: log one AEN event and react to the ones that change
 * device topology — host bus rescan requests, LDs going offline, and
 * JBOD (system PD) insert/remove — by re-probing or detaching children.
 * Events older than the boot sequence number, or arriving during detach,
 * are ignored.  NOTE(review): elided listing — braces/lock lines between
 * numbered lines are not shown.
 */
1466 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1468 struct mfi_system_pd *syspd = NULL;
1470 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1471 format_timestamp(detail->time), detail->evt_class.members.locale,
1472 format_class(detail->evt_class.members.evt_class),
1473 detail->description);
1475 /* Don't act on old AEN's or while shutting down */
1476 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1479 switch (detail->arg_type) {
1480 case MR_EVT_ARGS_NONE:
1481 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1482 device_printf(sc->mfi_dev, "HostBus scan raised\n");
/* mfi_detect_jbod_change: tunable gating all JBOD hot-plug handling */
1483 if (mfi_detect_jbod_change) {
1485 * Probe for new SYSPD's and Delete
1488 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1489 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1491 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1492 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1496 case MR_EVT_ARGS_LD_STATE:
1498 * During load time driver reads all the events starting
1499 * from the one that has been logged after shutdown. Avoid
1502 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1504 struct mfi_disk *ld;
/* Find the tracked LD that just went offline */
1505 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1507 detail->args.ld_state.ld.target_id)
1511 Fix: for kernel panics when SSCD is removed
1512 KASSERT(ld != NULL, ("volume dissappeared"));
1516 device_delete_child(sc->mfi_dev, ld->ld_dev);
1521 case MR_EVT_ARGS_PD:
1522 if (detail->code == MR_EVT_PD_REMOVED) {
1523 if (mfi_detect_jbod_change) {
1525 * If the removed device is a SYSPD then
1528 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1531 detail->args.pd.device_id) {
1533 device_delete_child(
1542 if (detail->code == MR_EVT_PD_INSERTED) {
1543 if (mfi_detect_jbod_change) {
1544 /* Probe for new SYSPD's */
1545 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1546 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1548 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1549 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * mfi_queue_evt: copy an event detail onto the softc event queue and kick
 * the swi taskqueue; decoding happens later in mfi_handle_evt() outside
 * interrupt context.  Caller holds mfi_io_lock.  M_NOWAIT because this
 * can run from the completion path; on allocation failure the event is
 * presumably dropped (the failure branch falls on an elided line).
 */
1557 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1559 struct mfi_evt_queue_elm *elm;
1561 mfi_lockassert(&sc->mfi_io_lock);
1562 elm = kmalloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
1565 memcpy(&elm->detail, detail, sizeof(*detail));
1566 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1567 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * mfi_handle_evt: taskqueue handler.  Moves the pending event list to a
 * local queue under mfi_io_lock (so decoding runs unlocked and new events
 * can keep arriving), then decodes and frees each element.
 */
1571 mfi_handle_evt(void *context, int pending)
1573 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1574 struct mfi_softc *sc;
1575 struct mfi_evt_queue_elm *elm;
1579 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Steal the whole queue in O(1); leaves sc->mfi_evt_queue empty */
1580 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1581 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1582 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1583 TAILQ_REMOVE(&queue, elm, link);
1584 mfi_decode_evt(sc, &elm->detail);
1585 kfree(elm, M_MFIBUF);
/*
 * mfi_aen_register: arm the long-lived MFI_DCMD_CTRL_EVENT_WAIT command
 * that the firmware completes when an event at/after `seq` matching
 * `locale` occurs.  If an AEN is already registered with a superset
 * filter, do nothing; otherwise widen the filter (union of locales,
 * lowest class) and abort the old registration first.
 * NOTE(review): elided listing — some braces/returns are not shown.
 */
1590 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1592 struct mfi_command *cm;
1593 struct mfi_dcmd_frame *dcmd;
1594 union mfi_evt current_aen, prior_aen;
1595 struct mfi_evt_detail *ed = NULL;
1598 current_aen.word = locale;
1599 if (sc->mfi_aen_cm != NULL) {
/* mbox[1] of the outstanding AEN holds its class/locale word */
1601 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Existing registration already covers the request — nothing to do */
1602 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1603 !((prior_aen.members.locale & current_aen.members.locale)
1604 ^current_aen.members.locale)) {
/* Otherwise merge filters and take over: widen locale, lower class */
1607 prior_aen.members.locale |= current_aen.members.locale;
1608 if (prior_aen.members.evt_class
1609 < current_aen.members.evt_class)
1610 current_aen.members.evt_class =
1611 prior_aen.members.evt_class;
1612 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1613 mfi_abort(sc, sc->mfi_aen_cm);
1614 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1618 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1619 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1620 (void **)&ed, sizeof(*ed));
1621 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1626 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale filter */
1627 ((uint32_t *)&dcmd->mbox)[0] = seq;
1628 ((uint32_t *)&dcmd->mbox)[1] = locale;
1629 cm->cm_flags = MFI_CMD_DATAIN;
1630 cm->cm_complete = mfi_aen_complete;
1632 sc->last_seq_num = seq;
1633 sc->mfi_aen_cm = cm;
1635 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1636 mfi_enqueue_ready(cm);
1638 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_aen_complete: completion callback for the EVENT_WAIT command.
 * Unless aborted, queues the delivered event, pokes kqueue pollers,
 * signals registered AEN listener processes, then releases the command
 * and re-arms notification at seq+1 via mfi_aen_setup() (dropping
 * mfi_io_lock around it).  Runs with mfi_io_lock held.
 * NOTE(review): elided listing — abort/early-return branches are partly
 * hidden between the numbered lines.
 */
1645 mfi_aen_complete(struct mfi_command *cm)
1647 struct mfi_frame_header *hdr;
1648 struct mfi_softc *sc;
1649 struct mfi_evt_detail *detail;
1650 struct mfi_aen *mfi_aen_entry, *tmp;
1652 int seq = 0, aborted = 0;
1655 mfi_lockassert(&sc->mfi_io_lock);
1657 hdr = &cm->cm_frame->header;
1659 if (sc->mfi_aen_cm == NULL)
/* Aborted or invalid-status completion: don't treat as a real event */
1662 if (sc->mfi_aen_cm->cm_aen_abort ||
1663 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1664 sc->mfi_aen_cm->cm_aen_abort = 0;
/* Wake anything kevent()-waiting on AEN delivery */
1667 sc->mfi_aen_triggered = 1;
1668 if (sc->mfi_poll_waiting) {
1669 sc->mfi_poll_waiting = 0;
1670 KNOTE(&sc->mfi_kq.ki_note, 0);
1672 detail = cm->cm_data;
1673 mfi_queue_evt(sc, detail);
/* Next registration starts just past the event we received */
1674 seq = detail->seq + 1;
/* Notify (and drain) the registered listener PID list */
1675 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
1677 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1679 p = mfi_aen_entry->p;
1683 kfree(mfi_aen_entry, M_MFIBUF);
1687 kfree(cm->cm_data, M_MFIBUF);
/* Clear before wakeup so mfi_abort()'s wait loop can terminate */
1688 sc->mfi_aen_cm = NULL;
1689 wakeup(&sc->mfi_aen_cm);
1690 mfi_release_command(cm);
1692 /* set it up again so the driver can catch more events */
1694 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1695 mfi_aen_setup(sc, seq);
1696 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Max event records fetched per MFI_DCMD_CTRL_EVENT_GET request */
1700 #define MAX_EVENTS 15
/*
 * mfi_parse_entries: replay the controller event log from start_seq up to
 * (but not including) stop_seq, MAX_EVENTS records per polled DCMD,
 * queueing each event for decoding.  Handles the log being a circular
 * buffer (stop point may precede start point numerically).
 * NOTE(review): elided listing — several braces/returns/sleeps between
 * numbered lines are not shown.
 */
1703 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1705 struct mfi_command *cm;
1706 struct mfi_dcmd_frame *dcmd;
1707 struct mfi_evt_list *el;
1708 union mfi_evt class_locale;
1709 int error, i, seq, size;
1711 class_locale.members.reserved = 0;
1712 class_locale.members.locale = mfi_event_locale;
1713 class_locale.members.evt_class = mfi_event_class;
/* Buffer sized for the list header plus MAX_EVENTS detail records */
1715 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1717 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1721 for (seq = start_seq;;) {
1722 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1723 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1724 kfree(el, M_MFIBUF);
1725 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1729 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Hand-build the EVENT_GET DCMD (not via mfi_dcmd_command: reuses el) */
1731 dcmd = &cm->cm_frame->dcmd;
1732 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1733 dcmd->header.cmd = MFI_CMD_DCMD;
1734 dcmd->header.timeout = 0;
1735 dcmd->header.data_len = size;
1736 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1737 ((uint32_t *)&dcmd->mbox)[0] = seq;
1738 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1739 cm->cm_sg = &dcmd->sgl;
1740 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1741 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1745 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1746 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1747 device_printf(sc->mfi_dev,
1748 "Failed to get controller entries\n");
1749 mfi_release_command(cm);
1750 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1754 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1755 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1756 BUS_DMASYNC_POSTREAD);
1757 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND: no more log entries at/after seq — normal loop exit */
1759 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1760 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1761 mfi_release_command(cm);
1762 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1765 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1766 device_printf(sc->mfi_dev,
1767 "Error %d fetching controller entries\n",
1768 dcmd->header.cmd_status);
1769 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1770 mfi_release_command(cm);
1771 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1774 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1775 mfi_release_command(cm);
1776 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1778 for (i = 0; i < el->count; i++) {
1780 * If this event is newer than 'stop_seq' then
1781 * break out of the loop. Note that the log
1782 * is a circular buffer so we have to handle
1783 * the case that our stop point is earlier in
1784 * the buffer than our start point.
1786 if (el->event[i].seq >= stop_seq) {
1787 if (start_seq <= stop_seq)
1789 else if (el->event[i].seq < start_seq)
1792 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1793 mfi_queue_evt(sc, &el->event[i]);
1794 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Continue the scan just past the last record returned */
1796 seq = el->event[el->count - 1].seq + 1;
1799 kfree(el, M_MFIBUF);
/*
 * mfi_add_ld: fetch MFI_DCMD_LD_GET_INFO for logical drive `id` (sleeping
 * DATAIN command) and, unless the LD is an SSCD (CacheCade) volume,
 * attach it via mfi_add_ld_complete().  Caller holds mfi_io_lock.
 * ld_info ownership: passed on to mfi_add_ld_complete() on the normal
 * path, freed here on error and for SSCD drives.
 * NOTE(review): elided listing — returns between numbered lines not shown.
 */
1804 mfi_add_ld(struct mfi_softc *sc, int id)
1806 struct mfi_command *cm;
1807 struct mfi_dcmd_frame *dcmd = NULL;
1808 struct mfi_ld_info *ld_info = NULL;
1811 mfi_lockassert(&sc->mfi_io_lock);
1813 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1814 (void **)&ld_info, sizeof(*ld_info));
1816 device_printf(sc->mfi_dev,
1817 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1819 kfree(ld_info, M_MFIBUF);
1822 cm->cm_flags = MFI_CMD_DATAIN;
1823 dcmd = &cm->cm_frame->dcmd;
1825 if (mfi_wait_command(sc, cm) != 0) {
1826 device_printf(sc->mfi_dev,
1827 "Failed to get logical drive: %d\n", id);
1828 kfree(ld_info, M_MFIBUF);
/* SSCD (SSD cache) volumes are not exposed as disks */
1831 if (ld_info->ld_config.params.isSSCD != 1) {
1832 mfi_add_ld_complete(cm);
1834 mfi_release_command(cm);
1835 if (ld_info) /* SSCD drives ld_info free here */
1836 kfree(ld_info, M_MFIBUF);
/*
 * mfi_add_ld_complete: attach an "mfid" child for a probed logical drive.
 * Frees ld_info and bails on bad DCMD status or child-add failure;
 * otherwise hands ld_info to the child as ivars (child now owns it) and
 * runs bus_generic_attach.  Drops mfi_io_lock around newbus calls.
 */
1842 mfi_add_ld_complete(struct mfi_command *cm)
1844 struct mfi_frame_header *hdr;
1845 struct mfi_ld_info *ld_info;
1846 struct mfi_softc *sc;
1850 hdr = &cm->cm_frame->header;
1851 ld_info = cm->cm_private;
1853 if (hdr->cmd_status != MFI_STAT_OK) {
1854 kfree(ld_info, M_MFIBUF);
1855 mfi_release_command(cm);
1858 mfi_release_command(cm);
/* newbus attach may sleep — io lock must not be held across it */
1860 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1862 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1863 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1864 kfree(ld_info, M_MFIBUF);
1866 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* ld_info ownership transfers to the child via ivars */
1870 device_set_ivars(child, ld_info);
1871 device_set_desc(child, "MFI Logical Disk");
1872 bus_generic_attach(sc->mfi_dev);
1874 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * mfi_add_sys_pd: fetch MFI_DCMD_PD_GET_INFO for physical drive `id`
 * (polled DATAIN) and attach it as a system PD via
 * mfi_add_sys_pd_complete().  Caller holds mfi_io_lock.
 * NOTE(review): elided listing — mbox setup and returns between numbered
 * lines are not shown.
 */
1878 mfi_add_sys_pd(struct mfi_softc *sc, int id)
1880 struct mfi_command *cm;
1881 struct mfi_dcmd_frame *dcmd = NULL;
1882 struct mfi_pd_info *pd_info = NULL;
1885 mfi_lockassert(&sc->mfi_io_lock);
1887 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1888 (void **)&pd_info, sizeof(*pd_info));
1890 device_printf(sc->mfi_dev,
1891 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1894 kfree(pd_info, M_MFIBUF);
1897 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1898 dcmd = &cm->cm_frame->dcmd;
1900 dcmd->header.scsi_status = 0;
1901 dcmd->header.pad0 = 0;
1902 if (mfi_mapcmd(sc, cm) != 0) {
1903 device_printf(sc->mfi_dev,
1904 "Failed to get physical drive info %d\n", id);
1905 kfree(pd_info, M_MFIBUF);
1908 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1909 BUS_DMASYNC_POSTREAD);
1910 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Completion path releases cm and hands pd_info to the child device */
1911 mfi_add_sys_pd_complete(cm);
/*
 * mfi_add_sys_pd_complete: attach an "mfisyspd" child for a probed
 * physical drive.  Rejects bad DCMD status and PDs whose firmware state
 * is not SYSTEM (JBOD); otherwise hands pd_info to the child as ivars
 * (child now owns it).  Drops mfi_io_lock around newbus calls.
 */
1916 mfi_add_sys_pd_complete(struct mfi_command *cm)
1918 struct mfi_frame_header *hdr;
1919 struct mfi_pd_info *pd_info;
1920 struct mfi_softc *sc;
1924 hdr = &cm->cm_frame->header;
1925 pd_info = cm->cm_private;
1927 if (hdr->cmd_status != MFI_STAT_OK) {
1928 kfree(pd_info, M_MFIBUF);
1929 mfi_release_command(cm);
/* Only JBOD (SYSTEM-state) drives become mfisyspd children */
1932 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1933 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1934 pd_info->ref.v.device_id);
1935 kfree(pd_info, M_MFIBUF);
1936 mfi_release_command(cm);
1939 mfi_release_command(cm);
/* newbus attach may sleep — io lock must not be held across it */
1941 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1943 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1944 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1945 kfree(pd_info, M_MFIBUF);
1947 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1951 device_set_ivars(child, pd_info);
1952 device_set_desc(child, "MFI System PD");
1953 bus_generic_attach(sc->mfi_dev);
1955 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * mfi_bio_command: pop the next bio off the driver's bio queue and build
 * an MFI command for it — SCSI passthrough for system PDs, LD I/O frame
 * otherwise.  Returns NULL (and requeues the bio) when command build
 * fails or fewer than 2 free commands remain (IOCTL starvation guard).
 */
1958 static struct mfi_command *
1959 mfi_bio_command(struct mfi_softc *sc)
1962 struct mfi_command *cm = NULL;
1963 struct mfi_disk *mfid;
1965 /* reserving two commands to avoid starvation for IOCTL */
1966 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1968 if ((bio = mfi_dequeue_bio(sc)) == NULL)
1970 mfid = bio->bio_driver_info;
/* System PDs need SCSI passthrough; LDs use the native I/O frame */
1971 if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1972 cm = mfi_build_syspdio(sc, bio);
1974 cm = mfi_build_ldio(sc, bio);
/* Build failed (no free frame): put the bio back for later */
1976 mfi_enqueue_bio(sc, bio);
/*
 * mfi_build_syspdio: translate a bio for a system PD into an
 * MFI_CMD_PD_SCSI_IO passthrough frame with a READ(10)/WRITE(10) CDB.
 * Returns the prepared command, or NULL if no free command is available.
 * NOTE(review): uses a 10-byte CDB, so LBAs are limited to 32 bits —
 * presumably callers guarantee this for SYSPDs; confirm in full source.
 */
1980 static struct mfi_command *
1981 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1983 struct mfi_command *cm;
1985 struct mfi_system_pd *disk;
1986 struct mfi_pass_frame *pass;
1987 int flags = 0, blkcount = 0;
1988 uint32_t context = 0;
1990 if ((cm = mfi_dequeue_free(sc)) == NULL)
1993 /* Zero out the MFI frame */
/* Preserve the frame's context tag across the bzero — it identifies the
 * command slot to the completion path */
1994 context = cm->cm_frame->header.context;
1995 bzero(cm->cm_frame, sizeof(union mfi_frame));
1996 cm->cm_frame->header.context = context;
1998 pass = &cm->cm_frame->pass;
1999 bzero(pass->cdb, 16);
2000 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2001 switch (bp->b_cmd & 0x03) {
2003 pass->cdb[0] = READ_10;
2004 flags = MFI_CMD_DATAIN;
2007 pass->cdb[0] = WRITE_10;
2008 flags = MFI_CMD_DATAOUT;
2011 panic("Invalid bio command");
2014 /* Cheat with the sector length to avoid a non-constant division */
2015 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2016 disk = bio->bio_driver_info;
2017 /* Fill the LBA and Transfer length in CDB */
/* CDB bytes 2-5: 32-bit LBA, big-endian, as READ(10)/WRITE(10) require */
2018 pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
2019 pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
2020 pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
2021 pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
/* CDB bytes 7-8: 16-bit transfer length in sectors, big-endian */
2022 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2023 pass->cdb[8] = (blkcount & 0x00ff);
2024 pass->header.target_id = disk->pd_id;
2025 pass->header.timeout = 0;
2026 pass->header.flags = 0;
2027 pass->header.scsi_status = 0;
2028 pass->header.sense_len = MFI_SENSE_LEN;
2029 pass->header.data_len = bp->b_bcount;
2030 pass->header.cdb_len = 10;
/* Sense buffer bus address split into lo/hi 32-bit halves */
2031 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2032 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2033 cm->cm_complete = mfi_bio_complete;
2034 cm->cm_private = bio;
2035 cm->cm_data = bp->b_data;
2036 cm->cm_len = bp->b_bcount;
2037 cm->cm_sg = &pass->sgl;
2038 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2039 cm->cm_flags = flags;
/*
 * mfi_build_ldio: translate a bio for a logical drive into a native
 * MFI_CMD_LD_READ/LD_WRITE I/O frame (64-bit LBA split into lba_hi/lo,
 * length in sectors).  Returns the prepared command, or NULL if no free
 * command is available.
 */
2043 static struct mfi_command *
2044 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2046 struct mfi_io_frame *io;
2048 struct mfi_disk *disk;
2049 struct mfi_command *cm;
2050 int flags, blkcount;
2051 uint32_t context = 0;
2053 if ((cm = mfi_dequeue_free(sc)) == NULL)
2056 /* Zero out the MFI frame */
/* Preserve the frame's context tag across the bzero */
2057 context = cm->cm_frame->header.context;
2058 bzero(cm->cm_frame, sizeof(union mfi_frame));
2059 cm->cm_frame->header.context = context;
2061 io = &cm->cm_frame->io;
2062 switch (bp->b_cmd & 0x03) {
2064 io->header.cmd = MFI_CMD_LD_READ;
2065 flags = MFI_CMD_DATAIN;
2068 io->header.cmd = MFI_CMD_LD_WRITE;
2069 flags = MFI_CMD_DATAOUT;
2072 panic("Invalid bio command");
2075 /* Cheat with the sector length to avoid a non-constant division */
2076 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2077 disk = bio->bio_driver_info;
2078 io->header.target_id = disk->ld_id;
2079 io->header.timeout = 0;
2080 io->header.flags = 0;
2081 io->header.scsi_status = 0;
2082 io->header.sense_len = MFI_SENSE_LEN;
/* Unlike the passthrough frame, data_len here is in sectors */
2083 io->header.data_len = blkcount;
2084 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2085 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
/* 64-bit sector LBA split across lba_hi/lba_lo */
2086 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
2087 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
2088 cm->cm_complete = mfi_bio_complete;
2089 cm->cm_private = bio;
2090 cm->cm_data = bp->b_data;
2091 cm->cm_len = bp->b_bcount;
2092 cm->cm_sg = &io->sgl;
2093 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2094 cm->cm_flags = flags;
/*
 * mfi_bio_complete: completion callback for bio-originated commands.
 * Propagates firmware/SCSI/driver errors to the buf (B_ERROR), prints
 * sense data on firmware errors, then releases the command and finishes
 * the bio via mfi_disk_complete().
 */
2099 mfi_bio_complete(struct mfi_command *cm)
2103 struct mfi_frame_header *hdr;
2104 struct mfi_softc *sc;
2106 bio = cm->cm_private;
2108 hdr = &cm->cm_frame->header;
2111 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2112 bp->b_flags |= B_ERROR;
2114 device_printf(sc->mfi_dev, "I/O error, status= %d "
2115 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2116 mfi_print_sense(cm->cm_sc, cm->cm_sense);
/* cm_error covers driver-side failures (e.g. DMA mapping) */
2117 } else if (cm->cm_error != 0) {
2118 bp->b_flags |= B_ERROR;
/* Release the command before biodone so it's immediately reusable */
2121 mfi_release_command(cm);
2122 mfi_disk_complete(bio);
/*
 * mfi_startio: pump the I/O queues while resources last.  Work sources,
 * in priority order: already-prepared ready commands, queued CAM ccbs,
 * then fresh bios.  Stops when the queue is frozen (MFI_FLAGS_QFRZN),
 * there is no work, or a mapcmd fails (command is requeued).
 */
2126 mfi_startio(struct mfi_softc *sc)
2128 struct mfi_command *cm;
2129 struct ccb_hdr *ccbh;
2132 /* Don't bother if we're short on resources */
2133 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2136 /* Try a command that has already been prepared */
2137 cm = mfi_dequeue_ready(sc);
/* Next, CAM passthrough work (mfi_cam_start supplied by mfip attach) */
2140 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2141 cm = sc->mfi_cam_start(ccbh);
2144 /* Nope, so look for work on the bioq */
2146 cm = mfi_bio_command(sc);
2148 /* No work available, so exit */
2152 /* Send the command to the controller */
2153 if (mfi_mapcmd(sc, cm) != 0) {
/* Mapping failed: keep the command for the next pump cycle */
2154 mfi_requeue_ready(cm);
/*
 * mfi_mapcmd: DMA-map a command's data buffer (unless it has none, or is
 * an STP frame) and issue it.  The bus_dmamap_load callback (mfi_data_cb)
 * fills the scatter/gather list and sends the frame; EINPROGRESS means
 * the load was deferred, so freeze the queue until resources free up.
 * Commands without data go straight to the send path (ThunderBolt or
 * legacy, per sc->MFA_enabled).  Caller holds mfi_io_lock.
 */
2161 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2165 mfi_lockassert(&sc->mfi_io_lock);
2167 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
/* Polled commands cannot tolerate a deferred load: force NOWAIT */
2168 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2169 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2170 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2171 if (error == EINPROGRESS) {
2172 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2176 if (sc->MFA_enabled)
2177 error = mfi_tbolt_send_frame(sc, cm);
2179 error = mfi_send_frame(sc, cm);
/*
 * mfi_data_cb: bus_dmamap_load callback.  Builds the frame's
 * scatter/gather list from the DMA segments — IEEE "skinny" SGEs for I/O
 * on SKINNY controllers, otherwise 32- or 64-bit SGEs — syncs the map
 * for the transfer direction, recomputes the compound frame size, and
 * sends the frame.  STP frames get special first-segment splitting.
 * NOTE(review): elided listing — some assignments (sgl, first, dir init)
 * fall on lines not shown here.
 */
2186 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2188 struct mfi_frame_header *hdr;
2189 struct mfi_command *cm;
2191 struct mfi_softc *sc;
2192 int i, j, first, dir;
2194 cm = (struct mfi_command *)arg;
2196 hdr = &cm->cm_frame->header;
/* Load failed: record the error and complete the command immediately */
2200 kprintf("error %d in callback\n", error);
2201 cm->cm_error = error;
2202 mfi_complete(sc, cm);
2206 /* Use IEEE sgl only for IO's on a SKINNY controller
2207 * For other commands on a SKINNY controller use either
2208 * sg32 or sg64 based on the sizeof(bus_addr_t).
2209 * Also calculate the total frame size based on the type
2212 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2213 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2214 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2215 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2216 for (i = 0; i < nsegs; i++) {
2217 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2218 sgl->sg_skinny[i].len = segs[i].ds_len;
2219 sgl->sg_skinny[i].flag = 0;
2221 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2222 hdr->sg_count = nsegs;
/* STP: the first cm_stp_len bytes form their own leading SGE */
2225 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2226 first = cm->cm_stp_len;
2227 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2228 sgl->sg32[j].addr = segs[0].ds_addr;
2229 sgl->sg32[j++].len = first;
2231 sgl->sg64[j].addr = segs[0].ds_addr;
2232 sgl->sg64[j++].len = first;
/* Remaining segments, offset by `first` where applicable */
2236 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2237 for (i = 0; i < nsegs; i++) {
2238 sgl->sg32[j].addr = segs[i].ds_addr + first;
2239 sgl->sg32[j++].len = segs[i].ds_len - first;
2243 for (i = 0; i < nsegs; i++) {
2244 sgl->sg64[j].addr = segs[i].ds_addr + first;
2245 sgl->sg64[j++].len = segs[i].ds_len - first;
2248 hdr->flags |= MFI_FRAME_SGL64;
/* Sync for the direction(s) this command transfers */
2254 if (cm->cm_flags & MFI_CMD_DATAIN) {
2255 dir |= BUS_DMASYNC_PREREAD;
2256 hdr->flags |= MFI_FRAME_DIR_READ;
2258 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2259 dir |= BUS_DMASYNC_PREWRITE;
2260 hdr->flags |= MFI_FRAME_DIR_WRITE;
2262 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2263 cm->cm_flags |= MFI_CMD_MAPPED;
2266 * Instead of calculating the total number of frames in the
2267 * compound frame, it's already assumed that there will be at
2268 * least 1 frame, so don't compensate for the modulo of the
2269 * following division.
2271 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2272 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2274 if (sc->MFA_enabled)
2275 mfi_tbolt_send_frame(sc, cm);
2277 mfi_send_frame(sc, cm);
/*
 * mfi_send_frame: hand a frame to legacy (non-ThunderBolt) firmware via
 * the issue-command register.  Non-polled commands are enqueued busy and
 * completed by the interrupt path; polled commands spin up to
 * MFI_POLL_TIMEOUT_SECS waiting for cmd_status to change from
 * INVALID_STATUS.  NOTE(review): elided listing — the poll delay inside
 * the busy-wait loop falls on lines not shown here.
 */
2281 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2283 struct mfi_frame_header *hdr;
2284 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2286 hdr = &cm->cm_frame->header;
2288 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
/* Timestamp feeds the watchdog's stuck-command detection */
2289 cm->cm_timestamp = time_uptime;
2290 mfi_enqueue_busy(cm);
/* Polled: sentinel status + keep it out of the reply queue */
2292 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2293 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2297 * The bus address of the command is aligned on a 64 byte boundary,
2298 * leaving the least 6 bits as zero. For whatever reason, the
2299 * hardware wants the address shifted right by three, leaving just
2300 * 3 zero bits. These three bits are then used as a prefetching
2301 * hint for the hardware to predict how many frames need to be
2302 * fetched across the bus. If a command has more than 8 frames
2303 * then the 3 bits are set to 0x7 and the firmware uses other
2304 * information in the command to determine the total amount to fetch.
2305 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2306 * is enough for both 32bit and 64bit systems.
2308 if (cm->cm_extra_frames > 7)
2309 cm->cm_extra_frames = 7;
2311 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2313 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2316 /* This is a polled command, so busy-wait for it to complete. */
2317 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2324 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2325 device_printf(sc->mfi_dev, "Frame %p timed out "
2326 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * mfi_complete: common completion path.  Post-syncs and unloads the DMA
 * map if the command was mapped, marks it MFI_CMD_COMPLETED, then either
 * invokes its completion callback or (per the lksleep in
 * mfi_wait_command) presumably wakes a sleeping waiter on the elided
 * else-branch lines.
 */
2334 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2338 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP frames always get POSTREAD: their first SGE is a response buffer */
2340 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2341 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2342 dir |= BUS_DMASYNC_POSTREAD;
2343 if (cm->cm_flags & MFI_CMD_DATAOUT)
2344 dir |= BUS_DMASYNC_POSTWRITE;
2346 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2347 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2348 cm->cm_flags &= ~MFI_CMD_MAPPED;
2351 cm->cm_flags |= MFI_CMD_COMPLETED;
2353 if (cm->cm_complete != NULL)
2354 cm->cm_complete(cm);
/*
 * mfi_abort: send a polled MFI_CMD_ABORT frame targeting cm_abort
 * (identified by its context and frame bus address).  If aborting the
 * AEN command, flag cm_aen_abort so mfi_aen_complete() treats the
 * completion as an abort, then wait (up to 5 iterations of lksleep on
 * &sc->mfi_aen_cm) for it to clear.  Caller holds mfi_io_lock.
 */
2360 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2362 struct mfi_command *cm;
2363 struct mfi_abort_frame *abort;
2365 uint32_t context = 0;
2367 mfi_lockassert(&sc->mfi_io_lock);
2369 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2373 /* Zero out the MFI frame */
2374 context = cm->cm_frame->header.context;
2375 bzero(cm->cm_frame, sizeof(union mfi_frame));
2376 cm->cm_frame->header.context = context;
2378 abort = &cm->cm_frame->abort;
2379 abort->header.cmd = MFI_CMD_ABORT;
2380 abort->header.flags = 0;
2381 abort->header.scsi_status = 0;
/* Target command identified by context + frame bus address (lo/hi) */
2382 abort->abort_context = cm_abort->cm_frame->header.context;
2383 abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2384 abort->abort_mfi_addr_hi =
2385 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2387 cm->cm_flags = MFI_CMD_POLLED;
2390 sc->mfi_aen_cm->cm_aen_abort = 1;
2392 mfi_release_command(cm);
/* Bounded wait for the AEN completion path to clear mfi_aen_cm */
2394 while (i < 5 && sc->mfi_aen_cm != NULL) {
2395 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
/*
 * Write `len` bytes at `virt` to logical drive `id` starting at `lba`,
 * using a polled MFI_CMD_LD_WRITE I/O frame (crash-dump path).
 * (Excerpt: some original lines are elided here.)
 */
2404 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2407 struct mfi_command *cm;
2408 struct mfi_io_frame *io;
2410 uint32_t context = 0;
2412 if ((cm = mfi_dequeue_free(sc)) == NULL)
/* Preserve the per-command context across the frame bzero. */
2415 /* Zero out the MFI frame */
2416 context = cm->cm_frame->header.context;
2417 bzero(cm->cm_frame, sizeof(union mfi_frame));
2418 cm->cm_frame->header.context = context;
2420 io = &cm->cm_frame->io;
2421 io->header.cmd = MFI_CMD_LD_WRITE;
2422 io->header.target_id = id;
2423 io->header.timeout = 0;
2424 io->header.flags = 0;
2425 io->header.scsi_status = 0;
2426 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is a sector count; round the byte length up. */
2427 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2428 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2429 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2430 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2431 io->lba_lo = lba & 0xffffffff;
2434 cm->cm_sg = &io->sgl;
2435 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
/* Polled: dump context cannot sleep waiting for an interrupt. */
2436 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2438 error = mfi_mapcmd(sc, cm);
2439 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2440 BUS_DMASYNC_POSTWRITE);
2441 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2442 mfi_release_command(cm);
/*
 * Write `len` bytes at `virt` to system physical disk `id` at `lba` via a
 * polled pass-through WRITE(10) CDB (crash-dump path for syspd devices).
 * (Excerpt: some original lines are elided here.)
 */
2448 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2451 struct mfi_command *cm;
2452 struct mfi_pass_frame *pass;
2456 if ((cm = mfi_dequeue_free(sc)) == NULL)
2459 pass = &cm->cm_frame->pass;
2460 bzero(pass->cdb, 16);
2461 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Build a WRITE(10) CDB: 32-bit big-endian LBA, 16-bit block count. */
2462 pass->cdb[0] = WRITE_10;
2463 pass->cdb[2] = (lba & 0xff000000) >> 24;
2464 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2465 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2466 pass->cdb[5] = (lba & 0x000000ff);
2467 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2468 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2469 pass->cdb[8] = (blkcount & 0x00ff);
2470 pass->header.target_id = id;
2471 pass->header.timeout = 0;
2472 pass->header.flags = 0;
2473 pass->header.scsi_status = 0;
2474 pass->header.sense_len = MFI_SENSE_LEN;
2475 pass->header.data_len = len;
2476 pass->header.cdb_len = 10;
2477 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2478 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2481 cm->cm_sg = &pass->sgl;
2482 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
/* Polled: dump context cannot sleep waiting for an interrupt. */
2483 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2485 error = mfi_mapcmd(sc, cm);
2486 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2487 BUS_DMASYNC_POSTWRITE);
2488 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2489 mfi_release_command(cm);
/*
 * Character-device open: refuse while the softc is detaching, otherwise
 * mark the device open.  (Excerpt: some original lines are elided here.)
 */
2495 mfi_open(struct dev_open_args *ap)
2497 cdev_t dev = ap->a_head.a_dev;
2498 struct mfi_softc *sc;
2503 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2504 if (sc->mfi_detaching)
2507 sc->mfi_flags |= MFI_FLAGS_OPEN;
2510 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Character-device close: clear the open flag and drop any AEN
 * registrations owned by the closing process.
 * (Excerpt: some original lines are elided here.)
 */
2516 mfi_close(struct dev_close_args *ap)
2518 cdev_t dev = ap->a_head.a_dev;
2519 struct mfi_softc *sc;
2520 struct mfi_aen *mfi_aen_entry, *tmp;
2524 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2525 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* MUTABLE variant: entries may be removed while walking the list. */
2527 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2528 if (mfi_aen_entry->p == curproc) {
2529 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2531 kfree(mfi_aen_entry, M_MFIBUF);
2534 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Take the configuration lock for DCMD opcodes that mutate the array
 * configuration; other opcodes pass through unlocked.
 * (Excerpt: some original lines are elided here.)
 */
2539 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2543 case MFI_DCMD_LD_DELETE:
2544 case MFI_DCMD_CFG_ADD:
2545 case MFI_DCMD_CFG_CLEAR:
2546 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2547 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
/* Release the configuration lock iff mfi_config_lock() took it. */
2555 mfi_config_unlock(struct mfi_softc *sc, int locked)
2559 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2563 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * them: before destructive DCMDs are sent to the firmware, the affected
 * mfid/syspd children are disabled (vetoing the command if any disk is
 * busy).  Called with the io lock held.
 * (Excerpt: some original lines are elided here.)
 */
2567 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2569 struct mfi_disk *ld, *ld2;
2571 struct mfi_system_pd *syspd = NULL;
2575 mfi_lockassert(&sc->mfi_io_lock);
2577 switch (cm->cm_frame->dcmd.opcode) {
2578 case MFI_DCMD_LD_DELETE:
/* Locate the logical disk named in mbox[0] and disable it. */
2579 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2580 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2586 error = mfi_disk_disable(ld);
2588 case MFI_DCMD_CFG_CLEAR:
/* Disable every logical disk; on failure re-enable those done so far. */
2589 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2590 error = mfi_disk_disable(ld);
2595 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2598 mfi_disk_enable(ld2);
2602 case MFI_DCMD_PD_STATE_SET:
2603 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
/* Leaving a syspd configured state: disable its device node first. */
2605 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2606 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2607 if (syspd->pd_id == syspd_id)
2614 error = mfi_syspd_disable(syspd);
2622 /* Perform post-issue checks on commands from userland. */
/*
 * After a config-mutating DCMD completes: on success, detach the child
 * devices affected (dropping the io lock around newbus calls); on
 * failure, re-enable whatever mfi_check_command_pre() disabled.
 * (Excerpt: some original lines are elided here.)
 */
2624 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2626 struct mfi_disk *ld, *ldn;
2627 struct mfi_system_pd *syspd = NULL;
2631 switch (cm->cm_frame->dcmd.opcode) {
2632 case MFI_DCMD_LD_DELETE:
2633 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2634 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2637 KASSERT(ld != NULL, ("volume dissappeared"));
2638 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* Drop the io lock across device_delete_child() — it may sleep. */
2639 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2641 device_delete_child(sc->mfi_dev, ld->ld_dev);
2643 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2645 mfi_disk_enable(ld);
2647 case MFI_DCMD_CFG_CLEAR:
2648 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2649 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2651 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2652 device_delete_child(sc->mfi_dev, ld->ld_dev);
2655 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2657 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2658 mfi_disk_enable(ld);
2661 case MFI_DCMD_CFG_ADD:
2662 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2663 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK)
2666 case MFI_DCMD_PD_STATE_SET:
2667 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2669 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2670 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2671 if (syspd->pd_id == syspd_id)
2677 /* If the transition fails then enable the syspd again */
2678 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2679 mfi_syspd_enable(syspd);
/*
 * Detect whether the command creates or deletes an SSCD (CacheCade)
 * volume.  For LD_DELETE this issues a synchronous LD_GET_INFO to fetch
 * the target volume's parameters before deciding.
 * (Excerpt: some original lines are elided here; the return value is
 * consumed as a "skip pre/post checks" flag by mfi_ioctl.)
 */
2685 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2687 struct mfi_config_data *conf_data = cm->cm_data;
2688 struct mfi_command *ld_cm = NULL;
2689 struct mfi_ld_info *ld_info = NULL;
2692 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2693 (conf_data->ld[0].params.isSSCD == 1)) {
2695 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2696 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2697 (void **)&ld_info, sizeof(*ld_info));
2699 device_printf(sc->mfi_dev, "Failed to allocate"
2700 "MFI_DCMD_LD_GET_INFO %d", error);
2702 kfree(ld_info, M_MFIBUF);
2705 ld_cm->cm_flags = MFI_CMD_DATAIN;
/* Target the same logical drive the delete names in mbox[0]. */
2706 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2707 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2708 if (mfi_wait_command(sc, ld_cm) != 0) {
2709 device_printf(sc->mfi_dev, "failed to get log drv\n");
2710 mfi_release_command(ld_cm);
2711 kfree(ld_info, M_MFIBUF);
2715 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2716 kfree(ld_info, M_MFIBUF);
2717 mfi_release_command(ld_cm);
2720 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2723 if (ld_info->ld_config.params.isSSCD == 1)
2726 mfi_release_command(ld_cm);
2727 kfree(ld_info, M_MFIBUF);
/*
 * Set up a SCSI-transport-protocol (STP) pass-through ioctl command:
 * allocate one kernel DMA bounce buffer per user SGE, wire the SG list
 * (32- or 64-bit form) into the frame, and copy the user data in.
 * (Excerpt: some original lines are elided here, including error
 * unwinding paths.)
 */
2733 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2736 struct mfi_ioc_packet *ioc;
2737 ioc = (struct mfi_ioc_packet *)arg;
2738 int sge_size, error;
2739 struct megasas_sge *kern_sge;
2741 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* The SG list lives inside the frame at the caller-supplied offset. */
2742 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2743 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
/* 64-bit bus addresses need the wider SG entry format. */
2745 if (sizeof(bus_addr_t) == 8) {
2746 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2747 cm->cm_extra_frames = 2;
2748 sge_size = sizeof(struct mfi_sg64);
2750 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2751 sge_size = sizeof(struct mfi_sg32);
2754 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2755 for (i = 0; i < ioc->mfi_sge_count; i++) {
/* One dedicated DMA tag/map/buffer per SGE, sized to the user iovec. */
2756 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2757 1, 0, /* algnmnt, boundary */
2758 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2759 BUS_SPACE_MAXADDR, /* highaddr */
2760 NULL, NULL, /* filter, filterarg */
2761 ioc->mfi_sgl[i].iov_len,/* maxsize */
2763 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2764 BUS_DMA_ALLOCNOW, /* flags */
2765 &sc->mfi_kbuff_arr_dmat[i])) {
2766 device_printf(sc->mfi_dev,
2767 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2771 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2772 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2773 &sc->mfi_kbuff_arr_dmamap[i])) {
2774 device_printf(sc->mfi_dev,
2775 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2779 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2780 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2781 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2782 &sc->mfi_kbuff_arr_busaddr[i], 0);
2784 if (!sc->kbuff_arr[i]) {
2785 device_printf(sc->mfi_dev,
2786 "Could not allocate memory for kbuff_arr info\n");
2789 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2790 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
/* Mirror the entry into the frame's SG list in the matching width. */
2792 if (sizeof(bus_addr_t) == 8) {
2793 cm->cm_frame->stp.sgl.sg64[i].addr =
2794 kern_sge[i].phys_addr;
2795 cm->cm_frame->stp.sgl.sg64[i].len =
2796 ioc->mfi_sgl[i].iov_len;
2798 cm->cm_frame->stp.sgl.sg32[i].addr =
2799 kern_sge[i].phys_addr;
2800 cm->cm_frame->stp.sgl.sg32[i].len =
2801 ioc->mfi_sgl[i].iov_len;
/* Pull the user payload into the freshly-allocated kernel buffer. */
2804 error = copyin(ioc->mfi_sgl[i].iov_base,
2806 ioc->mfi_sgl[i].iov_len);
2808 device_printf(sc->mfi_dev, "Copy in failed\n");
2813 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * Execute a user DCMD passthrough (MFIIO_PASSTHRU): copy in the optional
 * data buffer, run the frame synchronously with pre/post config checks,
 * then copy the frame and data back out.
 * (Excerpt: some original lines are elided here.)
 */
2818 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2820 struct mfi_command *cm;
2821 struct mfi_dcmd_frame *dcmd;
2822 void *ioc_buf = NULL;
2824 int error = 0, locked;
2827 if (ioc->buf_size > 0) {
2828 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2829 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2831 device_printf(sc->mfi_dev, "failed to copyin\n");
2832 kfree(ioc_buf, M_MFIBUF);
/* Config lock only for opcodes that mutate the array configuration. */
2837 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2839 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Block until a free command is available; wakeup channel is the fn addr. */
2840 while ((cm = mfi_dequeue_free(sc)) == NULL)
2841 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2843 /* Save context for later */
2844 context = cm->cm_frame->header.context;
2846 dcmd = &cm->cm_frame->dcmd;
2847 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2849 cm->cm_sg = &dcmd->sgl;
2850 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2851 cm->cm_data = ioc_buf;
2852 cm->cm_len = ioc->buf_size;
2854 /* restore context */
2855 cm->cm_frame->header.context = context;
2857 /* Cheat since we don't know if we're writing or reading */
2858 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2860 error = mfi_check_command_pre(sc, cm);
2864 error = mfi_wait_command(sc, cm);
2866 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the (possibly firmware-updated) frame to the caller. */
2869 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2870 mfi_check_command_post(sc, cm);
2872 mfi_release_command(cm);
2873 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2874 mfi_config_unlock(sc, locked);
2875 if (ioc->buf_size > 0)
2876 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2878 kfree(ioc_buf, M_MFIBUF);
/* Widen a (possibly 32-bit) user-supplied pointer value to a kernel void *. */
2882 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * Main character-device ioctl entry point.  Dispatches statistics
 * queries, disk lookups, raw frame passthrough (MFI_CMD variants),
 * AEN registration, Linux compatibility shims, and MFIIO_PASSTHRU.
 * (Excerpt: many original lines are elided here — case labels, error
 * unwinding, and several closing braces are not visible.)
 */
2885 mfi_ioctl(struct dev_ioctl_args *ap)
2887 cdev_t dev = ap->a_head.a_dev;
2888 u_long cmd = ap->a_cmd;
2889 int flag = ap->a_fflag;
2890 caddr_t arg = ap->a_data;
2891 struct mfi_softc *sc;
2892 union mfi_statrequest *ms;
2893 struct mfi_ioc_packet *ioc;
2894 struct mfi_ioc_aen *aen;
2895 struct mfi_command *cm = NULL;
2897 union mfi_sense_ptr sense_ptr;
2898 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2901 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
/* Refuse all ioctls while the adapter is in a critical/reset state. */
2910 if (sc->hw_crit_error)
2913 if (sc->issuepend_done == 0)
2918 ms = (union mfi_statrequest *)arg;
2919 switch (ms->ms_item) {
2924 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2925 sizeof(struct mfi_qstat));
2932 case MFIIO_QUERY_DISK:
2934 struct mfi_query_disk *qd;
2935 struct mfi_disk *ld;
2937 qd = (struct mfi_query_disk *)arg;
2938 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2939 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2940 if (ld->ld_id == qd->array_id)
2945 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2949 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2951 bzero(qd->devname, SPECNAMELEN + 1);
2952 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2953 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2958 devclass_t devclass;
2959 ioc = (struct mfi_ioc_packet *)arg;
/* Redirect to the softc of the adapter number named in the ioctl. */
2962 adapter = ioc->mfi_adapter_no;
2963 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2964 devclass = devclass_find("mfi");
2965 sc = devclass_get_softc(devclass, adapter);
2967 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2968 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2969 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2972 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2976 * save off original context since copying from user
2977 * will clobber some data
2979 context = cm->cm_frame->header.context;
2980 cm->cm_frame->header.context = cm->cm_index;
2982 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2983 2 * MEGAMFI_FRAME_SIZE);
2984 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2985 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2986 cm->cm_frame->header.scsi_status = 0;
2987 cm->cm_frame->header.pad0 = 0;
2988 if (ioc->mfi_sge_count) {
2990 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Translate the frame's data-direction flags into command flags. */
2993 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2994 cm->cm_flags |= MFI_CMD_DATAIN;
2995 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2996 cm->cm_flags |= MFI_CMD_DATAOUT;
2997 /* Legacy app shim */
2998 if (cm->cm_flags == 0)
2999 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3000 cm->cm_len = cm->cm_frame->header.data_len;
/* STP carries an extra payload described by the first user SGE. */
3001 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3002 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3003 cm->cm_len += cm->cm_stp_len;
3006 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3007 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3013 /* restore header context */
3014 cm->cm_frame->header.context = context;
3016 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3017 res = mfi_stp_cmd(sc, cm, arg);
/* Gather user data from each SGE into the contiguous kernel buffer. */
3022 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3023 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3024 for (i = 0; i < ioc->mfi_sge_count; i++) {
3025 addr = ioc->mfi_sgl[i].iov_base;
3026 len = ioc->mfi_sgl[i].iov_len;
3027 error = copyin(addr, temp, len);
3029 device_printf(sc->mfi_dev,
3030 "Copy in failed\n");
3038 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3039 locked = mfi_config_lock(sc,
3040 cm->cm_frame->dcmd.opcode);
/* PD pass-through frames get the command's own sense buffer address. */
3042 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3043 cm->cm_frame->pass.sense_addr_lo =
3044 (uint32_t)cm->cm_sense_busaddr;
3045 cm->cm_frame->pass.sense_addr_hi =
3046 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3048 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* SSCD create/delete skips the generic pre/post disk bookkeeping. */
3049 skip_pre_post = mfi_check_for_sscd(sc, cm);
3050 if (!skip_pre_post) {
3051 error = mfi_check_command_pre(sc, cm);
3053 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3058 if ((error = mfi_wait_command(sc, cm)) != 0) {
3059 device_printf(sc->mfi_dev,
3060 "Controller polled failed\n");
3061 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3066 mfi_check_command_post(sc, cm);
3067 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter the result back to the user SGEs (non-STP path). */
3069 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3071 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3072 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3073 for (i = 0; i < ioc->mfi_sge_count; i++) {
3074 addr = ioc->mfi_sgl[i].iov_base;
3075 len = ioc->mfi_sgl[i].iov_len;
3076 error = copyout(temp, addr, len);
3078 device_printf(sc->mfi_dev,
3079 "Copy out failed\n");
3087 if (ioc->mfi_sense_len) {
3088 /* get user-space sense ptr then copy out sense */
3089 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3090 &sense_ptr.sense_ptr_data[0],
3091 sizeof(sense_ptr.sense_ptr_data));
3092 error = copyout(cm->cm_sense, sense_ptr.user_space,
3093 ioc->mfi_sense_len);
3095 device_printf(sc->mfi_dev,
3096 "Copy out failed\n");
3101 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3103 mfi_config_unlock(sc, locked);
3105 kfree(data, M_MFIBUF);
/* STP cleanup: unload/free/destroy the per-SGE bounce DMA resources. */
3106 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3107 for (i = 0; i < 2; i++) {
3108 if (sc->kbuff_arr[i]) {
3109 if (sc->mfi_kbuff_arr_busaddr != 0)
3111 sc->mfi_kbuff_arr_dmat[i],
3112 sc->mfi_kbuff_arr_dmamap[i]
3114 if (sc->kbuff_arr[i] != NULL)
3116 sc->mfi_kbuff_arr_dmat[i],
3118 sc->mfi_kbuff_arr_dmamap[i]
3120 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3121 bus_dma_tag_destroy(
3122 sc->mfi_kbuff_arr_dmat[i]);
3127 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3128 mfi_release_command(cm);
3129 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3135 aen = (struct mfi_ioc_aen *)arg;
3136 error = mfi_aen_register(sc, aen->aen_seq_num,
3137 aen->aen_class_locale);
3140 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3142 devclass_t devclass;
3143 struct mfi_linux_ioc_packet l_ioc;
3146 devclass = devclass_find("mfi");
3147 if (devclass == NULL)
3150 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Forward to the adapter the Linux app actually asked for. */
3153 adapter = l_ioc.lioc_adapter_no;
3154 sc = devclass_get_softc(devclass, adapter);
3157 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3161 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3163 devclass_t devclass;
3164 struct mfi_linux_ioc_aen l_aen;
3167 devclass = devclass_find("mfi");
3168 if (devclass == NULL)
3171 error = copyin(arg, &l_aen, sizeof(l_aen));
3174 adapter = l_aen.laen_adapter_no;
3175 sc = devclass_get_softc(devclass, adapter);
3178 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3182 case MFIIO_PASSTHRU:
3183 error = mfi_user_command(sc, iop);
3186 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Linux-compat ioctl backend: runs Linux-format passthrough frames
 * (MFI_LINUX_CMD_2) and AEN registration (MFI_LINUX_SET_AEN_2) against
 * the adapter.  Mirrors mfi_ioctl's passthrough path but uses the Linux
 * packet layout and 32-bit user pointers (via PTRIN).
 * (Excerpt: many original lines are elided here.)
 */
3195 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
3197 struct mfi_softc *sc;
3198 struct mfi_linux_ioc_packet l_ioc;
3199 struct mfi_linux_ioc_aen l_aen;
3200 struct mfi_command *cm = NULL;
3201 struct mfi_aen *mfi_aen_entry;
3202 union mfi_sense_ptr sense_ptr;
3204 uint8_t *data = NULL, *temp;
3211 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3212 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the SGE count before trusting any of the user-supplied iovecs. */
3216 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3220 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3221 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3222 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3225 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3229 * save off original context since copying from user
3230 * will clobber some data
3232 context = cm->cm_frame->header.context;
3234 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3235 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3236 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3237 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3238 cm->cm_frame->header.scsi_status = 0;
3239 cm->cm_frame->header.pad0 = 0;
3240 if (l_ioc.lioc_sge_count)
3242 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3244 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3245 cm->cm_flags |= MFI_CMD_DATAIN;
3246 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3247 cm->cm_flags |= MFI_CMD_DATAOUT;
3248 cm->cm_len = cm->cm_frame->header.data_len;
3250 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3251 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3257 /* restore header context */
3258 cm->cm_frame->header.context = context;
/* Gather the user SGEs into the contiguous kernel buffer for DATAOUT. */
3261 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3262 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3263 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3265 l_ioc.lioc_sgl[i].iov_len);
3267 device_printf(sc->mfi_dev,
3268 "Copy in failed\n");
3271 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3275 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3276 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* PD pass-through frames get the command's own sense buffer address. */
3278 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3279 cm->cm_frame->pass.sense_addr_lo =
3280 (uint32_t)cm->cm_sense_busaddr;
3281 cm->cm_frame->pass.sense_addr_hi =
3282 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3285 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3286 error = mfi_check_command_pre(sc, cm);
3288 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3292 if ((error = mfi_wait_command(sc, cm)) != 0) {
3293 device_printf(sc->mfi_dev,
3294 "Controller polled failed\n");
3295 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3299 mfi_check_command_post(sc, cm);
3300 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter results back to the user SGEs for DATAIN. */
3303 if (cm->cm_flags & MFI_CMD_DATAIN) {
3304 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3305 error = copyout(temp,
3306 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3307 l_ioc.lioc_sgl[i].iov_len);
3309 device_printf(sc->mfi_dev,
3310 "Copy out failed\n");
3313 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3317 if (l_ioc.lioc_sense_len) {
3318 /* get user-space sense ptr then copy out sense */
3319 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3320 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3321 &sense_ptr.sense_ptr_data[0],
3322 sizeof(sense_ptr.sense_ptr_data));
3325 * only 32bit Linux support so zero out any
3326 * address over 32bit
3328 sense_ptr.addr.high = 0;
3330 error = copyout(cm->cm_sense, sense_ptr.user_space,
3331 l_ioc.lioc_sense_len);
3333 device_printf(sc->mfi_dev,
3334 "Copy out failed\n");
3339 error = copyout(&cm->cm_frame->header.cmd_status,
3340 &((struct mfi_linux_ioc_packet*)arg)
3341 ->lioc_frame.hdr.cmd_status,
3344 device_printf(sc->mfi_dev,
3345 "Copy out failed\n");
3350 mfi_config_unlock(sc, locked);
3352 kfree(data, M_MFIBUF);
3354 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3355 mfi_release_command(cm);
3356 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3360 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3361 error = copyin(arg, &l_aen, sizeof(l_aen));
3364 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3365 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3367 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Track the registering process so close() can drop its registration. */
3368 if (mfi_aen_entry != NULL) {
3369 mfi_aen_entry->p = curproc;
3370 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3373 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3374 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
3377 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3379 kfree(mfi_aen_entry, M_MFIBUF);
3381 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3385 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * kqueue attach: hook read/write filters onto the softc's knote list;
 * other filter types are rejected with EOPNOTSUPP.
 * (Excerpt: some original lines are elided here.)
 */
3394 mfi_kqfilter(struct dev_kqfilter_args *ap)
3396 cdev_t dev = ap->a_head.a_dev;
3397 struct knote *kn = ap->a_kn;
3398 struct mfi_softc *sc;
3399 struct klist *klist;
3404 switch (kn->kn_filter) {
3406 kn->kn_fop = &mfi_read_filterops;
3407 kn->kn_hook = (caddr_t)sc;
3410 kn->kn_fop = &mfi_write_filterops;
3411 kn->kn_hook = (caddr_t)sc;
3414 ap->a_result = EOPNOTSUPP;
3418 klist = &sc->mfi_kq.ki_note;
3419 knote_insert(klist, kn);
/* kqueue detach: remove the knote from the softc's note list. */
3425 mfi_filter_detach(struct knote *kn)
3427 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3428 struct klist *klist = &sc->mfi_kq.ki_note;
3430 knote_remove(klist, kn);
/*
 * EVFILT_READ filter: fires (and consumes the trigger) when an AEN has
 * been posted; flags EV_ERROR when no AEN command is outstanding.
 * (Excerpt: some original lines are elided here.)
 */
3434 mfi_filter_read(struct knote *kn, long hint)
3436 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3439 if (sc->mfi_aen_triggered != 0) {
/* One-shot: clear the trigger so the next poll waits for a new AEN. */
3441 sc->mfi_aen_triggered = 0;
3443 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3444 kn->kn_flags |= EV_ERROR;
3447 sc->mfi_poll_waiting = 1;
/*
 * EVFILT_WRITE filter.  NOTE(review): the body of mfi_filter_write and
 * the header of the following routine are elided in this excerpt; the
 * lines from "struct mfi_softc *sc;" onward belong to a separate
 * diagnostic routine that walks every mfi softc and reports busy
 * commands older than mfi_cmd_timeout — confirm against the full file.
 */
3453 mfi_filter_write(struct knote *kn, long hint)
3461 struct mfi_softc *sc;
3462 struct mfi_command *cm;
3468 dc = devclass_find("mfi");
3470 kprintf("No mfi dev class\n");
/* Iterate every mfi instance until devclass_get_softc runs out. */
3474 for (i = 0; ; i++) {
3475 sc = devclass_get_softc(dc, i);
3478 device_printf(sc->mfi_dev, "Dumping\n\n");
/* Commands whose timestamp predates this deadline have timed out. */
3480 deadline = time_uptime - mfi_cmd_timeout;
3481 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3482 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3483 if (cm->cm_timestamp < deadline) {
3484 device_printf(sc->mfi_dev,
3485 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3486 cm, (int)(time_uptime - cm->cm_timestamp));
3497 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3504 mfi_timeout(void *data)
3506 struct mfi_softc *sc = (struct mfi_softc *)data;
3507 struct mfi_command *cm;
3511 deadline = time_uptime - mfi_cmd_timeout;
3512 if (sc->adpreset == 0) {
3513 if (!mfi_tbolt_reset(sc)) {
3514 callout_reset(&sc->mfi_watchdog_callout,
3515 mfi_cmd_timeout * hz, mfi_timeout, sc);
3519 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3520 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3521 if (sc->mfi_aen_cm == cm)
3523 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3524 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3525 cm->cm_timestamp = time_uptime;
3527 device_printf(sc->mfi_dev,
3528 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3529 cm, (int)(time_uptime - cm->cm_timestamp));
3531 MFI_VALIDATE_CMD(sc, cm);
3542 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3544 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,