2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
53 * FreeBSD projects/head_mfi/ r233016
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
64 #include <sys/eventhandler.h>
66 #include <sys/bus_dma.h>
70 #include <sys/signalvar.h>
71 #include <sys/device.h>
72 #include <sys/mplock2.h>
73 #include <sys/taskqueue.h>
75 #include <bus/cam/scsi/scsi_all.h>
77 #include <bus/pci/pcivar.h>
79 #include <dev/raid/mfi/mfireg.h>
80 #include <dev/raid/mfi/mfi_ioctl.h>
81 #include <dev/raid/mfi/mfivar.h>
/*
 * Forward declarations for driver-internal helpers.  All are file-local
 * (static); the groups below follow the order of definition in this file:
 * setup/init, logical-disk and system-PD probing, AEN (async event
 * notification) handling, block I/O command construction, ioctl support,
 * and the per-adapter-generation register access methods (xscale vs. ppc).
 */
83 static int mfi_alloc_commands(struct mfi_softc *);
84 static int mfi_comms_init(struct mfi_softc *);
85 static int mfi_get_controller_info(struct mfi_softc *);
86 static int mfi_get_log_state(struct mfi_softc *,
87 struct mfi_evt_log_state **);
88 static int mfi_parse_entries(struct mfi_softc *, int, int);
89 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
90 uint32_t, void **, size_t);
91 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
92 static void mfi_startup(void *arg);
93 static void mfi_intr(void *arg);
94 static void mfi_ldprobe(struct mfi_softc *sc);
95 static void mfi_syspdprobe(struct mfi_softc *sc);
96 static void mfi_handle_evt(void *context, int pending);
97 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
98 static void mfi_aen_complete(struct mfi_command *);
99 static int mfi_add_ld(struct mfi_softc *sc, int);
100 static void mfi_add_ld_complete(struct mfi_command *);
101 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
102 static void mfi_add_sys_pd_complete(struct mfi_command *);
103 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
104 static void mfi_bio_complete(struct mfi_command *);
105 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
106 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
107 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
108 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
109 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
110 static void mfi_timeout(void *);
111 static int mfi_user_command(struct mfi_softc *,
112 struct mfi_ioc_passthru *);
/* Per-hardware-generation register access methods; installed into the
 * softc function pointers in mfi_attach() based on sc->mfi_flags. */
113 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
114 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
115 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
116 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
117 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
118 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
119 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
121 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
123 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
124 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
125 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
126 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
127 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
/* kqueue filter support for the management character device. */
129 static void mfi_filter_detach(struct knote *);
130 static int mfi_filter_read(struct knote *, long);
131 static int mfi_filter_write(struct knote *, long);
/*
 * Loader tunables / sysctl knobs under hw.mfi.*.  Each TUNABLE_INT lets
 * the value be set from the loader before attach; the paired SYSCTL_INT
 * exposes it at runtime (CTLFLAG_RW knobs are changeable live, max_cmds
 * is read-only once the command pool has been sized).
 */
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
137 0, "event message locale");
139 static int mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
142 0, "event message class");
/* Driver command-pool size cap; the effective pool is the smaller of this
 * and the firmware-reported maximum (see mfi_alloc_commands()). */
144 static int mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
149 static int mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
152 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
/* Watchdog interval used by mfi_timeout() (seconds). */
154 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
155 TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RW, &mfi_cmd_timeout,
157 0, "Command timeout (in seconds)");
159 /* Management interface */
160 static d_open_t mfi_open;
161 static d_close_t mfi_close;
162 static d_ioctl_t mfi_ioctl;
163 static d_kqfilter_t mfi_kqfilter;
/* Character-device switch for the /dev/mfi%d management node created in
 * mfi_attach(); D_MPSAFE marks the entry points as MP-safe. */
165 static struct dev_ops mfi_ops = {
166 { "mfi", 0, D_MPSAFE },
168 .d_close = mfi_close,
169 .d_ioctl = mfi_ioctl,
170 .d_kqfilter = mfi_kqfilter,
/* kqueue filterops wired up by mfi_kqfilter(); both share one detach. */
173 static struct filterops mfi_read_filterops =
174 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
175 static struct filterops mfi_write_filterops =
176 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
178 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
180 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/* Global DMA bookkeeping used by the "skinny" adapter support. */
181 struct mfi_skinny_dma_info mfi_skinny;
/*
 * Enable firmware->host interrupts on xscale (1064R) adapters by writing
 * the outbound interrupt mask register.
 */
184 mfi_enable_intr_xscale(struct mfi_softc *sc)
186 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on ppc-style adapters (1078 / GEN2 / SKINNY).  The
 * doorbell-clear register is flushed first, then the outbound mask is set
 * to unmask only that generation's interrupt bit; any other adapter type
 * reaching here is a driver bug (panic).
 */
190 mfi_enable_intr_ppc(struct mfi_softc *sc)
192 if (sc->mfi_flags & MFI_FLAGS_1078) {
193 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
194 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
195 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
196 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
197 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
/* SKINNY has no doorbell-clear write here; only bit 0 is unmasked. */
199 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
201 panic("unknown adapter type");
/* Read the firmware status word from the xscale outbound message reg. */
206 mfi_read_fw_status_xscale(struct mfi_softc *sc)
208 return MFI_READ4(sc, MFI_OMSG0);
/* Read the firmware status word from the ppc outbound scratchpad reg. */
212 mfi_read_fw_status_ppc(struct mfi_softc *sc)
214 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether the pending interrupt belongs to us on xscale adapters;
 * if valid, acknowledge it by writing the status back to MFI_OSTS.
 */
218 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
222 status = MFI_READ4(sc, MFI_OSTS);
223 if ((status & MFI_OSTS_INTR_VALID) == 0)
226 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check/acknowledge a pending interrupt on ppc-style adapters.  Each
 * generation has its own "reply message" bit in MFI_OSTS; if none of the
 * relevant bits are set the interrupt is not ours.  SKINNY acks via
 * MFI_OSTS itself, the others via the doorbell-clear register ODCR0.
 */
231 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
235 status = MFI_READ4(sc, MFI_OSTS);
236 if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
237 ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
238 ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
241 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
242 MFI_WRITE4(sc, MFI_OSTS, status)
244 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to xscale firmware: the frame's bus address (>>3, i.e.
 * 8-byte aligned) is combined with the frame count and written to the
 * inbound queue port.
 */
249 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
251 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
/*
 * Post a command to ppc-style firmware.  The low bit of the queue-port
 * write is set and the frame count is shifted into bits 1+.  SKINNY uses
 * a 64-bit inbound queue port (low/high halves); the high half is always
 * written as zero here.
 */
255 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
257 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
258 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
259 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
261 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
/*
 * Drive the firmware toward the READY state before the driver uses it.
 * Reads the firmware status register, switches on the current state to
 * issue the appropriate doorbell write (clear handshake, ready-transition,
 * hotplug ack, ...), then polls until the state changes, repeating until
 * READY is reached or the firmware is declared stuck.  SKINNY/ThunderBolt
 * adapters use the MFI_SKINNY_IDB doorbell; older parts use MFI_IDB.
 */
266 mfi_transition_firmware(struct mfi_softc *sc)
268 uint32_t fw_state, cur_state;
270 uint32_t cur_abs_reg_val = 0;
271 uint32_t prev_abs_reg_val = 0;
273 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
274 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
275 while (fw_state != MFI_FWSTATE_READY) {
277 device_printf(sc->mfi_dev, "Waiting for firmware to "
279 cur_state = fw_state;
281 case MFI_FWSTATE_FAULT:
282 device_printf(sc->mfi_dev, "Firmware fault\n");
284 case MFI_FWSTATE_WAIT_HANDSHAKE:
285 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
286 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
288 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
289 max_wait = MFI_RESET_WAIT_TIME;
291 case MFI_FWSTATE_OPERATIONAL:
292 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
/* 7 = MFI_FWINIT_ABORT|MFI_FWINIT_READY|MFI_FWINIT_MFIMODE,
 * presumably — magic value taken from the vendor driver; confirm
 * against mfireg.h. */
293 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
295 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
296 max_wait = MFI_RESET_WAIT_TIME;
298 case MFI_FWSTATE_UNDEFINED:
299 case MFI_FWSTATE_BB_INIT:
300 max_wait = MFI_RESET_WAIT_TIME;
302 case MFI_FWSTATE_FW_INIT_2:
303 max_wait = MFI_RESET_WAIT_TIME;
305 case MFI_FWSTATE_FW_INIT:
306 case MFI_FWSTATE_FLUSH_CACHE:
307 max_wait = MFI_RESET_WAIT_TIME;
309 case MFI_FWSTATE_DEVICE_SCAN:
310 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
/* Remember the full register value so scan progress (not just the
 * state field) can be detected below. */
311 prev_abs_reg_val = cur_abs_reg_val;
313 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
314 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
315 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
317 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
318 max_wait = MFI_RESET_WAIT_TIME;
321 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
/* Poll roughly max_wait seconds (10 checks/sec) for a state change. */
325 for (i = 0; i < (max_wait * 10); i++) {
326 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
327 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
328 if (fw_state == cur_state)
333 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
334 /* Check the device scanning progress */
335 if (prev_abs_reg_val != cur_abs_reg_val)
338 if (fw_state == cur_state) {
339 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load() callback that records the bus address of the first
 * (single) DMA segment into the bus_addr_t pointed to by 'arg'.
 */
348 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
353 *addr = segs[0].ds_addr;
/*
 * Main attach routine.  Initializes locks/queues, installs the
 * generation-specific register methods, waits for firmware READY,
 * creates the DMA tags/memory for the version buffer, (ThunderBolt only)
 * request/reply pools and IOC-init descriptors, the comms queues, command
 * frames and sense buffers, allocates the command pool, brings the
 * firmware to operational state, hooks the interrupt, registers the
 * config hook / AEN / shutdown handler, creates the /dev management node
 * and sysctls, and finally starts the watchdog callout.
 */
357 mfi_attach(struct mfi_softc *sc)
360 int error, commsz, framessz, sensesz;
361 int frames, unit, max_fw_sge;
362 uint32_t tb_mem_size = 0;
367 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
370 lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
371 lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
372 TAILQ_INIT(&sc->mfi_ld_tqh);
373 TAILQ_INIT(&sc->mfi_syspd_tqh);
374 TAILQ_INIT(&sc->mfi_evt_queue);
375 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
376 TAILQ_INIT(&sc->mfi_aen_pids);
377 TAILQ_INIT(&sc->mfi_cam_ccbq);
385 sc->last_seq_num = 0;
/* OCR stays disabled until controller info confirms support (below). */
386 sc->disableOnlineCtrlReset = 1;
387 sc->issuepend_done = 1;
388 sc->hw_crit_error = 0;
/* Select register-access methods per adapter generation. */
390 if (sc->mfi_flags & MFI_FLAGS_1064R) {
391 sc->mfi_enable_intr = mfi_enable_intr_xscale;
392 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
393 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
394 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
395 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
396 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
397 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
398 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
399 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
400 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
401 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
403 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
405 sc->mfi_enable_intr = mfi_enable_intr_ppc;
406 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
407 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
408 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
412 /* Before we get too far, see if the firmware is working */
413 if ((error = mfi_transition_firmware(sc)) != 0) {
414 device_printf(sc->mfi_dev, "Firmware not in READY state, "
415 "error %d\n", error);
419 /* Start: LSIP200113393 */
/* DMA-able scratch buffer used for the firmware version string. */
420 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
421 1, 0, /* algnmnt, boundary */
422 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
423 BUS_SPACE_MAXADDR, /* highaddr */
424 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
426 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
428 &sc->verbuf_h_dmat)) {
429 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
432 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
433 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
434 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
437 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
438 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
439 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
440 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
441 /* End: LSIP200113393 */
444 * Get information needed for sizing the contiguous memory for the
445 * frame pool. Size down the sgl parameter since we know that
446 * we will never need more than what's required for MAXPHYS.
447 * It would be nice if these constants were available at runtime
448 * instead of compile time.
450 status = sc->mfi_read_fw_status(sc);
451 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
452 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
453 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
455 /* ThunderBolt Support get the contiguous memory */
457 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
458 mfi_tbolt_init_globals(sc);
459 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
460 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
461 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
463 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
464 1, 0, /* algnmnt, boundary */
465 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
466 BUS_SPACE_MAXADDR, /* highaddr */
467 tb_mem_size, /* maxsize */
469 tb_mem_size, /* maxsegsize */
472 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
475 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
476 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
477 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
480 bzero(sc->request_message_pool, tb_mem_size);
481 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
482 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
484 /* For ThunderBolt memory init */
/* 0x100 alignment: the TB init frame must be 256-byte aligned. */
485 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
486 0x100, 0, /* alignmnt, boundary */
487 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
488 BUS_SPACE_MAXADDR, /* highaddr */
489 MFI_FRAME_SIZE, /* maxsize */
491 MFI_FRAME_SIZE, /* maxsegsize */
493 &sc->mfi_tb_init_dmat)) {
494 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
497 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
498 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
499 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
502 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
503 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
504 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
505 &sc->mfi_tb_init_busaddr, 0);
506 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
508 device_printf(sc->mfi_dev,
509 "Thunderbolt pool preparation error\n");
514 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
515 we are taking it diffrent from what we have allocated for Request
516 and reply descriptors to avoid confusion later
518 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
519 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
520 1, 0, /* algnmnt, boundary */
521 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
522 BUS_SPACE_MAXADDR, /* highaddr */
523 tb_mem_size, /* maxsize */
525 tb_mem_size, /* maxsegsize */
527 &sc->mfi_tb_ioc_init_dmat)) {
528 device_printf(sc->mfi_dev,
529 "Cannot allocate comms DMA tag\n");
532 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
533 (void **)&sc->mfi_tb_ioc_init_desc,
534 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
535 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
538 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
539 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
540 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
541 &sc->mfi_tb_ioc_init_busaddr, 0);
544 * Create the dma tag for data buffers. Used both for block I/O
545 * and for various internal data queries.
547 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
548 1, 0, /* algnmnt, boundary */
549 BUS_SPACE_MAXADDR, /* lowaddr */
550 BUS_SPACE_MAXADDR, /* highaddr */
551 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
552 sc->mfi_max_sge, /* nsegments */
553 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
554 BUS_DMA_ALLOCNOW, /* flags */
555 &sc->mfi_buffer_dmat)) {
556 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
561 * Allocate DMA memory for the comms queues. Keep it under 4GB for
562 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
563 * entry, so the calculated size here will be will be 1 more than
564 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
566 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
567 sizeof(struct mfi_hwcomms);
568 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
569 1, 0, /* algnmnt, boundary */
570 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
571 BUS_SPACE_MAXADDR, /* highaddr */
572 commsz, /* maxsize */
574 commsz, /* maxsegsize */
576 &sc->mfi_comms_dmat)) {
577 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
580 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
581 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
582 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
585 bzero(sc->mfi_comms, commsz);
586 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
587 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
589 * Allocate DMA memory for the command frames. Keep them in the
590 * lower 4GB for efficiency. Calculate the size of the commands at
591 * the same time; each command is one 64 byte frame plus a set of
592 * additional frames for holding sg lists or other data.
593 * The assumption here is that the SG list will start at the second
594 * frame and not use the unused bytes in the first frame. While this
595 * isn't technically correct, it simplifies the calculation and allows
596 * for command frames that might be larger than an mfi_io_frame.
598 if (sizeof(bus_addr_t) == 8) {
599 sc->mfi_sge_size = sizeof(struct mfi_sg64);
600 sc->mfi_flags |= MFI_FLAGS_SG64;
602 sc->mfi_sge_size = sizeof(struct mfi_sg32);
604 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
605 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
606 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
607 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
608 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
609 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
610 64, 0, /* algnmnt, boundary */
611 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
612 BUS_SPACE_MAXADDR, /* highaddr */
613 framessz, /* maxsize */
615 framessz, /* maxsegsize */
617 &sc->mfi_frames_dmat)) {
618 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
621 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
622 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
623 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
626 bzero(sc->mfi_frames, framessz);
627 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
628 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
630 * Allocate DMA memory for the frame sense data. Keep them in the
631 * lower 4GB for efficiency
633 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
634 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
635 4, 0, /* algnmnt, boundary */
636 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
637 BUS_SPACE_MAXADDR, /* highaddr */
638 sensesz, /* maxsize */
640 sensesz, /* maxsegsize */
642 &sc->mfi_sense_dmat)) {
643 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
646 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
647 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
648 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
651 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
652 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
653 if ((error = mfi_alloc_commands(sc)) != 0)
657 * Before moving the FW to operational state, check whether
658 * hostmemory is required by the FW or not
661 /* ThunderBolt MFI_IOC2 INIT */
662 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
663 sc->mfi_disable_intr(sc);
664 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
665 device_printf(sc->mfi_dev,
666 "TB Init has failed with error %d\n",error);
670 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
672 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
673 mfi_intr_tbolt, sc, &sc->mfi_intr, NULL)) {
674 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
677 sc->mfi_enable_intr(sc);
/* Non-ThunderBolt path: legacy MFI_INIT handshake + interrupt hook. */
680 if ((error = mfi_comms_init(sc)) != 0)
683 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
684 mfi_intr, sc, &sc->mfi_intr, NULL)) {
685 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
688 sc->mfi_enable_intr(sc);
690 if ((error = mfi_get_controller_info(sc)) != 0)
692 sc->disableOnlineCtrlReset = 0;
694 /* Register a config hook to probe the bus for arrays */
695 sc->mfi_ich.ich_func = mfi_startup;
696 sc->mfi_ich.ich_arg = sc;
697 sc->mfi_ich.ich_desc = "mfi";
698 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
699 device_printf(sc->mfi_dev, "Cannot establish configuration "
/*
 * BUG(review): the comma operator makes this condition
 * "(..., 0) != 0", which is always false — the mfi_aen_setup()
 * error is stored in 'error' but never acted on.  Likely intended:
 * "if ((error = mfi_aen_setup(sc, 0)) != 0)".  Left as-is here;
 * fix together with the (not visible) error-unwind path.
 */
703 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
704 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
709 * Register a shutdown handler.
711 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
712 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
713 device_printf(sc->mfi_dev, "Warning: shutdown event "
714 "registration failed\n");
718 * Create the control device for doing management
720 unit = device_get_unit(sc->mfi_dev);
721 sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
722 0640, "mfi%d", unit);
/* Alias expected by LSI's MegaCLI management tool. */
724 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
725 if (sc->mfi_cdev != NULL)
726 sc->mfi_cdev->si_drv1 = sc;
727 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
728 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
729 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
730 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
731 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
732 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
733 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
734 &sc->mfi_keep_deleted_volumes, 0,
735 "Don't detach the mfid device for a busy volume that is deleted");
737 device_add_child(sc->mfi_dev, "mfip", -1);
738 bus_generic_attach(sc->mfi_dev);
740 /* Start the timeout watchdog */
741 callout_init_mp(&sc->mfi_watchdog_callout);
742 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
/*
 * Carve the pre-allocated frame/sense DMA memory into mfi_command
 * structures.  The pool size is the lesser of the hw.mfi.max_cmds tunable
 * and the firmware-reported command limit.  Each command gets its slice
 * of the frame pool (virtual + bus address), its sense buffer slot, and a
 * per-command DMA map; successfully created commands are placed on the
 * free queue via mfi_release_command() under the I/O lock.
 */
749 mfi_alloc_commands(struct mfi_softc *sc)
751 struct mfi_command *cm;
755 * XXX Should we allocate all the commands up front, or allocate on
756 * demand later like 'aac' does?
758 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
760 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
761 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
763 sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
766 for (i = 0; i < ncmds; i++) {
767 cm = &sc->mfi_commands[i];
768 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
769 sc->mfi_cmd_size * i);
770 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
771 sc->mfi_cmd_size * i;
/* The frame context doubles as the command's pool index. */
772 cm->cm_frame->header.context = i;
773 cm->cm_sense = &sc->mfi_sense[i];
774 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
777 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
778 &cm->cm_dmamap) == 0) {
779 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
780 mfi_release_command(cm);
781 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
785 sc->mfi_total_cmds++;
/*
 * Return a command to the free pool.  Clears the frame header fields and
 * first S/G entry while preserving the context word (the pool index set
 * at allocation time), resets the per-command bookkeeping, and enqueues
 * the command on the free list.  Caller must hold the I/O lock.
 */
792 mfi_release_command(struct mfi_command *cm)
794 struct mfi_frame_header *hdr;
797 mfi_lockassert(&cm->cm_sc->mfi_io_lock);
800 * Zero out the important fields of the frame, but make sure the
801 * context field is preserved. For efficiency, handle the fields
802 * as 32 bit words. Clear out the first S/G entry too for safety.
804 hdr = &cm->cm_frame->header;
805 if (cm->cm_data != NULL && hdr->sg_count) {
806 cm->cm_sg->sg32[0].len = 0;
807 cm->cm_sg->sg32[0].addr = 0;
/* Words 2-3 hold the context and are deliberately not cleared. */
810 hdr_data = (uint32_t *)cm->cm_frame;
811 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
812 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
813 hdr_data[4] = 0; /* flags, timeout */
814 hdr_data[5] = 0; /* data_len */
816 cm->cm_extra_frames = 0;
818 cm->cm_complete = NULL;
819 cm->cm_private = NULL;
822 cm->cm_total_frame_size = 0;
823 cm->retry_for_fw_reset = 0;
825 mfi_enqueue_free(cm);
/*
 * Build a DCMD (direct command) frame for the given opcode.  Dequeues a
 * free command, zeroes its frame (preserving the context word), and, if a
 * data buffer is requested, either uses *bufp (caller-supplied) or
 * kmallocs a zeroed buffer of 'bufsize' which is handed back through
 * *bufp — in that case the caller owns the buffer and must kfree() it.
 * The prepared command is returned through *cmp.  Caller must hold the
 * I/O lock.
 */
829 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
830 uint32_t opcode, void **bufp, size_t bufsize)
832 struct mfi_command *cm;
833 struct mfi_dcmd_frame *dcmd;
835 uint32_t context = 0;
837 mfi_lockassert(&sc->mfi_io_lock);
839 cm = mfi_dequeue_free(sc);
843 /* Zero out the MFI frame */
844 context = cm->cm_frame->header.context;
845 bzero(cm->cm_frame, sizeof(union mfi_frame));
846 cm->cm_frame->header.context = context;
848 if ((bufsize > 0) && (bufp != NULL)) {
850 buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
/* Allocation failed: put the command back and bail out. */
852 mfi_release_command(cm);
861 dcmd = &cm->cm_frame->dcmd;
862 bzero(dcmd->mbox, MFI_MBOX_SIZE);
863 dcmd->header.cmd = MFI_CMD_DCMD;
864 dcmd->header.timeout = 0;
865 dcmd->header.flags = 0;
866 dcmd->header.data_len = bufsize;
867 dcmd->header.scsi_status = 0;
868 dcmd->opcode = opcode;
869 cm->cm_sg = &dcmd->sgl;
870 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
873 cm->cm_private = buf;
874 cm->cm_len = bufsize;
/* Hand a newly kmalloc'ed buffer back to the caller. */
877 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the legacy MFI_INIT handshake that tells the firmware where the
 * host comms queues live.  The init_qinfo structure is built in the S/G
 * area of the command's frame (one MFI_FRAME_SIZE past the frame start)
 * and its bus address is passed in the init frame.  The command is issued
 * polled; the command and the I/O lock are released before returning.
 */
883 mfi_comms_init(struct mfi_softc *sc)
885 struct mfi_command *cm;
886 struct mfi_init_frame *init;
887 struct mfi_init_qinfo *qinfo;
889 uint32_t context = 0;
891 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
892 if ((cm = mfi_dequeue_free(sc)) == NULL) {
893 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
897 /* Zero out the MFI frame */
898 context = cm->cm_frame->header.context;
899 bzero(cm->cm_frame, sizeof(union mfi_frame));
900 cm->cm_frame->header.context = context;
903 * Abuse the SG list area of the frame to hold the init_qinfo
906 init = &cm->cm_frame->init;
907 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
909 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* Reply queue has one more entry than the firmware command limit. */
910 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
911 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
912 offsetof(struct mfi_hwcomms, hw_reply_q);
913 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
914 offsetof(struct mfi_hwcomms, hw_pi);
915 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
916 offsetof(struct mfi_hwcomms, hw_ci);
918 init->header.cmd = MFI_CMD_INIT;
919 init->header.data_len = sizeof(struct mfi_init_qinfo);
/* Bus address of the qinfo staged one frame past the init frame. */
920 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
922 cm->cm_flags = MFI_CMD_POLLED;
924 if ((error = mfi_mapcmd(sc, cm)) != 0) {
925 device_printf(sc->mfi_dev, "failed to send init command\n");
926 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
929 mfi_release_command(cm);
930 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch controller properties via a polled MFI_DCMD_CTRL_GETINFO DCMD and
 * derive sc->mfi_max_io from the reported stripe/request limits.  On
 * DCMD failure a conservative fallback max_io is computed from the S/G
 * limit instead.  Also latches the firmware's online-controller-reset
 * capability into sc->disableOnlineCtrlReset.
 */
936 mfi_get_controller_info(struct mfi_softc *sc)
938 struct mfi_command *cm = NULL;
939 struct mfi_ctrl_info *ci = NULL;
940 uint32_t max_sectors_1, max_sectors_2;
943 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
944 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
945 (void **)&ci, sizeof(*ci));
948 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
950 if ((error = mfi_mapcmd(sc, cm)) != 0) {
951 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: size I/O from the S/G segment limit instead. */
952 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
958 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
959 BUS_DMASYNC_POSTREAD);
960 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
962 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
963 max_sectors_2 = ci->max_request_size;
964 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
965 sc->disableOnlineCtrlReset =
966 ci->properties.OnOffProperties.disableOnlineCtrlReset;
972 mfi_release_command(cm);
973 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Retrieve the firmware event-log sequence-number state via a polled
 * MFI_DCMD_CTRL_EVENT_GETINFO DCMD.  The result buffer is allocated by
 * mfi_dcmd_command() and returned through *log_state; the caller owns it
 * and must kfree() it (see mfi_aen_setup()).
 */
978 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
980 struct mfi_command *cm = NULL;
983 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
984 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
985 (void **)log_state, sizeof(**log_state));
988 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
990 if ((error = mfi_mapcmd(sc, cm)) != 0) {
991 device_printf(sc->mfi_dev, "Failed to get log state\n");
995 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
996 BUS_DMASYNC_POSTREAD);
997 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1001 mfi_release_command(cm);
1002 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Arm asynchronous event notification (AEN).  When starting from seq 0,
 * query the firmware event log, replay events recorded between the last
 * shutdown and now via mfi_parse_entries(), and register for new events
 * from the newest sequence number using the locale/class tunables.
 */
1008 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1010 struct mfi_evt_log_state *log_state = NULL;
1011 union mfi_evt class_locale;
1015 class_locale.members.reserved = 0;
1016 class_locale.members.locale = mfi_event_locale;
1017 class_locale.members.evt_class = mfi_event_class;
1019 if (seq_start == 0) {
1020 error = mfi_get_log_state(sc, &log_state);
/* NOTE(review): log_state is dereferenced here with no NULL/error
 * check visible in this view — confirm mfi_get_log_state() failure
 * is handled before this point. */
1021 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1024 kfree(log_state, M_MFIBUF);
1029 * Walk through any events that fired since the last
1032 mfi_parse_entries(sc, log_state->shutdown_seq_num,
1033 log_state->newest_seq_num);
1034 seq = log_state->newest_seq_num;
1037 mfi_aen_register(sc, seq, class_locale.word);
1038 if (log_state != NULL)
1039 kfree(log_state, M_MFIBUF);
/*
 * Queue a command and sleep until its completion interrupt wakes us.
 * Caller must hold the I/O lock (dropped/reacquired by lksleep).  A DCMD
 * opcode of 0 (issued by MegaCli) is completed immediately with
 * MFI_STAT_OK rather than being sent to the firmware.  Returns the
 * command's final error status.
 */
1045 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1048 mfi_lockassert(&sc->mfi_io_lock);
/* Synchronous path: completion is signaled by wakeup, not callback. */
1049 cm->cm_complete = NULL;
1053 * MegaCli can issue a DCMD of 0. In this case do nothing
1054 * and return 0 to it as status
1056 if (cm->cm_frame->dcmd.opcode == 0) {
1057 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1059 return (cm->cm_error);
1061 mfi_enqueue_ready(cm);
1063 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1064 lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
1065 return (cm->cm_error);
/*
 * Tear down the softc on detach: stop the watchdog, remove the cdev,
 * destroy per-command DMA maps, release the interrupt, then unload/free/
 * destroy each DMA area in turn (sense, frames, comms, and — on
 * ThunderBolt controllers — the request pool, version buffer, INIT
 * packet and IOC init descriptor), and finally the tags and locks.
 * Each resource is guarded by a NULL/0 check so the function is safe to
 * call from a partially-failed attach.
 * NOTE(review): extract omits intermediate lines (closing braces etc.);
 * comments describe only the visible statements.
 */
1069 mfi_free(struct mfi_softc *sc)
1071 struct mfi_command *cm;
1074 callout_terminate(&sc->mfi_watchdog_callout);
1076 if (sc->mfi_cdev != NULL)
1077 destroy_dev(sc->mfi_cdev);
1078 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
/* Destroy the per-command DMA maps before freeing the command array. */
1080 if (sc->mfi_total_cmds != 0) {
1081 for (i = 0; i < sc->mfi_total_cmds; i++) {
1082 cm = &sc->mfi_commands[i];
1083 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1085 kfree(sc->mfi_commands, M_MFIBUF);
1089 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1090 if (sc->mfi_irq != NULL)
1091 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer area: unload map, free memory, destroy tag — in order. */
1094 if (sc->mfi_sense_busaddr != 0)
1095 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1096 if (sc->mfi_sense != NULL)
1097 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1098 sc->mfi_sense_dmamap);
1099 if (sc->mfi_sense_dmat != NULL)
1100 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Command frame area. */
1102 if (sc->mfi_frames_busaddr != 0)
1103 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1104 if (sc->mfi_frames != NULL)
1105 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1106 sc->mfi_frames_dmamap);
1107 if (sc->mfi_frames_dmat != NULL)
1108 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Producer/consumer communication area. */
1110 if (sc->mfi_comms_busaddr != 0)
1111 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1112 if (sc->mfi_comms != NULL)
1113 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1114 sc->mfi_comms_dmamap);
1115 if (sc->mfi_comms_dmat != NULL)
1116 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1118 /* ThunderBolt contiguous memory free here */
1119 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1120 if (sc->mfi_tb_busaddr != 0)
1121 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1122 if (sc->request_message_pool != NULL)
1123 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1125 if (sc->mfi_tb_dmat != NULL)
1126 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1128 /* Version buffer memory free */
1129 /* Start LSIP200113393 */
1130 if (sc->verbuf_h_busaddr != 0)
1131 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1132 if (sc->verbuf != NULL)
1133 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1134 sc->verbuf_h_dmamap);
1135 if (sc->verbuf_h_dmat != NULL)
1136 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1138 /* End LSIP200113393 */
1139 /* ThunderBolt INIT packet memory Free */
1140 if (sc->mfi_tb_init_busaddr != 0)
1141 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1142 if (sc->mfi_tb_init != NULL)
1143 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1144 sc->mfi_tb_init_dmamap);
1145 if (sc->mfi_tb_init_dmat != NULL)
1146 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1148 /* ThunderBolt IOC Init Desc memory free here */
1149 if (sc->mfi_tb_ioc_init_busaddr != 0)
1150 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1151 sc->mfi_tb_ioc_init_dmamap);
1152 if (sc->mfi_tb_ioc_init_desc != NULL)
1153 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1154 sc->mfi_tb_ioc_init_desc,
1155 sc->mfi_tb_ioc_init_dmamap);
1156 if (sc->mfi_tb_ioc_init_dmat != NULL)
1157 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Free each per-command ThunderBolt pool entry, then the pool itself. */
1158 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1159 if (sc->mfi_cmd_pool_tbolt != NULL) {
1160 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1161 kfree(sc->mfi_cmd_pool_tbolt[i],
1163 sc->mfi_cmd_pool_tbolt[i] = NULL;
1167 if (sc->mfi_cmd_pool_tbolt != NULL) {
1168 kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1169 sc->mfi_cmd_pool_tbolt = NULL;
1171 if (sc->request_desc_pool != NULL) {
1172 kfree(sc->request_desc_pool, M_MFIBUF);
1173 sc->request_desc_pool = NULL;
1176 if (sc->mfi_buffer_dmat != NULL)
1177 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1178 if (sc->mfi_parent_dmat != NULL)
1179 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1181 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1182 if (mtx_initialized(&sc->mfi_io_lock))
1185 lockuninit(&sc->mfi_io_lock);
1186 lockuninit(&sc->mfi_config_lock);
/*
 * Deferred attach hook (config_intrhook): runs once interrupts are
 * available.  Disestablishes the hook, enables controller interrupts,
 * and probes logical/system disks under both driver locks.
 * NOTE(review): extract omits intermediate lines (the probe calls on
 * the SKINNY branch are not visible here).
 */
1193 mfi_startup(void *arg)
1195 struct mfi_softc *sc;
1197 sc = (struct mfi_softc *)arg;
1199 config_intrhook_disestablish(&sc->mfi_ich);
1201 sc->mfi_enable_intr(sc);
/* Lock order: config lock before io lock (matches the rest of the file). */
1202 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1203 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1205 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1207 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1208 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Interrupt handler body (the function header line is not visible in
 * this extract; presumably mfi_intr — TODO confirm).  Drains the
 * hardware reply queue between the consumer (ci) and producer (pi)
 * indexes, completing each referenced command, then restarts deferred
 * I/O and re-checks the producer index after a flushing read.
 */
1214 struct mfi_softc *sc;
1215 struct mfi_command *cm;
1216 uint32_t pi, ci, context;
1218 sc = (struct mfi_softc *)arg;
/* Shared-interrupt filter: bail if this device did not interrupt. */
1220 if (sc->mfi_check_clear_intr(sc))
1224 pi = sc->mfi_comms->hw_pi;
1225 ci = sc->mfi_comms->hw_ci;
1226 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1228 context = sc->mfi_comms->hw_reply_q[ci];
/* Context indexes the command array; out-of-range entries are skipped. */
1229 if (context < sc->mfi_max_fw_cmds) {
1230 cm = &sc->mfi_commands[context];
1231 mfi_remove_busy(cm);
1233 mfi_complete(sc, cm);
/* Reply queue holds mfi_max_fw_cmds + 1 slots; wrap the consumer index. */
1235 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1240 sc->mfi_comms->hw_ci = ci;
1242 /* Give defered I/O a chance to run */
1243 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1244 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1246 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1249 * Dummy read to flush the bus; this ensures that the indexes are up
1250 * to date. Restart processing if more commands have come it.
1252 (void)sc->mfi_read_fw_status(sc);
1253 if (pi != sc->mfi_comms->hw_pi)
/*
 * Send a polled MFI_DCMD_CTRL_SHUTDOWN to the controller, first aborting
 * any outstanding AEN and map-update commands so nothing is in flight.
 * NOTE(review): extract omits intermediate lines.
 */
1260 mfi_shutdown(struct mfi_softc *sc)
1262 struct mfi_dcmd_frame *dcmd;
1263 struct mfi_command *cm;
1266 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1267 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1269 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Cancel the standing AEN wait and any pending map update first. */
1273 if (sc->mfi_aen_cm != NULL)
1274 mfi_abort(sc, sc->mfi_aen_cm);
1276 if (sc->map_update_cmd != NULL)
1277 mfi_abort(sc, sc->map_update_cmd);
1279 dcmd = &cm->cm_frame->dcmd;
/* No data phase for the shutdown DCMD. */
1280 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1281 cm->cm_flags = MFI_CMD_POLLED;
1284 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1285 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1288 mfi_release_command(cm);
1289 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Reconcile the set of attached system (JBOD) physical disks with the
 * controller's current PD list: query the exposed-to-host PD list via a
 * polled DCMD, attach any new system PDs, and detach children that no
 * longer appear in the list.  Called with both config and io locks held.
 * NOTE(review): extract omits intermediate lines; loop/branch bodies are
 * only partially visible.
 */
1294 mfi_syspdprobe(struct mfi_softc *sc)
1296 struct mfi_frame_header *hdr;
1297 struct mfi_command *cm = NULL;
1298 struct mfi_pd_list *pdlist = NULL;
1299 struct mfi_system_pd *syspd, *tmp;
1300 int error, i, found;
1302 mfi_lockassert(&sc->mfi_config_lock);
1303 mfi_lockassert(&sc->mfi_io_lock);
1304 /* Add SYSTEM PD's */
1305 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1306 (void **)&pdlist, sizeof(*pdlist));
1308 device_printf(sc->mfi_dev,
1309 "Error while forming SYSTEM PD list\n");
1313 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
/* Only PDs exposed to the host are of interest. */
1314 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1315 cm->cm_frame->dcmd.mbox[1] = 0;
1316 if (mfi_mapcmd(sc, cm) != 0) {
1317 device_printf(sc->mfi_dev,
1318 "Failed to get syspd device listing\n");
1321 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1322 BUS_DMASYNC_POSTREAD);
1323 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1324 hdr = &cm->cm_frame->header;
1325 if (hdr->cmd_status != MFI_STAT_OK) {
1326 device_printf(sc->mfi_dev,
1327 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1330 /* Get each PD and add it to the system */
1331 for (i = 0; i < pdlist->count; i++) {
/* Skip enclosure entries (device id equal to its enclosure id). */
1332 if (pdlist->addr[i].device_id ==
1333 pdlist->addr[i].encl_device_id)
1336 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1337 if (syspd->pd_id == pdlist->addr[i].device_id)
1341 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1343 /* Delete SYSPD's whose state has been changed */
1344 TAILQ_FOREACH_MUTABLE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1346 for (i = 0; i < pdlist->count; i++) {
1347 if (syspd->pd_id == pdlist->addr[i].device_id)
1351 kprintf("DELETE\n");
/* Drop the io lock across device_delete_child() to avoid deadlock. */
1352 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1354 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1356 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1361 kfree(pdlist, M_MFIBUF);
1363 mfi_release_command(cm);
/*
 * Probe for logical drives: fetch the LD list with a sleeping
 * MFI_DCMD_LD_GET_LIST DCMD and attach a child device for every target
 * id not already represented on mfi_ld_tqh.  Called with both config
 * and io locks held.
 * NOTE(review): extract omits intermediate lines.
 */
1367 mfi_ldprobe(struct mfi_softc *sc)
1369 struct mfi_frame_header *hdr;
1370 struct mfi_command *cm = NULL;
1371 struct mfi_ld_list *list = NULL;
1372 struct mfi_disk *ld;
1375 mfi_lockassert(&sc->mfi_config_lock);
1376 mfi_lockassert(&sc->mfi_io_lock);
1378 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1379 (void **)&list, sizeof(*list));
1383 cm->cm_flags = MFI_CMD_DATAIN;
/* Sleeping wait (not polled) — this path runs in process context. */
1384 if (mfi_wait_command(sc, cm) != 0) {
1385 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1389 hdr = &cm->cm_frame->header;
1390 if (hdr->cmd_status != MFI_STAT_OK) {
1391 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Attach only LDs that are not already known to the driver. */
1396 for (i = 0; i < list->ld_count; i++) {
1397 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1398 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1401 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1406 kfree(list, M_MFIBUF);
1408 mfi_release_command(cm);
1414 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1415 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp into a static buffer (not thread-safe;
 * intended for single-threaded event decoding).  Timestamps with the
 * top byte all-ones are relative to boot rather than the epoch.
 * NOTE(review): extract omits intermediate lines.
 */
1419 format_timestamp(uint32_t timestamp)
1421 static char buffer[32];
1423 if ((timestamp & 0xff000000) == 0xff000000)
1424 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1427 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a short human-readable string.
 * Unknown codes are formatted numerically into a static buffer
 * (not thread-safe).  NOTE(review): extract omits the return
 * statements for the named cases.
 */
1432 format_class(int8_t class)
1434 static char buffer[6];
1437 case MFI_EVT_CLASS_DEBUG:
1439 case MFI_EVT_CLASS_PROGRESS:
1440 return ("progress");
1441 case MFI_EVT_CLASS_INFO:
1443 case MFI_EVT_CLASS_WARNING:
1445 case MFI_EVT_CLASS_CRITICAL:
1447 case MFI_EVT_CLASS_FATAL:
1449 case MFI_EVT_CLASS_DEAD:
1452 ksnprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log an AEN event and act on it: rescan for JBOD changes on host-bus
 * scan requests and PD insert/remove events, and detach the child device
 * of a logical volume that went offline.  Events older than the boot
 * sequence number, or arriving during detach, are ignored.
 * NOTE(review): extract omits intermediate lines; the detach paths are
 * only partially visible.
 */
1458 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1460 struct mfi_system_pd *syspd = NULL;
1462 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1463 format_timestamp(detail->time), detail->evt_class.members.locale,
1464 format_class(detail->evt_class.members.evt_class),
1465 detail->description);
1467 /* Don't act on old AEN's or while shutting down */
1468 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1471 switch (detail->arg_type) {
1472 case MR_EVT_ARGS_NONE:
1473 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1474 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1475 if (mfi_detect_jbod_change) {
1477 * Probe for new SYSPD's and Delete
1480 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1481 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1483 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1484 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1488 case MR_EVT_ARGS_LD_STATE:
1490 * During load time driver reads all the events starting
1491 * from the one that has been logged after shutdown. Avoid
1494 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1496 struct mfi_disk *ld;
/* Find the mfi_disk matching the target id of the offlined LD. */
1497 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1499 detail->args.ld_state.ld.target_id)
1503 Fix: for kernel panics when SSCD is removed
1504 KASSERT(ld != NULL, ("volume dissappeared"));
1508 device_delete_child(sc->mfi_dev, ld->ld_dev);
1513 case MR_EVT_ARGS_PD:
1514 if (detail->code == MR_EVT_PD_REMOVED) {
1515 if (mfi_detect_jbod_change) {
1517 * If the removed device is a SYSPD then
1520 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1523 detail->args.pd.device_id) {
1525 device_delete_child(
1534 if (detail->code == MR_EVT_PD_INSERTED) {
1535 if (mfi_detect_jbod_change) {
1536 /* Probe for new SYSPD's */
1537 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1538 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1540 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1541 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Copy an event detail into a freshly allocated queue element, append it
 * to the softc event queue, and kick the taskqueue that drains it
 * (mfi_handle_evt).  Uses M_NOWAIT since this can run from the
 * interrupt/completion path; allocation failure drops the event.
 * NOTE(review): extract omits intermediate lines.
 */
1549 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1551 struct mfi_evt_queue_elm *elm;
1553 mfi_lockassert(&sc->mfi_io_lock);
1554 elm = kmalloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
1557 memcpy(&elm->detail, detail, sizeof(*detail));
1558 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1559 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: atomically steal the entire pending event queue
 * under the io lock, then decode and free each element without holding
 * the lock (mfi_decode_evt may attach/detach child devices).
 * NOTE(review): extract omits intermediate lines.
 */
1563 mfi_handle_evt(void *context, int pending)
1565 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1566 struct mfi_softc *sc;
1567 struct mfi_evt_queue_elm *elm;
1571 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Move all queued elements to the local list in one O(1) operation. */
1572 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1573 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1574 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1575 TAILQ_REMOVE(&queue, elm, link);
1576 mfi_decode_evt(sc, &elm->detail);
1577 kfree(elm, M_MFIBUF);
/*
 * Register (or re-register) the standing MFI_DCMD_CTRL_EVENT_WAIT
 * command.  If an AEN is already outstanding, either the new class/
 * locale is already covered (nothing to do) or the filters are merged
 * and the old command is aborted before issuing the widened one.
 * NOTE(review): extract omits intermediate lines.
 */
1582 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1584 struct mfi_command *cm;
1585 struct mfi_dcmd_frame *dcmd;
1586 union mfi_evt current_aen, prior_aen;
1587 struct mfi_evt_detail *ed = NULL;
1590 current_aen.word = locale;
1591 if (sc->mfi_aen_cm != NULL) {
/* mbox word 1 of the outstanding command holds its class/locale filter. */
1593 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Covered if prior class is at least as verbose and locale is a superset. */
1594 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1595 !((prior_aen.members.locale & current_aen.members.locale)
1596 ^current_aen.members.locale)) {
/* Otherwise widen the filter to the union and re-register. */
1599 prior_aen.members.locale |= current_aen.members.locale;
1600 if (prior_aen.members.evt_class
1601 < current_aen.members.evt_class)
1602 current_aen.members.evt_class =
1603 prior_aen.members.evt_class;
1604 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1605 mfi_abort(sc, sc->mfi_aen_cm);
1606 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1610 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1611 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1612 (void **)&ed, sizeof(*ed));
1613 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1618 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale filter. */
1619 ((uint32_t *)&dcmd->mbox)[0] = seq;
1620 ((uint32_t *)&dcmd->mbox)[1] = locale;
1621 cm->cm_flags = MFI_CMD_DATAIN;
1622 cm->cm_complete = mfi_aen_complete;
1624 sc->last_seq_num = seq;
1625 sc->mfi_aen_cm = cm;
1627 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1628 mfi_enqueue_ready(cm);
1630 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Completion callback for the standing AEN command.  Unless the command
 * was aborted, queues the received event for the taskqueue, notifies
 * kqueue pollers and registered PIDs, then re-arms AEN at the next
 * sequence number (dropping the io lock across mfi_aen_setup).
 * NOTE(review): extract omits intermediate lines; the abort/signal
 * paths are only partially visible.
 */
1637 mfi_aen_complete(struct mfi_command *cm)
1639 struct mfi_frame_header *hdr;
1640 struct mfi_softc *sc;
1641 struct mfi_evt_detail *detail;
1642 struct mfi_aen *mfi_aen_entry, *tmp;
1644 int seq = 0, aborted = 0;
1647 mfi_lockassert(&sc->mfi_io_lock);
1649 hdr = &cm->cm_frame->header;
1651 if (sc->mfi_aen_cm == NULL)
/* An aborted or invalid-status completion must not re-queue the event. */
1654 if (sc->mfi_aen_cm->cm_aen_abort ||
1655 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1656 sc->mfi_aen_cm->cm_aen_abort = 0;
1659 sc->mfi_aen_triggered = 1;
/* Wake kevent()-based pollers waiting for AEN activity. */
1660 if (sc->mfi_poll_waiting) {
1661 sc->mfi_poll_waiting = 0;
1662 KNOTE(&sc->mfi_kq.ki_note, 0);
1664 detail = cm->cm_data;
1665 mfi_queue_evt(sc, detail);
/* Next registration starts one past the event just delivered. */
1666 seq = detail->seq + 1;
1667 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
1669 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1671 p = mfi_aen_entry->p;
1675 kfree(mfi_aen_entry, M_MFIBUF);
1679 kfree(cm->cm_data, M_MFIBUF);
1680 sc->mfi_aen_cm = NULL;
/* mfi_abort() sleeps on &sc->mfi_aen_cm; wake it now that we're done. */
1681 wakeup(&sc->mfi_aen_cm);
1682 mfi_release_command(cm);
1684 /* set it up again so the driver can catch more events */
1686 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1687 mfi_aen_setup(sc, seq);
1688 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1692 #define MAX_EVENTS 15
/*
 * Replay controller event-log entries between start_seq and stop_seq:
 * repeatedly issue polled MFI_DCMD_CTRL_EVENT_GET DCMDs (MAX_EVENTS per
 * batch), queue each event, and advance the sequence number until the
 * log reports NOT_FOUND or stop_seq is reached.  The log is a circular
 * buffer, so the stop point may be numerically below the start point.
 * NOTE(review): extract omits intermediate lines (e.g. the MAX_EVENTS
 * multiplier in the size calculation and loop break statements).
 */
1695 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1697 struct mfi_command *cm;
1698 struct mfi_dcmd_frame *dcmd;
1699 struct mfi_evt_list *el;
1700 union mfi_evt class_locale;
1701 int error, i, seq, size;
1703 class_locale.members.reserved = 0;
1704 class_locale.members.locale = mfi_event_locale;
1705 class_locale.members.evt_class = mfi_event_class;
1707 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1709 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1713 for (seq = start_seq;;) {
1714 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1715 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1716 kfree(el, M_MFIBUF);
1717 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1721 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Build the EVENT_GET DCMD by hand (no data buffer allocation needed). */
1723 dcmd = &cm->cm_frame->dcmd;
1724 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1725 dcmd->header.cmd = MFI_CMD_DCMD;
1726 dcmd->header.timeout = 0;
1727 dcmd->header.data_len = size;
1728 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1729 ((uint32_t *)&dcmd->mbox)[0] = seq;
1730 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1731 cm->cm_sg = &dcmd->sgl;
1732 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1733 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1737 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1738 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1739 device_printf(sc->mfi_dev,
1740 "Failed to get controller entries\n");
1741 mfi_release_command(cm);
1742 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1746 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1747 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1748 BUS_DMASYNC_POSTREAD);
1749 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND marks the end of the log — normal loop termination. */
1751 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1752 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1753 mfi_release_command(cm);
1754 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1757 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1758 device_printf(sc->mfi_dev,
1759 "Error %d fetching controller entries\n",
1760 dcmd->header.cmd_status);
1761 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1762 mfi_release_command(cm);
1763 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1766 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1767 mfi_release_command(cm);
1768 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1770 for (i = 0; i < el->count; i++) {
1772 * If this event is newer than 'stop_seq' then
1773 * break out of the loop. Note that the log
1774 * is a circular buffer so we have to handle
1775 * the case that our stop point is earlier in
1776 * the buffer than our start point.
1778 if (el->event[i].seq >= stop_seq) {
1779 if (start_seq <= stop_seq)
1781 else if (el->event[i].seq < start_seq)
1784 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1785 mfi_queue_evt(sc, &el->event[i]);
1786 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Next batch starts after the last event received in this one. */
1788 seq = el->event[el->count - 1].seq + 1;
1791 kfree(el, M_MFIBUF);
/*
 * Attach a logical drive by target id: fetch its info with a sleeping
 * MFI_DCMD_LD_GET_INFO DCMD and, unless the drive is an SSCD (CacheCade)
 * volume, hand off to mfi_add_ld_complete() which creates the child
 * device.  SSCD volumes are skipped and their ld_info freed here.
 * NOTE(review): extract omits intermediate lines.
 */
1796 mfi_add_ld(struct mfi_softc *sc, int id)
1798 struct mfi_command *cm;
1799 struct mfi_dcmd_frame *dcmd = NULL;
1800 struct mfi_ld_info *ld_info = NULL;
1803 mfi_lockassert(&sc->mfi_io_lock);
1805 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1806 (void **)&ld_info, sizeof(*ld_info));
1808 device_printf(sc->mfi_dev,
1809 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1811 kfree(ld_info, M_MFIBUF);
1814 cm->cm_flags = MFI_CMD_DATAIN;
1815 dcmd = &cm->cm_frame->dcmd;
1817 if (mfi_wait_command(sc, cm) != 0) {
1818 device_printf(sc->mfi_dev,
1819 "Failed to get logical drive: %d\n", id);
1820 kfree(ld_info, M_MFIBUF);
/* Non-SSCD drives are attached; ld_info ownership passes to complete(). */
1823 if (ld_info->ld_config.params.isSSCD != 1) {
1824 mfi_add_ld_complete(cm);
1826 mfi_release_command(cm);
1827 if (ld_info) /* SSCD drives ld_info free here */
1828 kfree(ld_info, M_MFIBUF);
/*
 * Second half of logical-drive attach: verify the DCMD status, release
 * the command, then (with the io lock dropped — newbus code may sleep)
 * add an "mfid" child device carrying ld_info as its ivars and attach it.
 * ld_info is freed on any failure path, otherwise owned by the child.
 * NOTE(review): extract omits intermediate lines.
 */
1834 mfi_add_ld_complete(struct mfi_command *cm)
1836 struct mfi_frame_header *hdr;
1837 struct mfi_ld_info *ld_info;
1838 struct mfi_softc *sc;
1842 hdr = &cm->cm_frame->header;
1843 ld_info = cm->cm_private;
1845 if (hdr->cmd_status != MFI_STAT_OK) {
1846 kfree(ld_info, M_MFIBUF);
1847 mfi_release_command(cm);
1850 mfi_release_command(cm);
/* Drop the io lock across newbus calls, which may sleep. */
1852 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1854 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1855 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1856 kfree(ld_info, M_MFIBUF);
1858 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1862 device_set_ivars(child, ld_info);
1863 device_set_desc(child, "MFI Logical Disk");
1864 bus_generic_attach(sc->mfi_dev);
1866 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Attach a system (JBOD) physical disk by device id: fetch its info with
 * a polled MFI_DCMD_PD_GET_INFO DCMD and hand off to
 * mfi_add_sys_pd_complete() which validates the state and creates the
 * child device.  NOTE(review): extract omits intermediate lines (e.g.
 * where the device id is written into the mbox).
 */
1870 mfi_add_sys_pd(struct mfi_softc *sc, int id)
1872 struct mfi_command *cm;
1873 struct mfi_dcmd_frame *dcmd = NULL;
1874 struct mfi_pd_info *pd_info = NULL;
1877 mfi_lockassert(&sc->mfi_io_lock);
1879 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1880 (void **)&pd_info, sizeof(*pd_info));
1882 device_printf(sc->mfi_dev,
1883 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1886 kfree(pd_info, M_MFIBUF);
1889 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1890 dcmd = &cm->cm_frame->dcmd;
1892 dcmd->header.scsi_status = 0;
1893 dcmd->header.pad0 = 0;
1894 if (mfi_mapcmd(sc, cm) != 0) {
1895 device_printf(sc->mfi_dev,
1896 "Failed to get physical drive info %d\n", id);
1897 kfree(pd_info, M_MFIBUF);
1900 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1901 BUS_DMASYNC_POSTREAD);
1902 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1903 mfi_add_sys_pd_complete(cm);
/*
 * Second half of system-PD attach: verify DCMD status and that the PD is
 * actually in SYSTEM state, release the command, then (io lock dropped)
 * add an "mfisyspd" child carrying pd_info as ivars and attach it.
 * pd_info is freed on every failure path, otherwise owned by the child.
 * NOTE(review): extract omits intermediate lines.
 */
1908 mfi_add_sys_pd_complete(struct mfi_command *cm)
1910 struct mfi_frame_header *hdr;
1911 struct mfi_pd_info *pd_info;
1912 struct mfi_softc *sc;
1916 hdr = &cm->cm_frame->header;
1917 pd_info = cm->cm_private;
1919 if (hdr->cmd_status != MFI_STAT_OK) {
1920 kfree(pd_info, M_MFIBUF);
1921 mfi_release_command(cm);
/* Only PDs the firmware reports as SYSTEM are eligible children. */
1924 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1925 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1926 pd_info->ref.v.device_id);
1927 kfree(pd_info, M_MFIBUF);
1928 mfi_release_command(cm);
1931 mfi_release_command(cm);
/* Drop the io lock across newbus calls, which may sleep. */
1933 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1935 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1936 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1937 kfree(pd_info, M_MFIBUF);
1939 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1943 device_set_ivars(child, pd_info);
1944 device_set_desc(child, "MFI System PD");
1945 bus_generic_attach(sc->mfi_dev);
1947 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Pull the next bio off the driver's bio queue and build an MFI command
 * for it — a pass-through SCSI frame for system PDs, an LD I/O frame
 * otherwise.  If no command could be built the bio is re-queued.
 * Reserves two free commands so ioctls cannot be starved by I/O.
 * NOTE(review): extract omits intermediate lines (early returns).
 */
1950 static struct mfi_command *
1951 mfi_bio_command(struct mfi_softc *sc)
1954 struct mfi_command *cm = NULL;
1955 struct mfi_disk *mfid;
1957 /* reserving two commands to avoid starvation for IOCTL */
1958 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1960 if ((bio = mfi_dequeue_bio(sc)) == NULL)
1962 mfid = bio->bio_driver_info;
1963 if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1964 cm = mfi_build_syspdio(sc, bio);
1966 cm = mfi_build_ldio(sc, bio);
/* Builder failed (no free command): put the bio back for later. */
1968 mfi_enqueue_bio(sc, bio);
/*
 * Build a SCSI pass-through (READ_10/WRITE_10) command frame for a bio
 * targeting a system PD.  The LBA and block count are encoded directly
 * into the 10-byte CDB; data buffer and length come from the buf.
 * Returns NULL if no free command is available.
 * NOTE(review): extract omits intermediate lines (e.g. where bp is
 * derived from bio).
 */
1972 static struct mfi_command *
1973 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1975 struct mfi_command *cm;
1977 struct mfi_system_pd *disk;
1978 struct mfi_pass_frame *pass;
1979 int flags = 0, blkcount = 0;
1980 uint32_t context = 0;
1982 if ((cm = mfi_dequeue_free(sc)) == NULL)
1985 /* Zero out the MFI frame */
/* Preserve the context across the bzero — it identifies the command. */
1986 context = cm->cm_frame->header.context;
1987 bzero(cm->cm_frame, sizeof(union mfi_frame));
1988 cm->cm_frame->header.context = context;
1990 pass = &cm->cm_frame->pass;
1991 bzero(pass->cdb, 16);
1992 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
1993 switch (bp->b_cmd & 0x03) {
1995 pass->cdb[0] = READ_10;
1996 flags = MFI_CMD_DATAIN;
1999 pass->cdb[0] = WRITE_10;
2000 flags = MFI_CMD_DATAOUT;
2003 panic("Invalid bio command");
2006 /* Cheat with the sector length to avoid a non-constant division */
2007 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2008 disk = bio->bio_driver_info;
2009 /* Fill the LBA and Transfer length in CDB */
/* 32-bit LBA, big-endian, in CDB bytes 2-5 as READ_10/WRITE_10 require. */
2010 pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
2011 pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
2012 pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
2013 pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
2014 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2015 pass->cdb[8] = (blkcount & 0x00ff);
2016 pass->header.target_id = disk->pd_id;
2017 pass->header.timeout = 0;
2018 pass->header.flags = 0;
2019 pass->header.scsi_status = 0;
2020 pass->header.sense_len = MFI_SENSE_LEN;
2021 pass->header.data_len = bp->b_bcount;
2022 pass->header.cdb_len = 10;
2023 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2024 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2025 cm->cm_complete = mfi_bio_complete;
2026 cm->cm_private = bio;
2027 cm->cm_data = bp->b_data;
2028 cm->cm_len = bp->b_bcount;
2029 cm->cm_sg = &pass->sgl;
2030 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2031 cm->cm_flags = flags;
/*
 * Build an LD read/write command frame for a bio targeting a logical
 * drive.  Unlike the pass-through path, the 64-bit LBA and block count
 * go directly into the io frame fields.  Returns NULL if no free
 * command is available.
 * NOTE(review): extract omits intermediate lines (e.g. where bp is
 * derived from bio).
 */
2035 static struct mfi_command *
2036 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2038 struct mfi_io_frame *io;
2040 struct mfi_disk *disk;
2041 struct mfi_command *cm;
2042 int flags, blkcount;
2043 uint32_t context = 0;
2045 if ((cm = mfi_dequeue_free(sc)) == NULL)
2048 /* Zero out the MFI frame */
/* Preserve the context across the bzero — it identifies the command. */
2049 context = cm->cm_frame->header.context;
2050 bzero(cm->cm_frame, sizeof(union mfi_frame));
2051 cm->cm_frame->header.context = context;
2053 io = &cm->cm_frame->io;
2054 switch (bp->b_cmd & 0x03) {
2056 io->header.cmd = MFI_CMD_LD_READ;
2057 flags = MFI_CMD_DATAIN;
2060 io->header.cmd = MFI_CMD_LD_WRITE;
2061 flags = MFI_CMD_DATAOUT;
2064 panic("Invalid bio command");
2067 /* Cheat with the sector length to avoid a non-constant division */
2068 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2069 disk = bio->bio_driver_info;
2070 io->header.target_id = disk->ld_id;
2071 io->header.timeout = 0;
2072 io->header.flags = 0;
2073 io->header.scsi_status = 0;
2074 io->header.sense_len = MFI_SENSE_LEN;
/* For LD I/O, data_len is a block count, not a byte count. */
2075 io->header.data_len = blkcount;
2076 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2077 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2078 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
2079 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
2080 cm->cm_complete = mfi_bio_complete;
2081 cm->cm_private = bio;
2082 cm->cm_data = bp->b_data;
2083 cm->cm_len = bp->b_bcount;
2084 cm->cm_sg = &io->sgl;
2085 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2086 cm->cm_flags = flags;
/*
 * Completion callback for bio-originated commands: translate firmware/
 * SCSI status (or a driver-level cm_error) into B_ERROR on the buf,
 * print sense data on hardware failure, then release the command and
 * finish the bio.  NOTE(review): extract omits intermediate lines.
 */
2091 mfi_bio_complete(struct mfi_command *cm)
2095 struct mfi_frame_header *hdr;
2096 struct mfi_softc *sc;
2098 bio = cm->cm_private;
2100 hdr = &cm->cm_frame->header;
2103 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2104 bp->b_flags |= B_ERROR;
2106 device_printf(sc->mfi_dev, "I/O error, status= %d "
2107 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2108 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2109 } else if (cm->cm_error != 0) {
2110 bp->b_flags |= B_ERROR;
/* Release the command before completing the bio so it can be reused. */
2113 mfi_release_command(cm);
2114 mfi_disk_complete(bio);
/*
 * I/O dispatch loop: while the controller is not frozen (QFRZN), pick
 * the next command — previously prepared (ready queue), CAM ccb, or a
 * fresh bio-built command — and map/issue it.  A mapping failure
 * re-queues the command and stops dispatch.
 * NOTE(review): extract omits intermediate lines (the enclosing loop
 * and break statements are not fully visible).
 */
2118 mfi_startio(struct mfi_softc *sc)
2120 struct mfi_command *cm;
2121 struct ccb_hdr *ccbh;
2124 /* Don't bother if we're short on resources */
2125 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2128 /* Try a command that has already been prepared */
2129 cm = mfi_dequeue_ready(sc);
/* Next preference: a CAM ccb queued by the pass-through layer. */
2132 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2133 cm = sc->mfi_cam_start(ccbh);
2136 /* Nope, so look for work on the bioq */
2138 cm = mfi_bio_command(sc);
2140 /* No work available, so exit */
2144 /* Send the command to the controller */
2145 if (mfi_mapcmd(sc, cm) != 0) {
2146 mfi_requeue_ready(cm);
/*
 * Map a command's data buffer for DMA (if any) and send the frame.
 * bus_dmamap_load() may defer (EINPROGRESS) when resources are scarce,
 * in which case the queue is frozen and mfi_data_cb sends the frame
 * later; commands without data go straight to the send path, which is
 * mfi_tbolt_send_frame() on ThunderBolt (MFA) controllers.
 * NOTE(review): extract omits intermediate lines.
 */
2153 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2157 mfi_lockassert(&sc->mfi_io_lock);
/* STP frames carry their data differently and bypass the mapping here. */
2159 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2160 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2161 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2162 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2163 if (error == EINPROGRESS) {
/* Deferred: freeze dispatch until the callback fires. */
2164 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2168 if (sc->MFA_enabled)
2169 error = mfi_tbolt_send_frame(sc, cm);
2171 error = mfi_send_frame(sc, cm);
/*
 * bus_dma load callback: fill the command's scatter/gather list from the
 * DMA segments, using IEEE "skinny" SGEs for I/O on SKINNY controllers,
 * otherwise 32- or 64-bit SGEs depending on MFI_FLAGS_SG64.  For STP
 * frames the first cm_stp_len bytes form a separate leading SGE.  Also
 * syncs the map, sets direction flags, accounts for extra frames, and
 * finally sends the frame (ThunderBolt path when MFA_enabled).
 * NOTE(review): extract omits intermediate lines (e.g. where sgl and
 * first are initialized for the non-STP path).
 */
2178 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2180 struct mfi_frame_header *hdr;
2181 struct mfi_command *cm;
2183 struct mfi_softc *sc;
2184 int i, j, first, dir;
2186 cm = (struct mfi_command *)arg;
2188 hdr = &cm->cm_frame->header;
/* A load error fails the command immediately via mfi_complete(). */
2192 kprintf("error %d in callback\n", error);
2193 cm->cm_error = error;
2194 mfi_complete(sc, cm);
2198 /* Use IEEE sgl only for IO's on a SKINNY controller
2199 * For other commands on a SKINNY controller use either
2200 * sg32 or sg64 based on the sizeof(bus_addr_t).
2201 * Also calculate the total frame size based on the type
2204 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2205 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2206 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2207 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2208 for (i = 0; i < nsegs; i++) {
2209 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2210 sgl->sg_skinny[i].len = segs[i].ds_len;
2211 sgl->sg_skinny[i].flag = 0;
2213 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2214 hdr->sg_count = nsegs;
2217 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
/* STP: the first cm_stp_len bytes of segment 0 become their own SGE. */
2218 first = cm->cm_stp_len;
2219 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2220 sgl->sg32[j].addr = segs[0].ds_addr;
2221 sgl->sg32[j++].len = first;
2223 sgl->sg64[j].addr = segs[0].ds_addr;
2224 sgl->sg64[j++].len = first;
2228 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2229 for (i = 0; i < nsegs; i++) {
2230 sgl->sg32[j].addr = segs[i].ds_addr + first;
2231 sgl->sg32[j++].len = segs[i].ds_len - first;
2235 for (i = 0; i < nsegs; i++) {
2236 sgl->sg64[j].addr = segs[i].ds_addr + first;
2237 sgl->sg64[j++].len = segs[i].ds_len - first;
2240 hdr->flags |= MFI_FRAME_SGL64;
/* Sync the map in the direction(s) the transfer will use. */
2246 if (cm->cm_flags & MFI_CMD_DATAIN) {
2247 dir |= BUS_DMASYNC_PREREAD;
2248 hdr->flags |= MFI_FRAME_DIR_READ;
2250 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2251 dir |= BUS_DMASYNC_PREWRITE;
2252 hdr->flags |= MFI_FRAME_DIR_WRITE;
2254 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2255 cm->cm_flags |= MFI_CMD_MAPPED;
2258 * Instead of calculating the total number of frames in the
2259 * compound frame, it's already assumed that there will be at
2260 * least 1 frame, so don't compensate for the modulo of the
2261 * following division.
2263 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2264 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2266 if (sc->MFA_enabled)
2267 mfi_tbolt_send_frame(sc, cm);
2269 mfi_send_frame(sc, cm);
/*
 * Hand a fully-built frame to the hardware.  Non-polled commands are
 * timestamped and put on the busy queue for the interrupt path; polled
 * commands get an invalid-status sentinel and are busy-waited here for
 * up to MFI_POLL_TIMEOUT_SECS.
 * NOTE(review): extract omits intermediate lines (the delay inside the
 * poll loop is not visible).
 */
2273 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2275 struct mfi_frame_header *hdr;
2276 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2278 hdr = &cm->cm_frame->header;
2280 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2281 cm->cm_timestamp = time_uptime;
2282 mfi_enqueue_busy(cm);
/* Polled: sentinel status lets the loop below detect completion. */
2284 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2285 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2289 * The bus address of the command is aligned on a 64 byte boundary,
2290 * leaving the least 6 bits as zero. For whatever reason, the
2291 * hardware wants the address shifted right by three, leaving just
2292 * 3 zero bits. These three bits are then used as a prefetching
2293 * hint for the hardware to predict how many frames need to be
2294 * fetched across the bus. If a command has more than 8 frames
2295 * then the 3 bits are set to 0x7 and the firmware uses other
2296 * information in the command to determine the total amount to fetch.
2297 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2298 * is enough for both 32bit and 64bit systems.
2300 if (cm->cm_extra_frames > 7)
2301 cm->cm_extra_frames = 7;
2303 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2305 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2308 /* This is a polled command, so busy-wait for it to complete. */
2309 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2316 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2317 device_printf(sc->mfi_dev, "Frame %p timed out "
2318 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Common command completion: if the data buffer was DMA-mapped, sync it
 * in the post-transfer direction(s) and unload the map, then mark the
 * command completed and invoke its callback (waiters with no callback
 * are woken elsewhere).  NOTE(review): extract omits intermediate lines.
 */
2326 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2330 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP frames are synced POSTREAD regardless of DATAIN (see mfi_data_cb). */
2332 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2333 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2334 dir |= BUS_DMASYNC_POSTREAD;
2335 if (cm->cm_flags & MFI_CMD_DATAOUT)
2336 dir |= BUS_DMASYNC_POSTWRITE;
2338 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2339 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2340 cm->cm_flags &= ~MFI_CMD_MAPPED;
2343 cm->cm_flags |= MFI_CMD_COMPLETED;
2345 if (cm->cm_complete != NULL)
2346 cm->cm_complete(cm);
/*
 * Issue a polled MFI_CMD_ABORT frame targeting cm_abort (identified by
 * its context and frame bus address).  When aborting the standing AEN
 * command, sets cm_aen_abort and then waits (bounded retries) for
 * mfi_aen_complete() to clear sc->mfi_aen_cm and wake us.
 * Called with the io lock held.
 * NOTE(review): extract omits intermediate lines (the mapcmd call and
 * retry bookkeeping are not visible).
 */
2352 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2354 struct mfi_command *cm;
2355 struct mfi_abort_frame *abort;
2357 uint32_t context = 0;
2359 mfi_lockassert(&sc->mfi_io_lock);
2361 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2365 /* Zero out the MFI frame */
/* Preserve the context across the bzero — it identifies the command. */
2366 context = cm->cm_frame->header.context;
2367 bzero(cm->cm_frame, sizeof(union mfi_frame));
2368 cm->cm_frame->header.context = context;
2370 abort = &cm->cm_frame->abort;
2371 abort->header.cmd = MFI_CMD_ABORT;
2372 abort->header.flags = 0;
2373 abort->header.scsi_status = 0;
/* The victim is identified by its context and frame bus address. */
2374 abort->abort_context = cm_abort->cm_frame->header.context;
2375 abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2376 abort->abort_mfi_addr_hi =
2377 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2379 cm->cm_flags = MFI_CMD_POLLED;
2382 sc->mfi_aen_cm->cm_aen_abort = 1;
2384 mfi_release_command(cm);
/* Wait (bounded) for the AEN completion path to clear mfi_aen_cm. */
2386 while (i < 5 && sc->mfi_aen_cm != NULL) {
2387 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
/*
 * Write 'len' bytes at 'virt' to logical drive 'id' starting at 'lba',
 * using a polled MFI_CMD_LD_WRITE io frame.  Used on the crash-dump
 * path, hence polled and self-contained.  Returns the mapcmd status.
 */
2396 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2399 struct mfi_command *cm;
2400 struct mfi_io_frame *io;
2402 uint32_t context = 0;
2404 if ((cm = mfi_dequeue_free(sc)) == NULL)
2407 /* Zero out the MFI frame */
2408 context = cm->cm_frame->header.context;
2409 bzero(cm->cm_frame, sizeof(union mfi_frame));
2410 cm->cm_frame->header.context = context;
2412 io = &cm->cm_frame->io;
2413 io->header.cmd = MFI_CMD_LD_WRITE;
2414 io->header.target_id = id;
2415 io->header.timeout = 0;
2416 io->header.flags = 0;
2417 io->header.scsi_status = 0;
2418 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is in sectors for io frames; round the byte count up. */
2419 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2420 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2421 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2422 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2423 io->lba_lo = lba & 0xffffffff;
2426 cm->cm_sg = &io->sgl;
2427 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2428 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2430 error = mfi_mapcmd(sc, cm);
/* Polled command is done at this point; sync/unload the data map. */
2431 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2432 BUS_DMASYNC_POSTWRITE);
2433 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2434 mfi_release_command(cm);
/*
 * Crash-dump write to a system (pass-through) physical disk: build a
 * WRITE(10) CDB inside a polled MFI_CMD_PD_SCSI_IO frame.  Note the
 * 32-bit LBA / 16-bit block count limits inherent to WRITE(10).
 */
2440 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2443 struct mfi_command *cm;
2444 struct mfi_pass_frame *pass;
2448 if ((cm = mfi_dequeue_free(sc)) == NULL)
2451 pass = &cm->cm_frame->pass;
2452 bzero(pass->cdb, 16);
2453 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Hand-assemble the WRITE(10) CDB: big-endian LBA in bytes 2-5. */
2454 pass->cdb[0] = WRITE_10;
2455 pass->cdb[2] = (lba & 0xff000000) >> 24;
2456 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2457 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2458 pass->cdb[5] = (lba & 0x000000ff);
2459 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
/* Big-endian transfer length (in blocks) in CDB bytes 7-8. */
2460 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2461 pass->cdb[8] = (blkcount & 0x00ff);
2462 pass->header.target_id = id;
2463 pass->header.timeout = 0;
2464 pass->header.flags = 0;
2465 pass->header.scsi_status = 0;
2466 pass->header.sense_len = MFI_SENSE_LEN;
/* For pass frames data_len is in bytes (unlike io frames). */
2467 pass->header.data_len = len;
2468 pass->header.cdb_len = 10;
2469 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2470 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2473 cm->cm_sg = &pass->sgl;
2474 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2475 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2477 error = mfi_mapcmd(sc, cm);
2478 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2479 BUS_DMASYNC_POSTWRITE);
2480 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2481 mfi_release_command(cm);
/*
 * Character-device open: refuse while the softc is detaching, otherwise
 * mark the control device open.  Serialized by the io lock.
 */
2487 mfi_open(struct dev_open_args *ap)
2489 cdev_t dev = ap->a_head.a_dev;
2490 struct mfi_softc *sc;
2495 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2496 if (sc->mfi_detaching)
2499 sc->mfi_flags |= MFI_FLAGS_OPEN;
2502 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Character-device close: clear the OPEN flag and drop any AEN
 * registrations made by the closing process (matched on curproc).
 */
2508 mfi_close(struct dev_close_args *ap)
2510 cdev_t dev = ap->a_head.a_dev;
2511 struct mfi_softc *sc;
2512 struct mfi_aen *mfi_aen_entry, *tmp;
2516 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2517 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* _MUTABLE variant allows removal while iterating. */
2519 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2520 if (mfi_aen_entry->p == curproc) {
2521 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2523 kfree(mfi_aen_entry, M_MFIBUF);
2526 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Take the configuration lock for DCMD opcodes that mutate the array
 * configuration; other opcodes need no lock.  Returns a token that the
 * caller passes back to mfi_config_unlock().
 */
2531 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2535 case MFI_DCMD_LD_DELETE:
2536 case MFI_DCMD_CFG_ADD:
2537 case MFI_DCMD_CFG_CLEAR:
2538 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2539 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
/* Release the config lock iff mfi_config_lock() reported it was taken. */
2547 mfi_config_unlock(struct mfi_softc *sc, int locked)
2551 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2555 * Perform pre-issue checks on commands from userland and possibly veto
2559 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2561 struct mfi_disk *ld, *ld2;
2563 struct mfi_system_pd *syspd = NULL;
2567 mfi_lockassert(&sc->mfi_io_lock);
2569 switch (cm->cm_frame->dcmd.opcode) {
/* LD delete: the target id is carried in mbox[0]; disable it first. */
2570 case MFI_DCMD_LD_DELETE:
2571 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2572 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2578 error = mfi_disk_disable(ld);
/* CFG clear wipes all LDs: disable each; on failure re-enable the rest. */
2580 case MFI_DCMD_CFG_CLEAR:
2581 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2582 error = mfi_disk_disable(ld);
2587 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2590 mfi_disk_enable(ld2);
/*
 * PD state change: mbox[] holds the pd id and the new state
 * (mbox[2]); disable the syspd before it goes UNCONFIGURED_GOOD.
 */
2594 case MFI_DCMD_PD_STATE_SET:
2595 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2597 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2598 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2599 if (syspd->pd_id == syspd_id)
2606 error = mfi_syspd_disable(syspd);
2614 /* Perform post-issue checks on commands from userland. */
2616 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2618 struct mfi_disk *ld, *ldn;
2619 struct mfi_system_pd *syspd = NULL;
2623 switch (cm->cm_frame->dcmd.opcode) {
/*
 * LD delete succeeded: detach the child disk device.  The io lock is
 * dropped around device_delete_child() to respect lock ordering.
 * On failure the disk disabled by the pre-check is re-enabled.
 */
2624 case MFI_DCMD_LD_DELETE:
2625 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2626 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2629 KASSERT(ld != NULL, ("volume dissappeared"));
2630 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2631 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2633 device_delete_child(sc->mfi_dev, ld->ld_dev);
2635 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2637 mfi_disk_enable(ld);
/* CFG clear succeeded: detach every child disk; else re-enable all. */
2639 case MFI_DCMD_CFG_CLEAR:
2640 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2641 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2643 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2644 device_delete_child(sc->mfi_dev, ld->ld_dev);
2647 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2649 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2650 mfi_disk_enable(ld);
2653 case MFI_DCMD_CFG_ADD:
2654 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2655 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK)
2658 case MFI_DCMD_PD_STATE_SET:
2659 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2661 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2662 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2663 if (syspd->pd_id == syspd_id)
2669 /* If the transition fails then enable the syspd again */
2670 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2671 mfi_syspd_enable(syspd);
/*
 * Detect whether a userland CFG_ADD / LD_DELETE command targets an SSCD
 * (CacheCade) volume, for which the normal pre/post checks are skipped.
 * For LD_DELETE this issues an internal LD_GET_INFO to inspect the
 * target volume's isSSCD flag.
 */
2677 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2679 struct mfi_config_data *conf_data = cm->cm_data;
2680 struct mfi_command *ld_cm = NULL;
2681 struct mfi_ld_info *ld_info = NULL;
2684 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2685 (conf_data->ld[0].params.isSSCD == 1)) {
2687 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2688 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2689 (void **)&ld_info, sizeof(*ld_info));
/* NOTE(review): message lacks a space between "allocate" and "MFI_..." */
2691 device_printf(sc->mfi_dev, "Failed to allocate"
2692 "MFI_DCMD_LD_GET_INFO %d", error);
2694 kfree(ld_info, M_MFIBUF);
2697 ld_cm->cm_flags = MFI_CMD_DATAIN;
/* Query the same target id the user's delete names in mbox[0]. */
2698 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2699 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2700 if (mfi_wait_command(sc, ld_cm) != 0) {
2701 device_printf(sc->mfi_dev, "failed to get log drv\n");
2702 mfi_release_command(ld_cm);
2703 kfree(ld_info, M_MFIBUF);
2707 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2708 kfree(ld_info, M_MFIBUF);
2709 mfi_release_command(ld_cm);
2712 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2715 if (ld_info->ld_config.params.isSSCD == 1)
2718 mfi_release_command(ld_cm);
2719 kfree(ld_info, M_MFIBUF);
/*
 * Set up a userland MFI_CMD_STP (SAS transport passthru) command: for
 * each user SGE allocate a DMA-able kernel bounce buffer, load it, fill
 * in both the megasas-style SGE array and the frame's sg32/sg64 list,
 * and copy the user data in.  Buffers are freed later on the STP
 * cleanup path in mfi_ioctl().
 */
2725 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2728 struct mfi_ioc_packet *ioc;
2729 ioc = (struct mfi_ioc_packet *)arg;
2730 int sge_size, error;
2731 struct megasas_sge *kern_sge;
2733 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* SGE array lives inside the frame at the user-supplied offset. */
2734 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2735 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
/* 64-bit kernels use 64-bit SGEs and a fixed two extra frames. */
2737 if (sizeof(bus_addr_t) == 8) {
2738 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2739 cm->cm_extra_frames = 2;
2740 sge_size = sizeof(struct mfi_sg64);
2742 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2743 sge_size = sizeof(struct mfi_sg32);
2746 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2747 for (i = 0; i < ioc->mfi_sge_count; i++) {
/* Per-SGE DMA tag sized exactly to this iovec's length. */
2748 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2749 1, 0, /* algnmnt, boundary */
2750 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2751 BUS_SPACE_MAXADDR, /* highaddr */
2752 ioc->mfi_sgl[i].iov_len,/* maxsize */
2754 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2755 BUS_DMA_ALLOCNOW, /* flags */
2756 &sc->mfi_kbuff_arr_dmat[i])) {
2757 device_printf(sc->mfi_dev,
2758 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2762 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2763 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2764 &sc->mfi_kbuff_arr_dmamap[i])) {
2765 device_printf(sc->mfi_dev,
2766 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2770 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2771 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2772 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2773 &sc->mfi_kbuff_arr_busaddr[i], 0);
2775 if (!sc->kbuff_arr[i]) {
2776 device_printf(sc->mfi_dev,
2777 "Could not allocate memory for kbuff_arr info\n");
2780 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2781 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
/* Mirror the bounce-buffer addresses into the frame's own SGL. */
2783 if (sizeof(bus_addr_t) == 8) {
2784 cm->cm_frame->stp.sgl.sg64[i].addr =
2785 kern_sge[i].phys_addr;
2786 cm->cm_frame->stp.sgl.sg64[i].len =
2787 ioc->mfi_sgl[i].iov_len;
2789 cm->cm_frame->stp.sgl.sg32[i].addr =
2790 kern_sge[i].phys_addr;
2791 cm->cm_frame->stp.sgl.sg32[i].len =
2792 ioc->mfi_sgl[i].iov_len;
/* Copy the user's payload into the kernel bounce buffer. */
2795 error = copyin(ioc->mfi_sgl[i].iov_base,
2797 ioc->mfi_sgl[i].iov_len);
2799 device_printf(sc->mfi_dev, "Copy in failed\n");
2804 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * Execute a userland DCMD passthru (MFIIO_PASSTHRU): copy in the user
 * buffer, run the frame through the normal pre/issue/post path, then
 * copy the buffer and frame status back out.  Direction is unknown, so
 * the command is marked both DATAIN and DATAOUT.
 */
2809 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2811 struct mfi_command *cm;
2812 struct mfi_dcmd_frame *dcmd;
2813 void *ioc_buf = NULL;
2815 int error = 0, locked;
2818 if (ioc->buf_size > 0) {
2819 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2820 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2822 device_printf(sc->mfi_dev, "failed to copyin\n");
2823 kfree(ioc_buf, M_MFIBUF);
/* Config lock is taken only for config-mutating opcodes. */
2828 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2830 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Block until a free command slot is available. */
2831 while ((cm = mfi_dequeue_free(sc)) == NULL)
2832 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2834 /* Save context for later */
2835 context = cm->cm_frame->header.context;
2837 dcmd = &cm->cm_frame->dcmd;
2838 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2840 cm->cm_sg = &dcmd->sgl;
2841 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2842 cm->cm_data = ioc_buf;
2843 cm->cm_len = ioc->buf_size;
2845 /* restore context */
2846 cm->cm_frame->header.context = context;
2848 /* Cheat since we don't know if we're writing or reading */
2849 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2851 error = mfi_check_command_pre(sc, cm);
2855 error = mfi_wait_command(sc, cm);
2857 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Reflect the completed frame (status etc.) back to the caller. */
2860 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2861 mfi_check_command_post(sc, cm);
2863 mfi_release_command(cm);
2864 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2865 mfi_config_unlock(sc, locked);
2866 if (ioc->buf_size > 0)
2867 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2869 kfree(ioc_buf, M_MFIBUF);
/* Convert an integer-encoded user pointer (e.g. from a 32-bit ABI shim)
 * back into a kernel void *. */
2873 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * Main control-device ioctl dispatcher.  Handles stat queries, disk
 * name queries, raw MFI frame passthru (MFI_CMD), AEN registration,
 * the Linux ioctl shims (which re-dispatch to mfi_linux_ioctl_int),
 * and MFIIO_PASSTHRU.  The raw-frame path dominates: it builds a
 * command from a user-supplied frame, bounces user SGL data through a
 * kernel buffer, runs pre/post checks, and copies results/sense back.
 */
2876 mfi_ioctl(struct dev_ioctl_args *ap)
2878 cdev_t dev = ap->a_head.a_dev;
2879 u_long cmd = ap->a_cmd;
2880 int flag = ap->a_fflag;
2881 caddr_t arg = ap->a_data;
2882 struct mfi_softc *sc;
2883 union mfi_statrequest *ms;
2884 struct mfi_ioc_packet *ioc;
2885 struct mfi_ioc_aen *aen;
2886 struct mfi_command *cm = NULL;
2888 union mfi_sense_ptr sense_ptr;
2889 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2892 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
/* Refuse all ioctls once the adapter has hit a critical error. */
2901 if (sc->hw_crit_error)
2904 if (sc->issuepend_done == 0)
/* Driver statistics request. */
2909 ms = (union mfi_statrequest *)arg;
2910 switch (ms->ms_item) {
2915 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2916 sizeof(struct mfi_qstat));
2923 case MFIIO_QUERY_DISK:
2925 struct mfi_query_disk *qd;
2926 struct mfi_disk *ld;
2928 qd = (struct mfi_query_disk *)arg;
2929 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2930 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2931 if (ld->ld_id == qd->array_id)
2936 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2940 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2942 bzero(qd->devname, SPECNAMELEN + 1);
2943 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2944 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Raw frame passthru: may be redirected to another adapter's softc. */
2949 devclass_t devclass;
2950 ioc = (struct mfi_ioc_packet *)arg;
2953 adapter = ioc->mfi_adapter_no;
2954 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2955 devclass = devclass_find("mfi");
2956 sc = devclass_get_softc(devclass, adapter);
2958 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2959 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2960 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2963 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2967 * save off original context since copying from user
2968 * will clobber some data
2970 context = cm->cm_frame->header.context;
2971 cm->cm_frame->header.context = cm->cm_index;
2973 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2974 2 * MEGAMFI_FRAME_SIZE);
2975 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2976 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2977 cm->cm_frame->header.scsi_status = 0;
2978 cm->cm_frame->header.pad0 = 0;
2979 if (ioc->mfi_sge_count) {
2981 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Derive transfer direction from the user frame's flags. */
2984 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2985 cm->cm_flags |= MFI_CMD_DATAIN;
2986 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2987 cm->cm_flags |= MFI_CMD_DATAOUT;
2988 /* Legacy app shim */
2989 if (cm->cm_flags == 0)
2990 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2991 cm->cm_len = cm->cm_frame->header.data_len;
2992 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2993 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
2994 cm->cm_len += cm->cm_stp_len;
2997 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2998 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3004 /* restore header context */
3005 cm->cm_frame->header.context = context;
/* STP commands take a dedicated bounce-buffer setup path. */
3007 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3008 res = mfi_stp_cmd(sc, cm, arg);
/* Gather user SGL data into the contiguous kernel buffer. */
3013 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3014 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3015 for (i = 0; i < ioc->mfi_sge_count; i++) {
3016 addr = ioc->mfi_sgl[i].iov_base;
3017 len = ioc->mfi_sgl[i].iov_len;
3018 error = copyin(addr, temp, len);
3020 device_printf(sc->mfi_dev,
3021 "Copy in failed\n");
3029 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3030 locked = mfi_config_lock(sc,
3031 cm->cm_frame->dcmd.opcode);
/* Point the frame's sense buffer at this command's DMA sense area. */
3033 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3034 cm->cm_frame->pass.sense_addr_lo =
3035 (uint32_t)cm->cm_sense_busaddr;
3036 cm->cm_frame->pass.sense_addr_hi =
3037 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3039 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* SSCD-targeted config commands bypass the pre/post checks. */
3040 skip_pre_post = mfi_check_for_sscd(sc, cm);
3041 if (!skip_pre_post) {
3042 error = mfi_check_command_pre(sc, cm);
3044 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3049 if ((error = mfi_wait_command(sc, cm)) != 0) {
3050 device_printf(sc->mfi_dev,
3051 "Controller polled failed\n");
3052 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3057 mfi_check_command_post(sc, cm);
3058 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter the kernel buffer back out to the user SGL on DATAIN. */
3060 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3062 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3063 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3064 for (i = 0; i < ioc->mfi_sge_count; i++) {
3065 addr = ioc->mfi_sgl[i].iov_base;
3066 len = ioc->mfi_sgl[i].iov_len;
3067 error = copyout(temp, addr, len);
3069 device_printf(sc->mfi_dev,
3070 "Copy out failed\n");
3078 if (ioc->mfi_sense_len) {
3079 /* get user-space sense ptr then copy out sense */
3080 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3081 &sense_ptr.sense_ptr_data[0],
3082 sizeof(sense_ptr.sense_ptr_data));
3083 error = copyout(cm->cm_sense, sense_ptr.user_space,
3084 ioc->mfi_sense_len);
3086 device_printf(sc->mfi_dev,
3087 "Copy out failed\n");
3092 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3094 mfi_config_unlock(sc, locked);
3096 kfree(data, M_MFIBUF);
/* STP cleanup: unload/free/destroy the per-SGE bounce DMA resources. */
3097 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3098 for (i = 0; i < 2; i++) {
3099 if (sc->kbuff_arr[i]) {
3100 if (sc->mfi_kbuff_arr_busaddr != 0)
3102 sc->mfi_kbuff_arr_dmat[i],
3103 sc->mfi_kbuff_arr_dmamap[i]
3105 if (sc->kbuff_arr[i] != NULL)
3107 sc->mfi_kbuff_arr_dmat[i],
3109 sc->mfi_kbuff_arr_dmamap[i]
3111 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3112 bus_dma_tag_destroy(
3113 sc->mfi_kbuff_arr_dmat[i]);
3118 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3119 mfi_release_command(cm);
3120 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* AEN registration from a native (non-Linux-shim) consumer. */
3126 aen = (struct mfi_ioc_aen *)arg;
3127 error = mfi_aen_register(sc, aen->aen_seq_num,
3128 aen->aen_class_locale);
3131 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3133 devclass_t devclass;
3134 struct mfi_linux_ioc_packet l_ioc;
3137 devclass = devclass_find("mfi");
3138 if (devclass == NULL)
3141 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3144 adapter = l_ioc.lioc_adapter_no;
3145 sc = devclass_get_softc(devclass, adapter);
3148 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3152 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3154 devclass_t devclass;
3155 struct mfi_linux_ioc_aen l_aen;
3158 devclass = devclass_find("mfi");
3159 if (devclass == NULL)
3162 error = copyin(arg, &l_aen, sizeof(l_aen));
3165 adapter = l_aen.laen_adapter_no;
3166 sc = devclass_get_softc(devclass, adapter);
3169 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3173 case MFIIO_PASSTHRU:
3174 error = mfi_user_command(sc, iop);
3177 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Backend for the Linux-compatibility ioctls.  Mirrors the native raw
 * frame passthru path in mfi_ioctl() but uses the 32-bit Linux ioctl
 * structures (PTRIN() to decode user pointers) and zeroes the high half
 * of the sense pointer, since only 32-bit Linux apps are supported.
 */
3186 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
3188 struct mfi_softc *sc;
3189 struct mfi_linux_ioc_packet l_ioc;
3190 struct mfi_linux_ioc_aen l_aen;
3191 struct mfi_command *cm = NULL;
3192 struct mfi_aen *mfi_aen_entry;
3193 union mfi_sense_ptr sense_ptr;
3195 uint8_t *data = NULL, *temp;
3202 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3203 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the SGE count before trusting the user structure. */
3207 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3211 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3212 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3213 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3216 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3220 * save off original context since copying from user
3221 * will clobber some data
3223 context = cm->cm_frame->header.context;
3225 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3226 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3227 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3228 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3229 cm->cm_frame->header.scsi_status = 0;
3230 cm->cm_frame->header.pad0 = 0;
3231 if (l_ioc.lioc_sge_count)
3233 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3235 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3236 cm->cm_flags |= MFI_CMD_DATAIN;
3237 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3238 cm->cm_flags |= MFI_CMD_DATAOUT;
3239 cm->cm_len = cm->cm_frame->header.data_len;
3241 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3242 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3248 /* restore header context */
3249 cm->cm_frame->header.context = context;
/* Gather DATAOUT payload from the Linux iovecs into 'data'. */
3252 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3253 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3254 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3256 l_ioc.lioc_sgl[i].iov_len);
3258 device_printf(sc->mfi_dev,
3259 "Copy in failed\n");
3262 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3266 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3267 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3269 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3270 cm->cm_frame->pass.sense_addr_lo =
3271 (uint32_t)cm->cm_sense_busaddr;
3272 cm->cm_frame->pass.sense_addr_hi =
3273 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3276 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3277 error = mfi_check_command_pre(sc, cm);
3279 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3283 if ((error = mfi_wait_command(sc, cm)) != 0) {
3284 device_printf(sc->mfi_dev,
3285 "Controller polled failed\n");
3286 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3290 mfi_check_command_post(sc, cm);
3291 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter DATAIN results back to the Linux iovecs. */
3294 if (cm->cm_flags & MFI_CMD_DATAIN) {
3295 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3296 error = copyout(temp,
3297 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3298 l_ioc.lioc_sgl[i].iov_len);
3300 device_printf(sc->mfi_dev,
3301 "Copy out failed\n");
3304 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3308 if (l_ioc.lioc_sense_len) {
3309 /* get user-space sense ptr then copy out sense */
3310 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3311 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3312 &sense_ptr.sense_ptr_data[0],
3313 sizeof(sense_ptr.sense_ptr_data));
3316 * only 32bit Linux support so zero out any
3317 * address over 32bit
3319 sense_ptr.addr.high = 0;
3321 error = copyout(cm->cm_sense, sense_ptr.user_space,
3322 l_ioc.lioc_sense_len);
3324 device_printf(sc->mfi_dev,
3325 "Copy out failed\n");
/* Write the completion status into the user's frame header. */
3330 error = copyout(&cm->cm_frame->header.cmd_status,
3331 &((struct mfi_linux_ioc_packet*)arg)
3332 ->lioc_frame.hdr.cmd_status,
3335 device_printf(sc->mfi_dev,
3336 "Copy out failed\n");
3341 mfi_config_unlock(sc, locked);
3343 kfree(data, M_MFIBUF);
3345 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3346 mfi_release_command(cm);
3347 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3351 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3352 error = copyin(arg, &l_aen, sizeof(l_aen));
3355 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3356 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3358 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Record the registering process so mfi_close() can unregister it. */
3359 if (mfi_aen_entry != NULL) {
3360 mfi_aen_entry->p = curproc;
3361 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3364 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3365 l_aen.laen_class_locale);
/* Registration failed: roll back the pid-list entry. */
3368 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3370 kfree(mfi_aen_entry, M_MFIBUF);
3372 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3376 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * kqueue attach entry point: hook read/write filters onto the softc's
 * knote list; other filter types are rejected with EOPNOTSUPP.
 */
3385 mfi_kqfilter(struct dev_kqfilter_args *ap)
3387 cdev_t dev = ap->a_head.a_dev;
3388 struct knote *kn = ap->a_kn;
3389 struct mfi_softc *sc;
3390 struct klist *klist;
3395 switch (kn->kn_filter) {
3397 kn->kn_fop = &mfi_read_filterops;
3398 kn->kn_hook = (caddr_t)sc;
3401 kn->kn_fop = &mfi_write_filterops;
3402 kn->kn_hook = (caddr_t)sc;
3405 ap->a_result = EOPNOTSUPP;
3409 klist = &sc->mfi_kq.ki_note;
3410 knote_insert(klist, kn);
/* kqueue detach: unhook the knote from the softc's knote list. */
3416 mfi_filter_detach(struct knote *kn)
3418 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3419 struct klist *klist = &sc->mfi_kq.ki_note;
3421 knote_remove(klist, kn);
/*
 * EVFILT_READ filter: fires when an AEN has triggered (consuming the
 * trigger flag); flags EV_ERROR when no AEN command is outstanding,
 * otherwise records that a poller is waiting.
 */
3425 mfi_filter_read(struct knote *kn, long hint)
3427 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3430 if (sc->mfi_aen_triggered != 0) {
3432 sc->mfi_aen_triggered = 0;
3434 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3435 kn->kn_flags |= EV_ERROR;
3438 sc->mfi_poll_waiting = 1;
/* EVFILT_WRITE filter (body elided here). */
3444 mfi_filter_write(struct knote *kn, long hint)
/*
 * NOTE(review): the following lines appear to belong to a debug routine
 * that walks every mfi softc and reports busy commands older than the
 * command timeout — presumably a sysctl/dump handler; confirm against
 * the full source.
 */
3452 struct mfi_softc *sc;
3453 struct mfi_command *cm;
3459 dc = devclass_find("mfi");
3461 kprintf("No mfi dev class\n");
/* Iterate all adapter instances registered under the "mfi" devclass. */
3465 for (i = 0; ; i++) {
3466 sc = devclass_get_softc(dc, i);
3469 device_printf(sc->mfi_dev, "Dumping\n\n");
/* Any busy command issued before 'deadline' has exceeded the timeout. */
3471 deadline = time_uptime - mfi_cmd_timeout;
3472 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3473 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3474 if (cm->cm_timestamp < deadline) {
3475 device_printf(sc->mfi_dev,
3476 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3477 cm, (int)(time_uptime - cm->cm_timestamp));
3488 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3495 mfi_timeout(void *data)
3497 struct mfi_softc *sc = (struct mfi_softc *)data;
3498 struct mfi_command *cm;
3502 deadline = time_uptime - mfi_cmd_timeout;
3503 if (sc->adpreset == 0) {
3504 if (!mfi_tbolt_reset(sc)) {
3505 callout_reset(&sc->mfi_watchdog_callout,
3506 mfi_cmd_timeout * hz, mfi_timeout, sc);
3510 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3511 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3512 if (sc->mfi_aen_cm == cm)
3514 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3515 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3516 cm->cm_timestamp = time_uptime;
3518 device_printf(sc->mfi_dev,
3519 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3520 cm, (int)(time_uptime - cm->cm_timestamp));
3522 MFI_VALIDATE_CMD(sc, cm);
3533 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3535 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,