/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
 * FreeBSD projects/head_mfi/ r233016
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/bus_dma.h>
#include <sys/signalvar.h>
#include <sys/device.h>
#include <sys/mplock2.h>
#include <sys/taskqueue.h>

#include <bus/cam/scsi/scsi_all.h>

#include <bus/pci/pcivar.h>

#include <dev/raid/mfi/mfireg.h>
#include <dev/raid/mfi/mfi_ioctl.h>
#include <dev/raid/mfi/mfivar.h>
static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
		    uint32_t, void **, size_t);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

static void	mfi_filter_detach(struct knote *);
static int	mfi_filter_read(struct knote *, long);
static int	mfi_filter_write(struct knote *, long);
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
    0, "Max commands");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
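/*
 * Usage note: the TUNABLE_INT knobs above are fetched at boot, so they are
 * normally set from /boot/loader.conf, e.g. hw.mfi.max_cmds=64 or
 * hw.mfi.event_locale=255.  The CTLFLAG_RW entries (event_locale,
 * event_class, detect_jbod_change) can additionally be changed at runtime
 * with sysctl(8); max_cmds is CTLFLAG_RD and therefore boot-time only.
 * The example values here are illustrative, not defaults.
 */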
/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_kqfilter_t	mfi_kqfilter;

static struct dev_ops mfi_ops = {
	{ "mfi", 0, D_MPSAFE },
	.d_open = mfi_open,
	.d_close = mfi_close,
	.d_ioctl = mfi_ioctl,
	.d_kqfilter = mfi_kqfilter,
};

static struct filterops mfi_read_filterops =
	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
static struct filterops mfi_write_filterops =
	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	} else {
		panic("unknown adapter type");
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
	    ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
	    ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
		return 1;

	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
	}
}
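/*
 * Note on the inbound-queue encodings above (an inference, not from the
 * original comments): MFI command frames are 64-byte aligned, so the low
 * bits of the frame bus address are free to carry the extra-frame count.
 * The xscale flavor packs it as (bus_add >> 3) | frame_cnt, while the
 * ppc/skinny flavors keep the address intact, shift the count into the
 * low bits, and set bit 0: (bus_add | frame_cnt << 1) | 1.
 */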
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}
int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge;
	uint32_t tb_mem_size = 0;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
	    MEGASAS_VERSION);

	lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
	lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
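	/*
	 * Illustrative sizing, assuming 4 KiB pages and an MFI_MAXPHYS of
	 * 128 KiB: a maximal transfer spans at most (128 KiB / 4 KiB) + 1 =
	 * 33 pages (the +1 covers an unaligned start), so mfi_max_sge is
	 * clamped to 33 even if the firmware advertises more S/G elements.
	 */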
	/* ThunderBolt Support get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* alignmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return 0;
		}

		/*
		 * Allocate DMA memory mapping for MPI2 IOC Init descriptor;
		 * we keep it separate from what we have allocated for the
		 * request and reply descriptors to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}

	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
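	/*
	 * Worked example (illustrative): a firmware reporting 1000 commands
	 * yields commsz = 4 * 1000 + sizeof(struct mfi_hwcomms), i.e. one
	 * 32-bit reply slot per command plus the producer/consumer indexes
	 * and the single reply entry already embedded in mfi_hwcomms.
	 */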
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);

	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
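	/*
	 * Worked example (illustrative, assuming 12-byte mfi_sg64 entries
	 * and mfi_max_sge == 33): frames = (12 * 33 - 1) / 64 + 2 = 8, so
	 * mfi_cmd_size is 8 * 64 = 512 bytes of frame memory per firmware
	 * command.
	 */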
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);

	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);

	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether
	 * hostmemory is required by the FW or not
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			return error;
		}

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
		    mfi_intr_tbolt, sc, &sc->mfi_intr, NULL)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
		    mfi_intr, sc, &sc->mfi_intr, NULL)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	sc->mfi_ich.ich_desc = "mfi";
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (error);
	}
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	sysctl_ctx_init(&sc->mfi_sysctl_ctx);
	sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
	if (sc->mfi_sysctl_tree == NULL) {
		device_printf(sc->mfi_dev, "can't add sysctl node\n");
		return (EINVAL);
	}
	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init_mp(&sc->mfi_watchdog_callout);
	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	return (0);
}
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
	if (bootverbose)
		device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
		    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

	sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		} else {
			break;
		}
		sc->mfi_total_cmds++;
	}

	return (0);
}
void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	KKASSERT(lockstatus(&cm->cm_sc->mfi_io_lock, curthread) != 0);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}
static int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;

	return (0);
}
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;

	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send init command\n");
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (error);
	}
	mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	return (0);
}
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		kfree(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	return (error);
}
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	return (error);
}
int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		sc->mfi_boot_seq_num = log_state->boot_seq_num;
		if (error) {
			if (log_state)
				kfree(log_state, M_MFIBUF);
			return (error);
		}

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num);
		seq = log_state->newest_seq_num;
	} else {
		seq = seq_start;
	}
	mfi_aen_register(sc, seq, class_locale.word);
	if (log_state != NULL)
		kfree(log_state, M_MFIBUF);

	return (0);
}
static int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{
	KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
	return (cm->cm_error);
}
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_stop(&sc->mfi_watchdog_callout);	/* XXX callout_drain() */

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);
	dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));

	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		kfree(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);
		/* End LSIP200113393 */

		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			if (sc->mfi_cmd_pool_tbolt != NULL) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					kfree(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
		}
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			kfree(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (sc->mfi_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->mfi_sysctl_ctx);

#if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
	if (mtx_initialized(&sc->mfi_io_lock))
#endif
	{
		lockuninit(&sc->mfi_io_lock);
		lockuninit(&sc->mfi_config_lock);
	}
}
static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	lockmgr(&sc->mfi_config_lock, LK_RELEASE);
}
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;
}
static void
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return;
	}

	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	if (sc->map_update_cmd != NULL)
		mfi_abort(sc, sc->map_update_cmd);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
}
static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	int error, i, found;

	KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
	KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_MUTABLE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0) {
			kprintf("DELETE\n");
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			get_mplock();
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			rel_mplock();
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		}
	}
out:
	if (pdlist)
		kfree(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	int error, i;

	KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
	KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		kfree(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}
/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}
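/*
 * Example (illustrative): a timestamp of 0xff00002a decodes as
 * "boot + 42s", while a plain value of 42 decodes as "42s" (seconds
 * since 00:00 Jan 1, 2000).
 */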
static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		ksnprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and Delete
				 * invalid SYSPD's
				 */
				lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
				lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
				mfi_syspdprobe(sc);
				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
				lockmgr(&sc->mfi_config_lock, LK_RELEASE);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * During load time driver reads all the events starting
		 * from the one that has been logged after shutdown.  Avoid
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix: for kernel panics when SSCD is removed
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				get_mplock();
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				rel_mplock();
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						get_mplock();
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						rel_mplock();
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
				lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
				mfi_syspdprobe(sc);
				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
				lockmgr(&sc->mfi_config_lock, LK_RELEASE);
			}
		}
		break;
	}
}
static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
	elm = kmalloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}
static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(, mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		kfree(elm, M_MFIBUF);
	}
}
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_abort(sc, sc->mfi_aen_cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}
	}

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

out:
	return (error);
}
static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	if (sc->mfi_aen_cm->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->mfi_aen_cm->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			KNOTE(&sc->mfi_kq.ki_note, 0);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
		    aen_link, tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			lwkt_gettoken(&proc_token);
			ksignal(mfi_aen_entry->p, SIGIO);
			lwkt_reltoken(&proc_token);
			kfree(mfi_aen_entry, M_MFIBUF);
		}
	}

	kfree(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		mfi_aen_setup(sc, seq);
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	}
}
#define MAX_EVENTS 15
static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
	    * (MAX_EVENTS - 1);
	el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			kfree(el, M_MFIBUF);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (EBUSY);
		}
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		mfi_release_command(cm);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_queue_evt(sc, &el->event[i]);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}

		seq = el->event[el->count - 1].seq + 1;
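		/*
		 * Resuming at (newest seq + 1) lets the next
		 * MFI_DCMD_CTRL_EVENT_GET iteration pick up where this
		 * batch ended; the MFI_STAT_NOT_FOUND check above is what
		 * finally terminates the walk once the log is drained.
		 */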
	}

	kfree(el, M_MFIBUF);
	return (0);
}
static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_ld_info *ld_info = NULL;
	int error;

	KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
	    (void **)&ld_info, sizeof(*ld_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
		if (ld_info)
			kfree(ld_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get logical drive: %d\n", id);
		kfree(ld_info, M_MFIBUF);
		return (0);
	}
	if (ld_info->ld_config.params.isSSCD != 1) {
		mfi_add_ld_complete(cm);
	} else {
		mfi_release_command(cm);
		if (ld_info)	/* SSCD drives ld_info free here */
			kfree(ld_info, M_MFIBUF);
	}
	return (0);
}
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		kfree(ld_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	get_mplock();
	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		kfree(ld_info, M_MFIBUF);
		rel_mplock();
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		return;
	}

	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_generic_attach(sc->mfi_dev);
	rel_mplock();
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
}
static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_pd_info *pd_info = NULL;
	int error;

	KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
	    (void **)&pd_info, sizeof(*pd_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
		    error);
		if (pd_info)
			kfree(pd_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	dcmd->header.scsi_status = 0;
	dcmd->header.pad0 = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get physical drive info %d\n", id);
		kfree(pd_info, M_MFIBUF);
		return (0);
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_add_sys_pd_complete(cm);
	return (0);
}
static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		kfree(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		kfree(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	get_mplock();
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		kfree(pd_info, M_MFIBUF);
		rel_mplock();
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		return;
	}

	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_generic_attach(sc->mfi_dev);
	rel_mplock();
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
}
static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
	struct bio *bio;
	struct mfi_command *cm = NULL;
	struct mfi_disk *mfid;

	/* reserving two commands to avoid starvation for IOCTL */
	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
		return (NULL);
	if ((bio = mfi_dequeue_bio(sc)) == NULL)
		return (NULL);
	mfid = bio->bio_driver_info;
	if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
		cm = mfi_build_syspdio(sc, bio);
	else
		cm = mfi_build_ldio(sc, bio);
	if (cm == NULL)
		mfi_enqueue_bio(sc, bio);
	return cm;
}
static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_command *cm;
	struct buf *bp = bio->bio_buf;
	struct mfi_system_pd *disk;
	struct mfi_pass_frame *pass;
	int flags = 0, blkcount = 0;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	switch (bp->b_cmd & 0x03) {
	case BUF_CMD_READ:
		pass->cdb[0] = READ_10;
		flags = MFI_CMD_DATAIN;
		break;
	case BUF_CMD_WRITE:
		pass->cdb[0] = WRITE_10;
		flags = MFI_CMD_DATAOUT;
		break;
	default:
		panic("Invalid bio command");
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	disk = bio->bio_driver_info;
	/* Fill the LBA and Transfer length in CDB */
	pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
	pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
	pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
	pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
	pass->cdb[7] = (blkcount & 0xff00) >> 8;
	pass->cdb[8] = (blkcount & 0x00ff);
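	/*
	 * Worked example (illustrative): a 4 KiB read at byte offset 1 MiB
	 * becomes LBA 0x800 (2048) in cdb[2..5] and a transfer length of 8
	 * sectors in cdb[7..8], since MFI_SECTOR_LEN is 512.
	 */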
	pass->header.target_id = disk->pd_id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = bp->b_bcount;
	pass->header.cdb_len = 10;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bp->b_data;
	cm->cm_len = bp->b_bcount;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = flags;
	return (cm);
}
2045 static struct mfi_command *
2046 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2048 struct mfi_io_frame *io;
2050 struct mfi_disk *disk;
2051 struct mfi_command *cm;
2052 int flags, blkcount;
2053 uint32_t context = 0;
2055 if ((cm = mfi_dequeue_free(sc)) == NULL)
2058 /* Zero out the MFI frame */
2059 context = cm->cm_frame->header.context;
2060 bzero(cm->cm_frame, sizeof(union mfi_frame));
2061 cm->cm_frame->header.context = context;
2063 io = &cm->cm_frame->io;
2064 switch (bp->b_cmd & 0x03) {
2066 io->header.cmd = MFI_CMD_LD_READ;
2067 flags = MFI_CMD_DATAIN;
2070 io->header.cmd = MFI_CMD_LD_WRITE;
2071 flags = MFI_CMD_DATAOUT;
2074 panic("Invalid bio command");
2077 /* Cheat by dividing by the constant MFI_SECTOR_LEN so the compiler can avoid a runtime division */
2078 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2079 disk = bio->bio_driver_info;
2080 io->header.target_id = disk->ld_id;
2081 io->header.timeout = 0;
2082 io->header.flags = 0;
2083 io->header.scsi_status = 0;
2084 io->header.sense_len = MFI_SENSE_LEN;
2085 io->header.data_len = blkcount;
2086 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2087 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2088 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000ULL) >> 32;
2089 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
2090 cm->cm_complete = mfi_bio_complete;
2091 cm->cm_private = bio;
2092 cm->cm_data = bp->b_data;
2093 cm->cm_len = bp->b_bcount;
2094 cm->cm_sg = &io->sgl;
2095 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2096 cm->cm_flags = flags;
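/*
 * Editorial sketch, not driver code: unlike the 10-byte CDB used for
 * system PDs, the native LD I/O frame takes a full 64-bit LBA split
 * across two 32-bit fields, as done above.
 */
#if 0
static void
mfi_lba_split(uint64_t lba, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(lba >> 32);
	*lo = (uint32_t)(lba & 0xffffffff);
}
#endif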
2101 mfi_bio_complete(struct mfi_command *cm)
2105 struct mfi_frame_header *hdr;
2106 struct mfi_softc *sc;
2108 bio = cm->cm_private;
2110 hdr = &cm->cm_frame->header;
2113 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2114 bp->b_flags |= B_ERROR;
2116 device_printf(sc->mfi_dev, "I/O error, status= %d "
2117 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2118 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2119 } else if (cm->cm_error != 0) {
2120 bp->b_flags |= B_ERROR;
2123 mfi_release_command(cm);
2124 mfi_disk_complete(bio);
2128 mfi_startio(struct mfi_softc *sc)
2130 struct mfi_command *cm;
2131 struct ccb_hdr *ccbh;
2134 /* Don't bother if we're short on resources */
2135 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2138 /* Try a command that has already been prepared */
2139 cm = mfi_dequeue_ready(sc);
2142 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2143 cm = sc->mfi_cam_start(ccbh);
2146 /* Nope, so look for work on the bioq */
2148 cm = mfi_bio_command(sc);
2150 /* No work available, so exit */
2154 /* Send the command to the controller */
2155 if (mfi_mapcmd(sc, cm) != 0) {
2156 mfi_requeue_ready(cm);
2163 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2167 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2169 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2170 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2171 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2172 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2173 if (error == EINPROGRESS) {
2174 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2178 if (sc->MFA_enabled)
2179 error = mfi_tbolt_send_frame(sc, cm);
2181 error = mfi_send_frame(sc, cm);
2188 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2190 struct mfi_frame_header *hdr;
2191 struct mfi_command *cm;
2193 struct mfi_softc *sc;
2194 int i, j, first, dir;
2197 cm = (struct mfi_command *)arg;
2199 hdr = &cm->cm_frame->header;
2203 kprintf("error %d in callback\n", error);
2204 cm->cm_error = error;
2205 mfi_complete(sc, cm);
2209 /* Use an IEEE SGL only for I/Os on a SKINNY controller.
2210 * For other commands on a SKINNY controller use either
2211 * sg32 or sg64 based on the sizeof(bus_addr_t).
2212 * Also calculate the total frame size based on the type of SGL used (see the layout sketch below). */
2215 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2216 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2217 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2218 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2219 for (i = 0; i < nsegs; i++) {
2220 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2221 sgl->sg_skinny[i].len = segs[i].ds_len;
2222 sgl->sg_skinny[i].flag = 0;
2224 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2225 sge_size = sizeof(struct mfi_sg_skinny);
2226 hdr->sg_count = nsegs;
2229 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2230 first = cm->cm_stp_len;
2231 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2232 sgl->sg32[j].addr = segs[0].ds_addr;
2233 sgl->sg32[j++].len = first;
2235 sgl->sg64[j].addr = segs[0].ds_addr;
2236 sgl->sg64[j++].len = first;
2240 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2241 for (i = 0; i < nsegs; i++) {
2242 sgl->sg32[j].addr = segs[i].ds_addr + first;
2243 sgl->sg32[j++].len = segs[i].ds_len - first;
2247 for (i = 0; i < nsegs; i++) {
2248 sgl->sg64[j].addr = segs[i].ds_addr + first;
2249 sgl->sg64[j++].len = segs[i].ds_len - first;
2252 hdr->flags |= MFI_FRAME_SGL64;
2255 sge_size = sc->mfi_sge_size;
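/*
 * Editorial sketch, not driver code: the three S/G element layouts
 * selected above, written out with the sizes the driver assumes
 * (field names follow the mfireg.h structures).
 */
#if 0
struct example_sg32 {			/* legacy 32-bit SGL */
	uint32_t	addr;
	uint32_t	len;
};
struct example_sg64 {			/* MFI_FRAME_SGL64 */
	uint64_t	addr;
	uint32_t	len;
};
struct example_sg_skinny {		/* IEEE SGL, SKINNY I/O */
	uint64_t	addr;
	uint32_t	len;
	uint32_t	flag;
};
#endif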
2259 if (cm->cm_flags & MFI_CMD_DATAIN) {
2260 dir |= BUS_DMASYNC_PREREAD;
2261 hdr->flags |= MFI_FRAME_DIR_READ;
2263 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2264 dir |= BUS_DMASYNC_PREWRITE;
2265 hdr->flags |= MFI_FRAME_DIR_WRITE;
2267 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2268 cm->cm_flags |= MFI_CMD_MAPPED;
2271 * Instead of calculating the total number of frames in the
2272 * compound frame, it's already assumed that there will be at
2273 * least 1 frame, so don't compensate for the modulo of the
2274 * following division.
2276 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2277 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
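/*
 * Editorial worked example: with 64-byte MFI frames, a command whose
 * total size has grown to 160 bytes gets (160 - 1) / 64 = 2 extra
 * frames; one that still fits in a single frame gets (64 - 1) / 64 = 0.
 */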
2279 if (sc->MFA_enabled)
2280 mfi_tbolt_send_frame(sc, cm);
2282 mfi_send_frame(sc, cm);
2286 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2288 struct mfi_frame_header *hdr;
2289 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2291 hdr = &cm->cm_frame->header;
2293 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2294 cm->cm_timestamp = time_second;
2295 mfi_enqueue_busy(cm);
2297 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2298 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2302 * The bus address of the command is aligned on a 64 byte boundary,
2303 * leaving the least 6 bits as zero. For whatever reason, the
2304 * hardware wants the address shifted right by three, leaving just
2305 * 3 zero bits. These three bits are then used as a prefetching
2306 * hint for the hardware to predict how many frames need to be
2307 * fetched across the bus. If a command has more than 8 frames
2308 * then the 3 bits are set to 0x7 and the firmware uses other
2309 * information in the command to determine the total amount to fetch.
2310 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2311 * is enough for both 32bit and 64bit systems.
2313 if (cm->cm_extra_frames > 7)
2314 cm->cm_extra_frames = 7;
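/*
 * Editorial worked example of the comment above: a frame at bus
 * address 0x1000 with 2 extra frames would be issued as
 * (0x1000 >> 3) | 2 = 0x202, the low 3 bits carrying the prefetch
 * hint (see the mfi_issue_cmd implementations).
 */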
2316 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2318 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2321 /* This is a polled command, so busy-wait for it to complete. */
2322 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2329 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2330 device_printf(sc->mfi_dev, "Frame %p timed out "
2331 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2339 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2343 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2345 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2346 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2347 dir |= BUS_DMASYNC_POSTREAD;
2348 if (cm->cm_flags & MFI_CMD_DATAOUT)
2349 dir |= BUS_DMASYNC_POSTWRITE;
2351 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2352 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2353 cm->cm_flags &= ~MFI_CMD_MAPPED;
2356 cm->cm_flags |= MFI_CMD_COMPLETED;
2358 if (cm->cm_complete != NULL)
2359 cm->cm_complete(cm);
2365 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2367 struct mfi_command *cm;
2368 struct mfi_abort_frame *abort;
2370 uint32_t context = 0;
2372 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2374 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2378 /* Zero out the MFI frame */
2379 context = cm->cm_frame->header.context;
2380 bzero(cm->cm_frame, sizeof(union mfi_frame));
2381 cm->cm_frame->header.context = context;
2383 abort = &cm->cm_frame->abort;
2384 abort->header.cmd = MFI_CMD_ABORT;
2385 abort->header.flags = 0;
2386 abort->header.scsi_status = 0;
2387 abort->abort_context = cm_abort->cm_frame->header.context;
2388 abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2389 abort->abort_mfi_addr_hi =
2390 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2392 cm->cm_flags = MFI_CMD_POLLED;
2395 sc->mfi_aen_cm->cm_aen_abort = 1;
2397 mfi_release_command(cm);
2399 while (i < 5 && sc->mfi_aen_cm != NULL) {
2400 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
2409 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2412 struct mfi_command *cm;
2413 struct mfi_io_frame *io;
2415 uint32_t context = 0;
2417 if ((cm = mfi_dequeue_free(sc)) == NULL)
2420 /* Zero out the MFI frame */
2421 context = cm->cm_frame->header.context;
2422 bzero(cm->cm_frame, sizeof(union mfi_frame));
2423 cm->cm_frame->header.context = context;
2425 io = &cm->cm_frame->io;
2426 io->header.cmd = MFI_CMD_LD_WRITE;
2427 io->header.target_id = id;
2428 io->header.timeout = 0;
2429 io->header.flags = 0;
2430 io->header.scsi_status = 0;
2431 io->header.sense_len = MFI_SENSE_LEN;
2432 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2433 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2434 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2435 io->lba_hi = (lba & 0xffffffff00000000ULL) >> 32;
2436 io->lba_lo = lba & 0xffffffff;
2439 cm->cm_sg = &io->sgl;
2440 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2441 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2443 error = mfi_mapcmd(sc, cm);
2444 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2445 BUS_DMASYNC_POSTWRITE);
2446 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2447 mfi_release_command(cm);
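/*
 * Editorial usage sketch, not driver code: mfi_dump_blocks() issues a
 * polled write, which suits the kernel dump path where sleeping is not
 * an option.  A hypothetical dumper would walk the buffer in chunks;
 * the names below are illustrative only (len is in bytes, per the
 * data_len calculation above).
 */
#if 0
static int
mfi_dump_range(struct mfi_softc *sc, int ld_id, uint64_t lba,
    void *vaddr, size_t dumplen, size_t chunk)
{
	size_t off;

	for (off = 0; off < dumplen; off += chunk) {
		if (mfi_dump_blocks(sc, ld_id, lba + off / MFI_SECTOR_LEN,
		    (char *)vaddr + off, chunk))
			return (EIO);
	}
	return (0);
}
#endif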
2453 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2456 struct mfi_command *cm;
2457 struct mfi_pass_frame *pass;
2461 if ((cm = mfi_dequeue_free(sc)) == NULL)
2464 pass = &cm->cm_frame->pass;
2465 bzero(pass->cdb, 16);
2466 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2467 pass->cdb[0] = WRITE_10;
2468 pass->cdb[2] = (lba & 0xff000000) >> 24;
2469 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2470 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2471 pass->cdb[5] = (lba & 0x000000ff);
2472 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2473 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2474 pass->cdb[8] = (blkcount & 0x00ff);
2475 pass->header.target_id = id;
2476 pass->header.timeout = 0;
2477 pass->header.flags = 0;
2478 pass->header.scsi_status = 0;
2479 pass->header.sense_len = MFI_SENSE_LEN;
2480 pass->header.data_len = len;
2481 pass->header.cdb_len = 10;
2482 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2483 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2486 cm->cm_sg = &pass->sgl;
2487 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2488 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2490 error = mfi_mapcmd(sc, cm);
2491 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2492 BUS_DMASYNC_POSTWRITE);
2493 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2494 mfi_release_command(cm);
2500 mfi_open(struct dev_open_args *ap)
2502 cdev_t dev = ap->a_head.a_dev;
2503 struct mfi_softc *sc;
2508 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2509 if (sc->mfi_detaching)
2512 sc->mfi_flags |= MFI_FLAGS_OPEN;
2515 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2521 mfi_close(struct dev_close_args *ap)
2523 cdev_t dev = ap->a_head.a_dev;
2524 struct mfi_softc *sc;
2525 struct mfi_aen *mfi_aen_entry, *tmp;
2529 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2530 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2532 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2533 if (mfi_aen_entry->p == curproc) {
2534 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2536 kfree(mfi_aen_entry, M_MFIBUF);
2539 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2544 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2548 case MFI_DCMD_LD_DELETE:
2549 case MFI_DCMD_CFG_ADD:
2550 case MFI_DCMD_CFG_CLEAR:
2551 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
2559 mfi_config_unlock(struct mfi_softc *sc, int locked)
2563 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
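/*
 * Editorial usage sketch: mfi_config_lock() reports whether it took
 * the config lock for this opcode, and the caller must hand that flag
 * back to mfi_config_unlock() so only locked paths unlock, e.g.:
 *
 *	locked = mfi_config_lock(sc, dcmd->opcode);
 *	... issue and wait for the command ...
 *	mfi_config_unlock(sc, locked);
 */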
2567 * Perform pre-issue checks on commands from userland and possibly veto
2571 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2573 struct mfi_disk *ld, *ld2;
2575 struct mfi_system_pd *syspd = NULL;
2579 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2581 switch (cm->cm_frame->dcmd.opcode) {
2582 case MFI_DCMD_LD_DELETE:
2583 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2584 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2590 error = mfi_disk_disable(ld);
2592 case MFI_DCMD_CFG_CLEAR:
2593 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2594 error = mfi_disk_disable(ld);
2599 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2602 mfi_disk_enable(ld2);
2606 case MFI_DCMD_PD_STATE_SET:
2607 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2609 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2610 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2611 if (syspd->pd_id == syspd_id)
2618 error = mfi_syspd_disable(syspd);
2626 /* Perform post-issue checks on commands from userland. */
2628 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2630 struct mfi_disk *ld, *ldn;
2631 struct mfi_system_pd *syspd = NULL;
2635 switch (cm->cm_frame->dcmd.opcode) {
2636 case MFI_DCMD_LD_DELETE:
2637 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2638 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2641 KASSERT(ld != NULL, ("volume disappeared"));
2642 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2643 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2645 device_delete_child(sc->mfi_dev, ld->ld_dev);
2647 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2649 mfi_disk_enable(ld);
2651 case MFI_DCMD_CFG_CLEAR:
2652 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2653 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2655 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2656 device_delete_child(sc->mfi_dev, ld->ld_dev);
2659 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2661 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2662 mfi_disk_enable(ld);
2665 case MFI_DCMD_CFG_ADD:
2668 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2671 case MFI_DCMD_PD_STATE_SET:
2672 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2674 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2675 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2676 if (syspd->pd_id == syspd_id)
2682 /* If the transition fails then enable the syspd again */
2683 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2684 mfi_syspd_enable(syspd);
2690 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2692 struct mfi_config_data *conf_data = cm->cm_data;
2693 struct mfi_command *ld_cm = NULL;
2694 struct mfi_ld_info *ld_info = NULL;
2697 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2698 (conf_data->ld[0].params.isSSCD == 1)) {
2700 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2701 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2702 (void **)&ld_info, sizeof(*ld_info));
2704 device_printf(sc->mfi_dev, "Failed to allocate "
2705 "MFI_DCMD_LD_GET_INFO %d\n", error);
2707 kfree(ld_info, M_MFIBUF);
2710 ld_cm->cm_flags = MFI_CMD_DATAIN;
2711 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2712 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2713 if (mfi_wait_command(sc, ld_cm) != 0) {
2714 device_printf(sc->mfi_dev, "failed to get logical drive info\n");
2715 mfi_release_command(ld_cm);
2716 kfree(ld_info, M_MFIBUF);
2720 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2721 kfree(ld_info, M_MFIBUF);
2722 mfi_release_command(ld_cm);
2725 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2728 if (ld_info->ld_config.params.isSSCD == 1)
2731 mfi_release_command(ld_cm);
2732 kfree(ld_info, M_MFIBUF);
2738 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2741 struct mfi_ioc_packet *ioc;
2742 int sge_size, error;
2743 struct megasas_sge *kern_sge;
2744 ioc = (struct mfi_ioc_packet *)arg;
2746 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2747 kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2748 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2750 if (sizeof(bus_addr_t) == 8) {
2751 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2752 cm->cm_extra_frames = 2;
2753 sge_size = sizeof(struct mfi_sg64);
2755 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2756 sge_size = sizeof(struct mfi_sg32);
2759 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2760 for (i = 0; i < ioc->mfi_sge_count; i++) {
2761 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2762 1, 0, /* algnmnt, boundary */
2763 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2764 BUS_SPACE_MAXADDR, /* highaddr */
2765 NULL, NULL, /* filter, filterarg */
2766 ioc->mfi_sgl[i].iov_len,/* maxsize */
2768 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2769 BUS_DMA_ALLOCNOW, /* flags */
2770 &sc->mfi_kbuff_arr_dmat[i])) {
2771 device_printf(sc->mfi_dev,
2772 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2776 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2777 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2778 &sc->mfi_kbuff_arr_dmamap[i])) {
2779 device_printf(sc->mfi_dev,
2780 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2784 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2785 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2786 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2787 &sc->mfi_kbuff_arr_busaddr[i], 0);
2789 if (!sc->kbuff_arr[i]) {
2790 device_printf(sc->mfi_dev,
2791 "Could not allocate memory for kbuff_arr info\n");
2794 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2795 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2797 if (sizeof(bus_addr_t) == 8) {
2798 cm->cm_frame->stp.sgl.sg64[i].addr =
2799 kern_sge[i].phys_addr;
2800 cm->cm_frame->stp.sgl.sg64[i].len =
2801 ioc->mfi_sgl[i].iov_len;
2803 cm->cm_frame->stp.sgl.sg32[i].addr =
2804 kern_sge[i].phys_addr;
2805 cm->cm_frame->stp.sgl.sg32[i].len =
2806 ioc->mfi_sgl[i].iov_len;
2809 error = copyin(ioc->mfi_sgl[i].iov_base,
2811 ioc->mfi_sgl[i].iov_len);
2813 device_printf(sc->mfi_dev, "Copy in failed\n");
2818 cm->cm_flags |= MFI_CMD_MAPPED;
2823 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2825 struct mfi_command *cm;
2826 struct mfi_dcmd_frame *dcmd;
2827 void *ioc_buf = NULL;
2829 int error = 0, locked;
2832 if (ioc->buf_size > 0) {
2833 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2834 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2836 device_printf(sc->mfi_dev, "failed to copyin\n");
2837 kfree(ioc_buf, M_MFIBUF);
2842 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2844 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2845 while ((cm = mfi_dequeue_free(sc)) == NULL)
2846 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2848 /* Save context for later */
2849 context = cm->cm_frame->header.context;
2851 dcmd = &cm->cm_frame->dcmd;
2852 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2854 cm->cm_sg = &dcmd->sgl;
2855 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2856 cm->cm_data = ioc_buf;
2857 cm->cm_len = ioc->buf_size;
2859 /* restore context */
2860 cm->cm_frame->header.context = context;
2862 /* Cheat since we don't know if we're writing or reading */
2863 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2865 error = mfi_check_command_pre(sc, cm);
2869 error = mfi_wait_command(sc, cm);
2871 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2874 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2875 mfi_check_command_post(sc, cm);
2877 mfi_release_command(cm);
2878 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2879 mfi_config_unlock(sc, locked);
2880 if (ioc->buf_size > 0)
2881 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2883 kfree(ioc_buf, M_MFIBUF);
2887 #define PTRIN(p) ((void *)(uintptr_t)(p))
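/*
 * Editorial note: PTRIN recovers a kernel void * from a user pointer
 * that was carried across the ABI as an integer; the uintptr_t hop
 * keeps the integer-to-pointer conversion well defined.  Typical use:
 * copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), temp, len).
 */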
2890 mfi_ioctl(struct dev_ioctl_args *ap)
2892 cdev_t dev = ap->a_head.a_dev;
2893 u_long cmd = ap->a_cmd;
2894 int flag = ap->a_fflag;
2895 caddr_t arg = ap->a_data;
2896 struct mfi_softc *sc;
2897 union mfi_statrequest *ms;
2898 struct mfi_ioc_packet *ioc;
2899 struct mfi_ioc_aen *aen;
2900 struct mfi_command *cm = NULL;
2902 union mfi_sense_ptr sense_ptr;
2903 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2906 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2916 if (sc->hw_crit_error)
2919 if (sc->issuepend_done == 0)
2924 ms = (union mfi_statrequest *)arg;
2925 switch (ms->ms_item) {
2930 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2931 sizeof(struct mfi_qstat));
2938 case MFIIO_QUERY_DISK:
2940 struct mfi_query_disk *qd;
2941 struct mfi_disk *ld;
2943 qd = (struct mfi_query_disk *)arg;
2944 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2945 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2946 if (ld->ld_id == qd->array_id)
2951 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2955 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2957 bzero(qd->devname, SPECNAMELEN + 1);
2958 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2959 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2964 devclass_t devclass;
2965 ioc = (struct mfi_ioc_packet *)arg;
2968 adapter = ioc->mfi_adapter_no;
2969 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2970 devclass = devclass_find("mfi");
2971 sc = devclass_get_softc(devclass, adapter);
2973 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2974 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2975 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2978 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2982 * save off original context since copying from user
2983 * will clobber some data
2985 context = cm->cm_frame->header.context;
2986 cm->cm_frame->header.context = cm->cm_index;
2988 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2989 2 * MEGAMFI_FRAME_SIZE);
2990 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2991 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2992 cm->cm_frame->header.scsi_status = 0;
2993 cm->cm_frame->header.pad0 = 0;
2994 if (ioc->mfi_sge_count) {
2996 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3000 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3001 cm->cm_flags |= MFI_CMD_DATAIN;
3002 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3003 cm->cm_flags |= MFI_CMD_DATAOUT;
3004 /* Legacy app shim */
3005 if (cm->cm_flags == 0)
3006 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3007 cm->cm_len = cm->cm_frame->header.data_len;
3008 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3009 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3010 cm->cm_len += cm->cm_stp_len;
3013 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3014 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3020 /* restore header context */
3021 cm->cm_frame->header.context = context;
3023 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3024 res = mfi_stp_cmd(sc, cm, arg);
3029 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3030 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3031 for (i = 0; i < ioc->mfi_sge_count; i++) {
3032 addr = ioc->mfi_sgl[i].iov_base;
3033 len = ioc->mfi_sgl[i].iov_len;
3034 error = copyin(addr, temp, len);
3036 device_printf(sc->mfi_dev,
3037 "Copy in failed\n");
3045 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3046 locked = mfi_config_lock(sc,
3047 cm->cm_frame->dcmd.opcode);
3049 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3050 cm->cm_frame->pass.sense_addr_lo =
3051 (uint32_t)cm->cm_sense_busaddr;
3052 cm->cm_frame->pass.sense_addr_hi =
3053 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3055 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3056 skip_pre_post = mfi_check_for_sscd(sc, cm);
3057 if (!skip_pre_post) {
3058 error = mfi_check_command_pre(sc, cm);
3060 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3065 if ((error = mfi_wait_command(sc, cm)) != 0) {
3066 device_printf(sc->mfi_dev,
3067 "Controller polled failed\n");
3068 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3073 mfi_check_command_post(sc, cm);
3074 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3076 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3078 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3079 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3080 for (i = 0; i < ioc->mfi_sge_count; i++) {
3081 addr = ioc->mfi_sgl[i].iov_base;
3082 len = ioc->mfi_sgl[i].iov_len;
3083 error = copyout(temp, addr, len);
3085 device_printf(sc->mfi_dev,
3086 "Copy out failed\n");
3094 if (ioc->mfi_sense_len) {
3095 /* get user-space sense ptr then copy out sense */
3096 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3097 &sense_ptr.sense_ptr_data[0],
3098 sizeof(sense_ptr.sense_ptr_data));
3099 error = copyout(cm->cm_sense, sense_ptr.user_space,
3100 ioc->mfi_sense_len);
3102 device_printf(sc->mfi_dev,
3103 "Copy out failed\n");
3108 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3110 mfi_config_unlock(sc, locked);
3112 kfree(data, M_MFIBUF);
3113 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3114 for (i = 0; i < 2; i++) {
3115 if (sc->kbuff_arr[i]) {
3116 if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3118 sc->mfi_kbuff_arr_dmat[i],
3119 sc->mfi_kbuff_arr_dmamap[i]
3121 if (sc->kbuff_arr[i] != NULL)
3123 sc->mfi_kbuff_arr_dmat[i],
3125 sc->mfi_kbuff_arr_dmamap[i]
3127 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3128 bus_dma_tag_destroy(
3129 sc->mfi_kbuff_arr_dmat[i]);
3134 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3135 mfi_release_command(cm);
3136 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3142 aen = (struct mfi_ioc_aen *)arg;
3143 error = mfi_aen_register(sc, aen->aen_seq_num,
3144 aen->aen_class_locale);
3147 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3149 devclass_t devclass;
3150 struct mfi_linux_ioc_packet l_ioc;
3153 devclass = devclass_find("mfi");
3154 if (devclass == NULL)
3157 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3160 adapter = l_ioc.lioc_adapter_no;
3161 sc = devclass_get_softc(devclass, adapter);
3164 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3168 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3170 devclass_t devclass;
3171 struct mfi_linux_ioc_aen l_aen;
3174 devclass = devclass_find("mfi");
3175 if (devclass == NULL)
3178 error = copyin(arg, &l_aen, sizeof(l_aen));
3181 adapter = l_aen.laen_adapter_no;
3182 sc = devclass_get_softc(devclass, adapter);
3185 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3189 case MFIIO_PASSTHRU:
3190 error = mfi_user_command(sc, iop);
3193 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3202 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
3204 struct mfi_softc *sc;
3205 struct mfi_linux_ioc_packet l_ioc;
3206 struct mfi_linux_ioc_aen l_aen;
3207 struct mfi_command *cm = NULL;
3208 struct mfi_aen *mfi_aen_entry;
3209 union mfi_sense_ptr sense_ptr;
3211 uint8_t *data = NULL, *temp;
3218 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3219 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3223 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3227 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3228 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3229 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3232 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3236 * save off original context since copying from user
3237 * will clobber some data
3239 context = cm->cm_frame->header.context;
3241 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3242 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3243 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3244 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3245 cm->cm_frame->header.scsi_status = 0;
3246 cm->cm_frame->header.pad0 = 0;
3247 if (l_ioc.lioc_sge_count)
3249 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3251 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3252 cm->cm_flags |= MFI_CMD_DATAIN;
3253 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3254 cm->cm_flags |= MFI_CMD_DATAOUT;
3255 cm->cm_len = cm->cm_frame->header.data_len;
3257 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3258 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3264 /* restore header context */
3265 cm->cm_frame->header.context = context;
3268 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3269 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3270 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3272 l_ioc.lioc_sgl[i].iov_len);
3274 device_printf(sc->mfi_dev,
3275 "Copy in failed\n");
3278 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3282 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3283 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3285 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3286 cm->cm_frame->pass.sense_addr_lo =
3287 (uint32_t)cm->cm_sense_busaddr;
3288 cm->cm_frame->pass.sense_addr_hi =
3289 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3292 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3293 error = mfi_check_command_pre(sc, cm);
3295 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3299 if ((error = mfi_wait_command(sc, cm)) != 0) {
3300 device_printf(sc->mfi_dev,
3301 "Controller polled failed\n");
3302 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3306 mfi_check_command_post(sc, cm);
3307 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3310 if (cm->cm_flags & MFI_CMD_DATAIN) {
3311 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3312 error = copyout(temp,
3313 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3314 l_ioc.lioc_sgl[i].iov_len);
3316 device_printf(sc->mfi_dev,
3317 "Copy out failed\n");
3320 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3324 if (l_ioc.lioc_sense_len) {
3325 /* get user-space sense ptr then copy out sense */
3326 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3327 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3328 &sense_ptr.sense_ptr_data[0],
3329 sizeof(sense_ptr.sense_ptr_data));
3332 * only 32bit Linux support so zero out any
3333 * address over 32bit
3335 sense_ptr.addr.high = 0;
3337 error = copyout(cm->cm_sense, sense_ptr.user_space,
3338 l_ioc.lioc_sense_len);
3340 device_printf(sc->mfi_dev,
3341 "Copy out failed\n");
3346 error = copyout(&cm->cm_frame->header.cmd_status,
3347 &((struct mfi_linux_ioc_packet*)arg)
3348 ->lioc_frame.hdr.cmd_status,
3351 device_printf(sc->mfi_dev,
3352 "Copy out failed\n");
3357 mfi_config_unlock(sc, locked);
3359 kfree(data, M_MFIBUF);
3361 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3362 mfi_release_command(cm);
3363 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3367 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3368 error = copyin(arg, &l_aen, sizeof(l_aen));
3371 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3372 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3374 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3375 if (mfi_aen_entry != NULL) {
3376 mfi_aen_entry->p = curproc;
3377 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3380 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3381 l_aen.laen_class_locale);
3384 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3386 kfree(mfi_aen_entry, M_MFIBUF);
3388 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3392 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3401 mfi_kqfilter(struct dev_kqfilter_args *ap)
3403 cdev_t dev = ap->a_head.a_dev;
3404 struct knote *kn = ap->a_kn;
3405 struct mfi_softc *sc;
3406 struct klist *klist;
3411 switch (kn->kn_filter) {
3413 kn->kn_fop = &mfi_read_filterops;
3414 kn->kn_hook = (caddr_t)sc;
3417 kn->kn_fop = &mfi_write_filterops;
3418 kn->kn_hook = (caddr_t)sc;
3421 ap->a_result = EOPNOTSUPP;
3425 klist = &sc->mfi_kq.ki_note;
3426 knote_insert(klist, kn);
3432 mfi_filter_detach(struct knote *kn)
3434 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3435 struct klist *klist = &sc->mfi_kq.ki_note;
3437 knote_remove(klist, kn);
3441 mfi_filter_read(struct knote *kn, long hint)
3443 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3446 if (sc->mfi_aen_triggered != 0) {
3448 sc->mfi_aen_triggered = 0;
3450 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3451 kn->kn_flags |= EV_ERROR;
3454 sc->mfi_poll_waiting = 1;
3460 mfi_filter_write(struct knote *kn, long hint)
3468 struct mfi_softc *sc;
3469 struct mfi_command *cm;
3475 dc = devclass_find("mfi");
3477 kprintf("No mfi dev class\n");
3481 for (i = 0; ; i++) {
3482 sc = devclass_get_softc(dc, i);
3485 device_printf(sc->mfi_dev, "Dumping\n\n");
3487 deadline = time_second - MFI_CMD_TIMEOUT;
3488 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3489 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3490 if (cm->cm_timestamp < deadline) {
3491 device_printf(sc->mfi_dev,
3492 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3493 cm, (int)(time_second - cm->cm_timestamp));
3504 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3511 mfi_timeout(void *data)
3513 struct mfi_softc *sc = (struct mfi_softc *)data;
3514 struct mfi_command *cm;
3518 deadline = time_second - MFI_CMD_TIMEOUT;
3519 if (sc->adpreset == 0) {
3520 if (!mfi_tbolt_reset(sc)) {
3521 callout_reset(&sc->mfi_watchdog_callout,
3522 MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3526 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3527 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3528 if (sc->mfi_aen_cm == cm)
3530 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3531 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3532 cm->cm_timestamp = time_second;
3534 device_printf(sc->mfi_dev,
3535 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3536 cm, (int)(time_second - cm->cm_timestamp));
3538 MFI_VALIDATE_CMD(sc, cm);
3549 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3551 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,