2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * Redistribution and use in source and binary forms, with or without
54 * modification, are permitted provided that the following conditions
57 * Copyright 1994-2009 The FreeBSD Project.
58 * All rights reserved.
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
66 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
68 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
69 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
70 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
71 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
72 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
73 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
74 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
75 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
76 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
78 * The views and conclusions contained in the software and documentation
79 * are those of the authors and should not be interpreted as representing
80 * official policies, either expressed or implied, of the FreeBSD Project.
82 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.57 2011/07/14 20:20:33 jhb Exp $
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/sysctl.h>
90 #include <sys/malloc.h>
91 #include <sys/kernel.h>
93 #include <sys/eventhandler.h>
95 #include <sys/bus_dma.h>
97 #include <sys/ioccom.h>
100 #include <sys/signalvar.h>
101 #include <sys/device.h>
102 #include <sys/mplock2.h>
104 #include <bus/cam/scsi/scsi_all.h>
106 #include <dev/raid/mfi/mfireg.h>
107 #include <dev/raid/mfi/mfi_ioctl.h>
108 #include <dev/raid/mfi/mfivar.h>
110 static int mfi_alloc_commands(struct mfi_softc *);
111 static int mfi_comms_init(struct mfi_softc *);
112 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
113 static int mfi_get_controller_info(struct mfi_softc *);
114 static int mfi_get_log_state(struct mfi_softc *,
115 struct mfi_evt_log_state **);
116 static int mfi_parse_entries(struct mfi_softc *, int, int);
117 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
118 uint32_t, void **, size_t);
119 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
120 static void mfi_startup(void *arg);
121 static void mfi_intr(void *arg);
122 static void mfi_ldprobe(struct mfi_softc *sc);
123 static void mfi_syspdprobe(struct mfi_softc *sc);
124 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
125 static void mfi_aen_complete(struct mfi_command *);
126 static int mfi_aen_setup(struct mfi_softc *, uint32_t);
127 static int mfi_add_ld(struct mfi_softc *sc, int);
128 static void mfi_add_ld_complete(struct mfi_command *);
129 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
130 static void mfi_add_sys_pd_complete(struct mfi_command *);
131 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
132 static void mfi_bio_complete(struct mfi_command *);
133 static struct mfi_command * mfi_build_ldio(struct mfi_softc *,struct bio*);
134 static struct mfi_command * mfi_build_syspdio(struct mfi_softc *,struct bio*);
135 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
136 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
137 static void mfi_complete(struct mfi_softc *, struct mfi_command *);
138 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
139 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
140 static void mfi_timeout(void *);
141 static int mfi_user_command(struct mfi_softc *,
142 struct mfi_ioc_passthru *);
143 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
144 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
145 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
146 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
147 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
148 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
149 static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
150 static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
151 static void mfi_filter_detach(struct knote *);
152 static int mfi_filter_read(struct knote *, long);
153 static int mfi_filter_write(struct knote *, long);
155 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
156 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
157 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
158 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
159 0, "event message locale");
161 static int mfi_event_class = MFI_EVT_CLASS_INFO;
162 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
163 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
164 0, "event message class");
166 static int mfi_max_cmds = 128;
167 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
168 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
171 /* Management interface */
172 static d_open_t mfi_open;
173 static d_close_t mfi_close;
174 static d_ioctl_t mfi_ioctl;
175 static d_kqfilter_t mfi_kqfilter;
177 static struct dev_ops mfi_ops = {
180 .d_close = mfi_close,
181 .d_ioctl = mfi_ioctl,
182 .d_kqfilter = mfi_kqfilter,
185 static struct filterops mfi_read_filterops =
186 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
187 static struct filterops mfi_write_filterops =
188 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
190 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
192 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/*
 * Enable firmware->host interrupts on xscale (1064R) controllers by
 * writing the outbound interrupt mask register.
 */
195 mfi_enable_intr_xscale(struct mfi_softc *sc)
197 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on PPC-style controllers.  The doorbell-clear and
 * mask values differ per controller generation (1078 / GEN2 / SKINNY),
 * selected via sc->mfi_flags.
 */
201 mfi_enable_intr_ppc(struct mfi_softc *sc)
203 if (sc->mfi_flags & MFI_FLAGS_1078) {
204 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
205 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
206 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
207 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
208 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
209 } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
/* SKINNY variant has no doorbell-clear write; only the mask is set. */
210 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
/* Read the raw firmware status word from the xscale outbound message reg. */
215 mfi_read_fw_status_xscale(struct mfi_softc *sc)
217 return MFI_READ4(sc, MFI_OMSG0);
/* Read the raw firmware status word from the PPC outbound scratchpad reg. */
221 mfi_read_fw_status_ppc(struct mfi_softc *sc)
223 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether a valid interrupt is pending on an xscale controller and
 * acknowledge it by writing the status back.  Bails out early when the
 * MFI_OSTS_INTR_VALID bit is clear (interrupt not ours).
 */
227 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
231 status = MFI_READ4(sc, MFI_OSTS);
232 if ((status & MFI_OSTS_INTR_VALID) == 0)
235 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check/clear a pending interrupt on PPC-style controllers.  The
 * reply-message bit tested depends on the controller generation; if the
 * interrupt is not ours, return without touching hardware state.
 * SKINNY controllers ack via MFI_OSTS, the others via the doorbell
 * clear register MFI_ODCR0.
 */
240 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
244 status = MFI_READ4(sc, MFI_OSTS);
245 if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
246 ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
247 ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
250 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
251 MFI_WRITE4(sc, MFI_OSTS, status);
253 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to an xscale controller: the frame bus address is
 * shifted right 3 and ORed with the extra-frame count before being
 * written to the inbound queue port.
 */
258 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
260 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
/*
 * Post a command to a PPC-style controller.  SKINNY controllers use the
 * 64-bit inbound queue port pair (low word carries address | frame
 * count << 1 | 1, high word zero); others use the single 32-bit port.
 */
264 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
266 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
267 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
268 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
270 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
/*
 * Drive the controller firmware into the READY state, issuing the
 * appropriate inbound-doorbell command for each intermediate state
 * (clear handshake, ready transition, hotplug ack) and polling the
 * firmware status register until the state changes.  DEVICE_SCAN gets
 * an extended 180-second window and uses the full status word to detect
 * scan progress.  Fails if the firmware stays stuck in one state.
 */
275 mfi_transition_firmware(struct mfi_softc *sc)
277 uint32_t fw_state, cur_state;
279 uint32_t cur_abs_reg_val = 0;
280 uint32_t prev_abs_reg_val = 0;
281 bus_space_handle_t idb;
283 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
284 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
/* SKINNY controllers expose the inbound doorbell at a different offset. */
285 idb = sc->mfi_flags & MFI_FLAGS_SKINNY ? MFI_SKINNY_IDB : MFI_IDB;
286 while (fw_state != MFI_FWSTATE_READY) {
288 device_printf(sc->mfi_dev, "Waiting for firmware to "
290 cur_state = fw_state;
292 case MFI_FWSTATE_FAULT:
293 device_printf(sc->mfi_dev, "Firmware fault\n");
295 case MFI_FWSTATE_WAIT_HANDSHAKE:
296 MFI_WRITE4(sc, idb, MFI_FWINIT_CLEAR_HANDSHAKE);
299 case MFI_FWSTATE_OPERATIONAL:
300 MFI_WRITE4(sc, idb, MFI_FWINIT_READY);
303 case MFI_FWSTATE_UNDEFINED:
304 case MFI_FWSTATE_BB_INIT:
307 case MFI_FWSTATE_FW_INIT:
308 case MFI_FWSTATE_FLUSH_CACHE:
311 case MFI_FWSTATE_DEVICE_SCAN:
312 max_wait = 180; /* wait for 180 seconds */
313 prev_abs_reg_val = cur_abs_reg_val;
315 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
316 MFI_WRITE4(sc, idb, MFI_FWINIT_HOTPLUG);
320 device_printf(sc->mfi_dev,"Unknown firmware state %#x\n",
/* Poll in tenth-of-max_wait-second steps for a state change. */
324 for (i = 0; i < (max_wait * 10); i++) {
325 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
326 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
327 if (fw_state == cur_state)
332 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
333 /* Check the device scanning progress */
334 if (prev_abs_reg_val != cur_abs_reg_val)
337 if (fw_state == cur_state) {
338 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * busdma load callbacks: stash the single mapped segment's bus address
 * into the caller-supplied pointer.  The 64-bit variant is compiled only
 * on x86_64; the 32-bit variant covers other platforms.
 */
346 #if defined(__x86_64__)
348 mfi_addr64_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
353 *addr = segs[0].ds_addr;
357 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
362 *addr = segs[0].ds_addr;
/*
 * Main attach routine.  Initializes locks and queues, selects the
 * per-controller-generation hardware access methods, waits for the
 * firmware to reach READY, sizes and allocates the DMA regions (comms
 * queues, command frames, sense buffers), allocates the command pool,
 * initializes firmware comms, registers the interrupt handler, config
 * intrhook, shutdown handler, control device, and sysctl nodes, and
 * finally starts the command-timeout watchdog.
 */
367 mfi_attach(struct mfi_softc *sc)
370 int error, commsz, framessz, sensesz;
371 int frames, unit, max_fw_sge;
373 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.981\n");
375 lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
376 lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
377 TAILQ_INIT(&sc->mfi_ld_tqh);
378 TAILQ_INIT(&sc->mfi_syspd_tqh);
379 TAILQ_INIT(&sc->mfi_aen_pids);
380 TAILQ_INIT(&sc->mfi_cam_ccbq);
/* Select register-access methods: xscale for 1064R, PPC style otherwise. */
387 if (sc->mfi_flags & MFI_FLAGS_1064R) {
388 sc->mfi_enable_intr = mfi_enable_intr_xscale;
389 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
390 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
391 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
393 sc->mfi_enable_intr = mfi_enable_intr_ppc;
394 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
395 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
396 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
400 /* Before we get too far, see if the firmware is working */
401 if ((error = mfi_transition_firmware(sc)) != 0) {
402 device_printf(sc->mfi_dev, "Firmware not in READY state, "
403 "error %d\n", error);
408 * Get information needed for sizing the contiguous memory for the
409 * frame pool. Size down the sgl parameter since we know that
410 * we will never need more than what's required for MAXPHYS.
411 * It would be nice if these constants were available at runtime
412 * instead of compile time.
414 status = sc->mfi_read_fw_status(sc);
415 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
416 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
417 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
420 * Create the dma tag for data buffers. Used both for block I/O
421 * and for various internal data queries.
423 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
424 1, 0, /* algnmnt, boundary */
425 BUS_SPACE_MAXADDR, /* lowaddr */
426 BUS_SPACE_MAXADDR, /* highaddr */
427 NULL, NULL, /* filter, filterarg */
428 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
429 sc->mfi_max_sge, /* nsegments */
430 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
431 BUS_DMA_ALLOCNOW, /* flags */
432 &sc->mfi_buffer_dmat)) {
433 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
438 * Allocate DMA memory for the comms queues. Keep it under 4GB for
439 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
440 * entry, so the calculated size here will be 1 more than
441 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
443 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
444 sizeof(struct mfi_hwcomms);
445 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
446 1, 0, /* algnmnt, boundary */
447 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
448 BUS_SPACE_MAXADDR, /* highaddr */
449 NULL, NULL, /* filter, filterarg */
450 commsz, /* maxsize */
452 commsz, /* maxsegsize */
454 &sc->mfi_comms_dmat)) {
455 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
458 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
459 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
460 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
463 bzero(sc->mfi_comms, commsz);
464 #if defined(__x86_64__)
465 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
466 sc->mfi_comms, commsz, mfi_addr64_cb, &sc->mfi_comms_busaddr, 0);
468 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
469 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
473 * Allocate DMA memory for the command frames. Keep them in the
474 * lower 4GB for efficiency. Calculate the size of the commands at
475 * the same time; each command is one 64 byte frame plus a set of
476 * additional frames for holding sg lists or other data.
477 * The assumption here is that the SG list will start at the second
478 * frame and not use the unused bytes in the first frame. While this
479 * isn't technically correct, it simplifies the calculation and allows
480 * for command frames that might be larger than an mfi_io_frame.
482 if (sizeof(bus_addr_t) == 8) {
483 sc->mfi_sge_size = sizeof(struct mfi_sg64);
484 sc->mfi_flags |= MFI_FLAGS_SG64;
486 sc->mfi_sge_size = sizeof(struct mfi_sg32);
488 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
489 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
490 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
491 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
492 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
493 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
494 64, 0, /* algnmnt, boundary */
495 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
496 BUS_SPACE_MAXADDR, /* highaddr */
497 NULL, NULL, /* filter, filterarg */
498 framessz, /* maxsize */
500 framessz, /* maxsegsize */
502 &sc->mfi_frames_dmat)) {
503 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
506 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
507 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
508 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
511 bzero(sc->mfi_frames, framessz);
512 #if defined(__x86_64__)
513 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
514 sc->mfi_frames, framessz, mfi_addr64_cb, &sc->mfi_frames_busaddr,0);
516 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
517 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
521 * Allocate DMA memory for the frame sense data. Keep them in the
522 * lower 4GB for efficiency
524 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
525 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
526 4, 0, /* algnmnt, boundary */
527 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
528 BUS_SPACE_MAXADDR, /* highaddr */
529 NULL, NULL, /* filter, filterarg */
530 sensesz, /* maxsize */
532 sensesz, /* maxsegsize */
534 &sc->mfi_sense_dmat)) {
535 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
538 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
539 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
540 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
543 #if defined(__x86_64__)
544 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
545 sc->mfi_sense, sensesz, mfi_addr64_cb, &sc->mfi_sense_busaddr, 0);
547 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
548 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
551 if ((error = mfi_alloc_commands(sc)) != 0)
554 if ((error = mfi_comms_init(sc)) != 0)
557 if ((error = mfi_get_controller_info(sc)) != 0)
560 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * BUG(review): the comma operator makes the tested expression the
 * constant 0, so this condition is never true and mfi_aen_setup()
 * failures are silently ignored.  Should read:
 *     if ((error = mfi_aen_setup(sc, 0)) != 0) {
 * (this was fixed in later FreeBSD revisions of mfi.c).
 */
561 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
562 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
565 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
568 * Set up the interrupt handler. XXX This should happen in
572 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
573 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
574 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
577 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
578 mfi_intr, sc, &sc->mfi_intr, NULL)) {
579 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
583 /* Register a config hook to probe the bus for arrays */
584 sc->mfi_ich.ich_func = mfi_startup;
585 sc->mfi_ich.ich_arg = sc;
586 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
587 device_printf(sc->mfi_dev, "Cannot establish configuration "
593 * Register a shutdown handler.
595 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
596 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
597 device_printf(sc->mfi_dev, "Warning: shutdown event "
598 "registration failed\n");
602 * Create the control device for doing management
604 unit = device_get_unit(sc->mfi_dev);
605 sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
606 0640, "mfi%d", unit);
/*
 * NOTE(review): the alias is created before the NULL check below;
 * make_dev_alias() on a NULL cdev looks suspect — verify ordering
 * against the upstream driver.
 */
608 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
609 if (sc->mfi_cdev != NULL)
610 sc->mfi_cdev->si_drv1 = sc;
611 sysctl_ctx_init(&sc->mfi_sysctl_ctx);
612 sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
613 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
614 device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
615 if (sc->mfi_sysctl_tree == NULL) {
616 device_printf(sc->mfi_dev, "can't add sysctl node\n");
619 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
620 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
621 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
622 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
623 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
624 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
625 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
626 &sc->mfi_keep_deleted_volumes, 0,
627 "Don't detach the mfid device for a busy volume that is deleted");
629 device_add_child(sc->mfi_dev, "mfip", -1);
630 bus_generic_attach(sc->mfi_dev);
632 /* Start the timeout watchdog */
633 callout_init(&sc->mfi_watchdog_callout);
634 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
/*
 * Allocate the driver command pool: min(tunable mfi_max_cmds, firmware
 * limit) mfi_command structures, each wired to its slice of the
 * preallocated frame and sense DMA regions, with the frame context set
 * to the command's pool index.  Each command gets its own data dmamap
 * and is placed on the free queue via mfi_release_command().
 */
641 mfi_alloc_commands(struct mfi_softc *sc)
643 struct mfi_command *cm;
647 * XXX Should we allocate all the commands up front, or allocate on
648 * demand later like 'aac' does?
650 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
652 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
653 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
655 sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
658 for (i = 0; i < ncmds; i++) {
659 cm = &sc->mfi_commands[i];
660 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
661 sc->mfi_cmd_size * i);
662 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
663 sc->mfi_cmd_size * i;
664 cm->cm_frame->header.context = i;
665 cm->cm_sense = &sc->mfi_sense[i];
666 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
/* Only commands whose dmamap creation succeeds enter the free pool. */
669 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
670 &cm->cm_dmamap) == 0)
671 mfi_release_command(cm);
674 sc->mfi_total_cmds++;
/*
 * Return a command to the free pool: scrub the frame header (preserving
 * the context word, which encodes the pool index), clear the first S/G
 * entry if data was attached, reset per-command bookkeeping, and
 * enqueue on the free list.
 */
681 mfi_release_command(struct mfi_command *cm)
683 struct mfi_frame_header *hdr;
687 * Zero out the important fields of the frame, but make sure the
688 * context field is preserved. For efficiency, handle the fields
689 * as 32 bit words. Clear out the first S/G entry too for safety.
691 hdr = &cm->cm_frame->header;
692 if (cm->cm_data != NULL && hdr->sg_count) {
693 cm->cm_sg->sg32[0].len = 0;
694 cm->cm_sg->sg32[0].addr = 0;
697 hdr_data = (uint32_t *)cm->cm_frame;
698 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
699 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
700 hdr_data[4] = 0; /* flags, timeout */
701 hdr_data[5] = 0; /* data_len */
703 cm->cm_extra_frames = 0;
705 cm->cm_complete = NULL;
706 cm->cm_private = NULL;
709 cm->cm_total_frame_size = 0;
711 mfi_enqueue_free(cm);
/*
 * Build a DCMD command frame for the given opcode.  Optionally
 * allocates a bufsize-byte data buffer (when bufp is non-NULL and
 * *bufp is NULL) which becomes cm_private/cm_data and is handed back
 * through *bufp.  The io lock must be held (asserted below); returns
 * the prepared command via *cmp.  Caller sets direction flags and maps
 * the command.
 */
715 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
716 void **bufp, size_t bufsize)
718 struct mfi_command *cm;
719 struct mfi_dcmd_frame *dcmd;
721 uint32_t context = 0;
723 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
725 cm = mfi_dequeue_free(sc);
729 /* Zero out the MFI frame */
730 context = cm->cm_frame->header.context;
731 bzero(cm->cm_frame, sizeof(union mfi_frame));
732 cm->cm_frame->header.context = context;
734 if ((bufsize > 0) && (bufp != NULL)) {
736 buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
738 mfi_release_command(cm);
747 dcmd = &cm->cm_frame->dcmd;
748 bzero(dcmd->mbox, MFI_MBOX_SIZE);
749 dcmd->header.cmd = MFI_CMD_DCMD;
750 dcmd->header.timeout = 0;
751 dcmd->header.flags = 0;
752 dcmd->header.data_len = bufsize;
753 dcmd->header.scsi_status = 0;
754 dcmd->opcode = opcode;
755 cm->cm_sg = &dcmd->sgl;
756 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
759 cm->cm_private = buf;
760 cm->cm_len = bufsize;
/* Hand the newly-allocated buffer back to the caller if they wanted one. */
763 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI_CMD_INIT frame that tells the firmware where the host
 * communication area (reply queue, producer/consumer indices) lives.
 * The mfi_init_qinfo structure is built in the frame's S/G area (one
 * MFI_FRAME_SIZE past the init frame) and referenced by bus address.
 * Issued polled, under the io lock.
 */
769 mfi_comms_init(struct mfi_softc *sc)
771 struct mfi_command *cm;
772 struct mfi_init_frame *init;
773 struct mfi_init_qinfo *qinfo;
775 uint32_t context = 0;
777 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
778 if ((cm = mfi_dequeue_free(sc)) == NULL)
781 /* Zero out the MFI frame */
782 context = cm->cm_frame->header.context;
783 bzero(cm->cm_frame, sizeof(union mfi_frame));
784 cm->cm_frame->header.context = context;
787 * Abuse the SG list area of the frame to hold the init_qinfo
790 init = &cm->cm_frame->init;
791 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
793 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* Reply queue holds one more entry than the firmware command count. */
794 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
795 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
796 offsetof(struct mfi_hwcomms, hw_reply_q);
797 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
798 offsetof(struct mfi_hwcomms, hw_pi);
799 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
800 offsetof(struct mfi_hwcomms, hw_ci);
802 init->header.cmd = MFI_CMD_INIT;
803 init->header.data_len = sizeof(struct mfi_init_qinfo);
804 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
806 cm->cm_flags = MFI_CMD_POLLED;
808 if ((error = mfi_mapcmd(sc, cm)) != 0) {
809 device_printf(sc->mfi_dev, "failed to send init command\n");
810 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
813 mfi_release_command(cm);
814 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Query the controller via MFI_DCMD_CTRL_GETINFO (polled DATAIN) and
 * derive sc->mfi_max_io from the reported stripe/strip limits and max
 * request size.  On command failure, falls back to an SGE-based
 * estimate instead of failing the attach.
 */
820 mfi_get_controller_info(struct mfi_softc *sc)
822 struct mfi_command *cm = NULL;
823 struct mfi_ctrl_info *ci = NULL;
824 uint32_t max_sectors_1, max_sectors_2;
827 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
828 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
829 (void **)&ci, sizeof(*ci));
832 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
834 if ((error = mfi_mapcmd(sc, cm)) != 0) {
835 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: size max I/O from the S/G limit rather than firmware info. */
836 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
842 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
843 BUS_DMASYNC_POSTREAD);
844 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
846 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
847 max_sectors_2 = ci->max_request_size;
848 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
854 mfi_release_command(cm);
855 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch the controller event-log state (sequence numbers) with a polled
 * MFI_DCMD_CTRL_EVENT_GETINFO DATAIN command.  The result buffer is
 * allocated by mfi_dcmd_command() and returned through *log_state;
 * the caller owns and must kfree() it.
 */
860 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
862 struct mfi_command *cm = NULL;
865 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
866 (void **)log_state, sizeof(**log_state));
869 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
871 if ((error = mfi_mapcmd(sc, cm)) != 0) {
872 device_printf(sc->mfi_dev, "Failed to get log state\n");
876 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
877 BUS_DMASYNC_POSTREAD);
878 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
882 mfi_release_command(cm);
/*
 * Set up asynchronous event notification.  When starting from sequence
 * 0, fetch the log state, replay any events recorded between the last
 * shutdown and the newest entry, and resume from the newest sequence
 * number; then register for AENs with the configured locale/class.
 */
888 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
890 struct mfi_evt_log_state *log_state = NULL;
891 union mfi_evt class_locale;
895 class_locale.members.reserved = 0;
896 class_locale.members.locale = mfi_event_locale;
897 class_locale.members.evt_class = mfi_event_class;
899 if (seq_start == 0) {
900 error = mfi_get_log_state(sc, &log_state);
903 kfree(log_state, M_MFIBUF);
908 * Walk through any events that fired since the last
911 mfi_parse_entries(sc, log_state->shutdown_seq_num,
912 log_state->newest_seq_num);
913 seq = log_state->newest_seq_num;
916 mfi_aen_register(sc, seq, class_locale.word);
917 if (log_state != NULL)
918 kfree(log_state, M_MFIBUF);
/*
 * Queue a command and sleep (on the io lock) until it completes,
 * returning cm_error.  A DCMD opcode of 0 (as issued by MegaCli) is
 * short-circuited to MFI_STAT_OK without touching hardware.
 */
924 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
927 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
928 cm->cm_complete = NULL;
932 * MegaCli can issue a DCMD of 0. In this case do nothing
933 * and return 0 to it as status
935 if (cm->cm_frame->dcmd.opcode == 0) {
936 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
938 return (cm->cm_error);
940 mfi_enqueue_ready(cm);
942 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
943 lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
944 return (cm->cm_error);
/*
 * Tear down everything mfi_attach() built, in reverse order: stop the
 * watchdog, destroy the control device, release per-command dmamaps and
 * the command pool, tear down the interrupt, unload/free/destroy each
 * DMA region (sense, frames, comms) and their tags, free the sysctl
 * context, and uninit the locks.  Each resource is guarded so partial
 * attaches free only what was allocated.
 */
948 mfi_free(struct mfi_softc *sc)
950 struct mfi_command *cm;
953 callout_stop(&sc->mfi_watchdog_callout); /* XXX callout_drain() */
955 if (sc->mfi_cdev != NULL)
956 destroy_dev(sc->mfi_cdev);
957 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
959 if (sc->mfi_total_cmds != 0) {
960 for (i = 0; i < sc->mfi_total_cmds; i++) {
961 cm = &sc->mfi_commands[i];
962 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
964 kfree(sc->mfi_commands, M_MFIBUF);
968 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
969 if (sc->mfi_irq != NULL)
970 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
973 if (sc->mfi_sense_busaddr != 0)
974 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
975 if (sc->mfi_sense != NULL)
976 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
977 sc->mfi_sense_dmamap);
978 if (sc->mfi_sense_dmat != NULL)
979 bus_dma_tag_destroy(sc->mfi_sense_dmat);
981 if (sc->mfi_frames_busaddr != 0)
982 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
983 if (sc->mfi_frames != NULL)
984 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
985 sc->mfi_frames_dmamap);
986 if (sc->mfi_frames_dmat != NULL)
987 bus_dma_tag_destroy(sc->mfi_frames_dmat);
989 if (sc->mfi_comms_busaddr != 0)
990 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
991 if (sc->mfi_comms != NULL)
992 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
993 sc->mfi_comms_dmamap);
994 if (sc->mfi_comms_dmat != NULL)
995 bus_dma_tag_destroy(sc->mfi_comms_dmat);
997 if (sc->mfi_buffer_dmat != NULL)
998 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
999 if (sc->mfi_parent_dmat != NULL)
1000 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1002 if (sc->mfi_sysctl_tree != NULL)
1003 sysctl_ctx_free(&sc->mfi_sysctl_ctx);
1005 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1007 if (mtx_initialized(&sc->mfi_io_lock)) {
1008 lockuninit(&sc->mfi_io_lock);
1009 sx_destroy(&sc->mfi_config_lock);
1013 lockuninit(&sc->mfi_io_lock);
1014 lockuninit(&sc->mfi_config_lock);
/*
 * Config-intrhook callback: disestablish the hook, enable controller
 * interrupts, and probe for logical/system disks under the config and
 * io locks (both taken in that order, released in reverse).
 */
1020 mfi_startup(void *arg)
1022 struct mfi_softc *sc;
1024 sc = (struct mfi_softc *)arg;
1026 config_intrhook_disestablish(&sc->mfi_ich);
1028 sc->mfi_enable_intr(sc);
1029 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1030 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1032 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1034 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1035 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Interrupt handler body (mfi_intr): after the per-controller
 * check/clear hook confirms the interrupt is ours, walk the reply queue
 * from consumer index ci to producer index pi, completing each command
 * whose context (pool index) is in range.  The queue has
 * mfi_max_fw_cmds + 1 entries, so ci wraps at that bound.  Finally the
 * consumer index is published back to the comms area and any frozen
 * deferred I/O is released.
 */
1041 struct mfi_softc *sc;
1042 struct mfi_command *cm;
1043 uint32_t pi, ci, context;
1045 sc = (struct mfi_softc *)arg;
1047 if (sc->mfi_check_clear_intr(sc))
1050 pi = sc->mfi_comms->hw_pi;
1051 ci = sc->mfi_comms->hw_ci;
1052 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1054 context = sc->mfi_comms->hw_reply_q[ci];
1055 if (context < sc->mfi_max_fw_cmds) {
1056 cm = &sc->mfi_commands[context];
1057 mfi_remove_busy(cm);
1059 mfi_complete(sc, cm);
1061 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1066 sc->mfi_comms->hw_ci = ci;
1068 /* Give defered I/O a chance to run */
1069 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1070 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1072 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Shutdown-time flush: abort any outstanding AEN command, then issue a
 * polled MFI_DCMD_CTRL_SHUTDOWN (no data phase) so the controller
 * flushes its caches before power-off.
 */
1078 mfi_shutdown(struct mfi_softc *sc)
1080 struct mfi_dcmd_frame *dcmd;
1081 struct mfi_command *cm;
1084 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1085 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1087 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1091 if (sc->mfi_aen_cm != NULL)
1092 mfi_abort(sc, sc->mfi_aen_cm);
1094 dcmd = &cm->cm_frame->dcmd;
1095 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1096 cm->cm_flags = MFI_CMD_POLLED;
1099 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1100 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1103 mfi_release_command(cm);
1104 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Synchronize the driver's system-PD (JBOD) list with the firmware's
 * view: query the host-exposed PD list (polled DATAIN DCMD), add any
 * device not yet tracked (skipping enclosure entries, where device_id
 * equals encl_device_id), and delete children whose PD no longer
 * appears in the firmware list.  Requires both the config and io locks
 * (asserted below).
 */
1108 mfi_syspdprobe(struct mfi_softc *sc)
1110 struct mfi_frame_header *hdr;
1111 struct mfi_command *cm = NULL;
1112 struct mfi_pd_list *pdlist = NULL;
1113 struct mfi_system_pd *syspd;
1116 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1117 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1118 /* Add SYSTEM PD's */
1119 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1120 (void **)&pdlist, sizeof(*pdlist));
1122 device_printf(sc->mfi_dev,"Error while forming syspd list\n");
1126 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1127 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1128 cm->cm_frame->dcmd.mbox[1] = 0;
1129 if (mfi_mapcmd(sc, cm) != 0) {
1130 device_printf(sc->mfi_dev, "Failed to get syspd device list\n");
1133 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1134 BUS_DMASYNC_POSTREAD);
1135 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1136 hdr = &cm->cm_frame->header;
1137 if (hdr->cmd_status != MFI_STAT_OK) {
1138 device_printf(sc->mfi_dev, "MFI_DCMD_PD_LIST_QUERY failed %x\n",
1142 for (i = 0; i < pdlist->count; i++) {
1143 if (pdlist->addr[i].device_id == pdlist->addr[i].encl_device_id)
1144 goto skip_sys_pd_add;
1145 /* Get each PD and add it to the system */
1146 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
1147 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
1148 if (syspd->pd_id == pdlist->addr[i].device_id)
1149 goto skip_sys_pd_add;
1152 mfi_add_sys_pd(sc,pdlist->addr[i].device_id);
1156 /* Delete SYSPD's whose state has been changed */
1157 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
1158 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
1159 for (i=0;i<pdlist->count;i++) {
1160 if (syspd->pd_id == pdlist->addr[i].device_id)
1161 goto skip_sys_pd_delete;
1164 device_delete_child(sc->mfi_dev,syspd->pd_dev);
1172 kfree(pdlist, M_MFIBUF);
1174 mfi_release_command(cm);
/*
 * mfi_ldprobe: fetch the logical-drive list (MFI_DCMD_LD_GET_LIST) and
 * attach any logical drive not already present in mfi_ld_tqh.
 * Caller must hold both the config and io locks (asserted below).
 */
1178 mfi_ldprobe(struct mfi_softc *sc)
1180 struct mfi_frame_header *hdr;
1181 struct mfi_command *cm = NULL;
1182 struct mfi_ld_list *list = NULL;
1183 struct mfi_disk *ld;
1186 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1187 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1189 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1190 (void **)&list, sizeof(*list));
/* DATAIN without POLLED: sleeps in mfi_wait_command for completion. */
1194 cm->cm_flags = MFI_CMD_DATAIN;
1195 if (mfi_wait_command(sc, cm) != 0) {
1196 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1200 hdr = &cm->cm_frame->header;
1201 if (hdr->cmd_status != MFI_STAT_OK) {
1202 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Attach only target ids we have not seen before. */
1207 for (i = 0; i < list->ld_count; i++) {
1208 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1209 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1212 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1217 kfree(list, M_MFIBUF);
1219 mfi_release_command(cm);
1225 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1226 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * format_timestamp: render an event timestamp into a static buffer.
 * NOTE(review): static buffer means the result is overwritten by the
 * next call — fine for the single-printf use in mfi_decode_evt.
 */
1230 format_timestamp(uint32_t timestamp)
1232 static char buffer[32];
1234 if ((timestamp & 0xff000000) == 0xff000000)
1235 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1238 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * format_class: map an MFI event-class code to a short human-readable
 * name; unknown codes are formatted numerically into a static buffer
 * (shared-buffer caveat as with format_timestamp).
 */
1243 format_class(int8_t class)
1245 static char buffer[6];
1248 case MFI_EVT_CLASS_DEBUG:
1250 case MFI_EVT_CLASS_PROGRESS:
1251 return ("progress");
1252 case MFI_EVT_CLASS_INFO:
1254 case MFI_EVT_CLASS_WARNING:
1256 case MFI_EVT_CLASS_CRITICAL:
1258 case MFI_EVT_CLASS_FATAL:
1260 case MFI_EVT_CLASS_DEAD:
1263 ksnprintf(buffer, sizeof(buffer), "%d", class);
/*
 * mfi_decode_evt: log one firmware event (sequence, timestamp, locale,
 * class, description) to the console via device_printf.
 */
1269 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1272 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1273 format_timestamp(detail->time), detail->evt_class.members.locale,
1274 format_class(detail->evt_class.members.evt_class), detail->description);
/*
 * mfi_aen_register: arm an async-event-notification (AEN) wait command
 * (MFI_DCMD_CTRL_EVENT_WAIT) for the given sequence number and locale.
 * If an AEN is already pending, either the existing registration covers
 * the request (nothing to do) or it is widened and the old command is
 * aborted so it can be re-issued with the merged class/locale.
 */
1278 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1280 struct mfi_command *cm;
1281 struct mfi_dcmd_frame *dcmd;
1282 union mfi_evt current_aen, prior_aen;
1283 struct mfi_evt_detail *ed = NULL;
1286 current_aen.word = locale;
1287 if (sc->mfi_aen_cm != NULL) {
/* mbox[1] of the pending command holds its class/locale word. */
1289 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Covered already: class at least as verbose and locale a superset. */
1290 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1291 !((prior_aen.members.locale & current_aen.members.locale)
1292 ^current_aen.members.locale)) {
/* Otherwise merge locales, keep the more verbose class, and re-arm. */
1295 prior_aen.members.locale |= current_aen.members.locale;
1296 if (prior_aen.members.evt_class
1297 < current_aen.members.evt_class)
1298 current_aen.members.evt_class =
1299 prior_aen.members.evt_class;
1300 mfi_abort(sc, sc->mfi_aen_cm);
1304 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1305 (void **)&ed, sizeof(*ed));
1310 dcmd = &cm->cm_frame->dcmd;
1311 ((uint32_t *)&dcmd->mbox)[0] = seq;
1312 ((uint32_t *)&dcmd->mbox)[1] = locale;
1313 cm->cm_flags = MFI_CMD_DATAIN;
1314 cm->cm_complete = mfi_aen_complete;
1316 sc->mfi_aen_cm = cm;
1318 mfi_enqueue_ready(cm);
/*
 * mfi_aen_complete: completion callback for the AEN wait command.
 * Decodes the received event, signals any process registered for AEN
 * delivery (SIGIO), wakes kqueue pollers, and re-arms the AEN with the
 * next sequence number via mfi_aen_setup.
 */
1326 mfi_aen_complete(struct mfi_command *cm)
1328 struct mfi_frame_header *hdr;
1329 struct mfi_softc *sc;
1330 struct mfi_evt_detail *detail;
1331 struct mfi_aen *mfi_aen_entry, *tmp;
1332 int seq = 0, aborted = 0;
1335 hdr = &cm->cm_frame->header;
1337 if (sc->mfi_aen_cm == NULL)
/* Aborted or invalid status: don't decode, just tear down below. */
1340 if (sc->mfi_aen_cm->cm_aen_abort ||
1341 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1342 sc->mfi_aen_cm->cm_aen_abort = 0;
1345 sc->mfi_aen_triggered = 1;
1346 if (sc->mfi_poll_waiting) {
1347 sc->mfi_poll_waiting = 0;
1348 KNOTE(&sc->mfi_kq.ki_note, 0);
1350 detail = cm->cm_data;
1352 * XXX If this function is too expensive or is recursive, then
1353 * events should be put onto a queue and processed later.
1355 mfi_decode_evt(sc, detail);
1356 seq = detail->seq + 1;
/* Notify and drop every registered AEN listener (one-shot delivery). */
1357 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1358 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1360 lwkt_gettoken(&proc_token);
1361 ksignal(mfi_aen_entry->p, SIGIO);
1362 lwkt_reltoken(&proc_token);
1363 kfree(mfi_aen_entry, M_MFIBUF);
1367 kfree(cm->cm_data, M_MFIBUF);
/* Wake anyone sleeping on the AEN slot (see mfi_abort's lksleep). */
1368 sc->mfi_aen_cm = NULL;
1369 wakeup(&sc->mfi_aen_cm);
1370 mfi_release_command(cm);
1372 /* set it up again so the driver can catch more events */
1374 mfi_aen_setup(sc, seq);
/* Batch size for MFI_DCMD_CTRL_EVENT_GET: events fetched per command. */
1378 #define MAX_EVENTS 15
/*
 * mfi_parse_entries: replay the firmware event log from start_seq up to
 * (but not including) stop_seq, decoding each event. Fetches events in
 * MAX_EVENTS-sized polled batches; handles the log being a circular
 * buffer (stop point may precede the start point numerically).
 */
1381 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1383 struct mfi_command *cm;
1384 struct mfi_dcmd_frame *dcmd;
1385 struct mfi_evt_list *el;
1386 union mfi_evt class_locale;
1387 int error, i, seq, size;
1388 uint32_t context = 0;
1390 class_locale.members.reserved = 0;
1391 class_locale.members.locale = mfi_event_locale;
1392 class_locale.members.evt_class = mfi_event_class;
1394 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1396 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1400 for (seq = start_seq;;) {
1401 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1402 kfree(el, M_MFIBUF);
1406 /* Zero out the MFI frame */
/* Preserve the firmware-assigned context across the bzero. */
1407 context = cm->cm_frame->header.context;
1408 bzero(cm->cm_frame, sizeof(union mfi_frame));
1409 cm->cm_frame->header.context = context;
1411 dcmd = &cm->cm_frame->dcmd;
1412 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1413 dcmd->header.cmd = MFI_CMD_DCMD;
1414 dcmd->header.timeout = 0;
1415 dcmd->header.data_len = size;
1416 dcmd->header.scsi_status = 0;
1417 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1418 ((uint32_t *)&dcmd->mbox)[0] = seq;
1419 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1420 cm->cm_sg = &dcmd->sgl;
1421 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1422 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1426 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1427 device_printf(sc->mfi_dev,
1428 "Failed to get controller entries\n");
1429 mfi_release_command(cm);
1433 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1434 BUS_DMASYNC_POSTREAD);
1435 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND == we have read past the newest entry: normal loop exit. */
1437 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1438 mfi_release_command(cm);
1441 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1442 device_printf(sc->mfi_dev,
1443 "Error %d fetching controller entries\n",
1444 dcmd->header.cmd_status);
1445 mfi_release_command(cm);
1448 mfi_release_command(cm);
1450 for (i = 0; i < el->count; i++) {
1452 * If this event is newer than 'stop_seq' then
1453 * break out of the loop. Note that the log
1454 * is a circular buffer so we have to handle
1455 * the case that our stop point is earlier in
1456 * the buffer than our start point.
1458 if (el->event[i].seq >= stop_seq) {
1459 if (start_seq <= stop_seq)
1461 else if (el->event[i].seq < start_seq)
1464 mfi_decode_evt(sc, &el->event[i]);
/* Continue the next batch after the last sequence we received. */
1466 seq = el->event[el->count - 1].seq + 1;
1469 kfree(el, M_MFIBUF);
/*
 * mfi_add_ld: fetch MFI_DCMD_LD_GET_INFO for logical drive 'id' and,
 * unless it is an SSCD (CacheCade) volume, hand off to
 * mfi_add_ld_complete to attach an mfid child device.
 * Caller must hold the io lock (asserted below).
 */
1474 mfi_add_ld(struct mfi_softc *sc, int id)
1476 struct mfi_command *cm;
1477 struct mfi_dcmd_frame *dcmd = NULL;
1478 struct mfi_ld_info *ld_info = NULL;
1481 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1483 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1484 (void **)&ld_info, sizeof(*ld_info));
1486 device_printf(sc->mfi_dev,
1487 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1489 kfree(ld_info, M_MFIBUF);
1492 cm->cm_flags = MFI_CMD_DATAIN;
1493 dcmd = &cm->cm_frame->dcmd;
1495 if (mfi_wait_command(sc, cm) != 0) {
1496 device_printf(sc->mfi_dev,
1497 "Failed to get logical drive: %d\n", id);
1498 kfree(ld_info, M_MFIBUF);
/* SSCD volumes are skipped; mfi_add_ld_complete frees ld_info itself. */
1501 if (ld_info->ld_config.params.isSSCD != 1) {
1502 mfi_add_ld_complete(cm);
1504 mfi_release_command(cm);
1505 if(ld_info) /* SSCD drives ld_info free here */
1506 kfree(ld_info, M_MFIBUF);
/*
 * mfi_add_ld_complete: attach an "mfid" child device for a logical
 * drive whose LD_GET_INFO succeeded; ld_info ownership passes to the
 * child via device_set_ivars (freed here only on failure paths).
 * Drops the io lock around newbus calls, reacquiring it afterwards.
 */
1512 mfi_add_ld_complete(struct mfi_command *cm)
1514 struct mfi_frame_header *hdr;
1515 struct mfi_ld_info *ld_info;
1516 struct mfi_softc *sc;
1520 hdr = &cm->cm_frame->header;
1521 ld_info = cm->cm_private;
1523 if (hdr->cmd_status != MFI_STAT_OK) {
1524 kfree(ld_info, M_MFIBUF);
1525 mfi_release_command(cm);
1528 mfi_release_command(cm);
/* newbus attach may sleep: cannot hold the io lock across it. */
1530 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1532 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1533 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1534 kfree(ld_info, M_MFIBUF);
1536 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1540 device_set_ivars(child, ld_info);
1541 device_set_desc(child, "MFI Logical Disk");
1542 bus_generic_attach(sc->mfi_dev);
1544 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * mfi_add_sys_pd: fetch MFI_DCMD_PD_GET_INFO (polled) for physical
 * drive 'id' and hand off to mfi_add_sys_pd_complete to attach an
 * mfisyspd child. Caller must hold the io lock (asserted below).
 */
1548 mfi_add_sys_pd(struct mfi_softc *sc,int id)
1550 struct mfi_command *cm;
1551 struct mfi_dcmd_frame *dcmd = NULL;
1552 struct mfi_pd_info *pd_info = NULL;
1555 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1557 error = mfi_dcmd_command(sc,&cm,MFI_DCMD_PD_GET_INFO,
1558 (void **)&pd_info, sizeof(*pd_info));
1560 device_printf(sc->mfi_dev,
1561 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n", error);
1563 kfree(pd_info,M_MFIBUF);
1566 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1567 dcmd = &cm->cm_frame->dcmd;
1569 dcmd->header.scsi_status = 0;
1570 dcmd->header.pad0 = 0;
1571 if (mfi_mapcmd(sc, cm) != 0) {
1572 device_printf(sc->mfi_dev,
1573 "Failed to get physical drive info %d\n", id);
1574 kfree(pd_info,M_MFIBUF);
1577 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1578 BUS_DMASYNC_POSTREAD);
1579 bus_dmamap_unload(sc->mfi_buffer_dmat,cm->cm_dmamap);
1580 mfi_add_sys_pd_complete(cm);
/*
 * mfi_add_sys_pd_complete: attach an "mfisyspd" child for a physical
 * drive in SYSTEM state; non-SYSTEM drives are rejected. pd_info
 * ownership passes to the child via device_set_ivars (freed here only
 * on failure). Drops/reacquires the io lock around newbus calls, as in
 * mfi_add_ld_complete.
 */
1585 mfi_add_sys_pd_complete(struct mfi_command *cm)
1587 struct mfi_frame_header *hdr;
1588 struct mfi_pd_info *pd_info;
1589 struct mfi_softc *sc;
1593 hdr = &cm->cm_frame->header;
1594 pd_info = cm->cm_private;
1596 if (hdr->cmd_status != MFI_STAT_OK) {
1597 kfree(pd_info, M_MFIBUF);
1598 mfi_release_command(cm);
1601 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1602 device_printf(sc->mfi_dev,"PD=%x is not SYSTEM PD\n",
1603 pd_info->ref.v.device_id);
1604 kfree(pd_info, M_MFIBUF);
1605 mfi_release_command(cm);
1608 mfi_release_command(cm);
1610 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1612 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1613 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1614 kfree(pd_info, M_MFIBUF);
1616 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1620 device_set_ivars(child, pd_info);
1621 device_set_desc(child, "MFI System PD");
1622 bus_generic_attach(sc->mfi_dev);
1624 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * mfi_bio_command: pull the next bio off the driver's bio queue and
 * build a command for it — a SCSI passthrough for system PDs, an LD
 * I/O frame otherwise. Keeps 2 free commands in reserve for ioctls.
 * On build failure the bio is re-queued (visible at line 1645).
 */
1627 static struct mfi_command *
1628 mfi_bio_command(struct mfi_softc *sc)
1631 struct mfi_command *cm = NULL;
1632 struct mfi_disk *mfid;
1634 /* reserving two commands to avoid starvation for IOCTL */
1635 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1637 if ((bio = mfi_dequeue_bio(sc)) == NULL)
1639 mfid = bio->bio_driver_info;
1640 if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1641 cm = mfi_build_syspdio(sc, bio);
1643 cm = mfi_build_ldio(sc, bio);
1645 mfi_enqueue_bio(sc,bio);
/*
 * mfi_build_syspdio: translate a bio into a SCSI READ_10/WRITE_10
 * passthrough frame for a system physical drive. Builds the 10-byte
 * CDB (32-bit LBA + 16-bit block count) from bio_offset/b_bcount.
 * NOTE(review): READ_10 caps the addressable LBA at 32 bits — large
 * PDs would need READ_16; confirm against the full source.
 */
1649 static struct mfi_command *
1650 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1652 struct mfi_command *cm;
1654 struct mfi_system_pd *disk;
1655 struct mfi_pass_frame *pass;
1656 int flags = 0,blkcount = 0;
1657 uint32_t context = 0;
1659 if ((cm = mfi_dequeue_free(sc)) == NULL)
1662 /* Zero out the MFI frame */
1663 context = cm->cm_frame->header.context;
1664 bzero(cm->cm_frame, sizeof(union mfi_frame));
1665 cm->cm_frame->header.context = context;
1667 pass = &cm->cm_frame->pass;
1668 bzero(pass->cdb, 16);
1669 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* b_cmd & 0x03 distinguishes BUF_CMD_READ / BUF_CMD_WRITE. */
1670 switch (bp->b_cmd & 0x03) {
1672 pass->cdb[0] = READ_10;
1673 flags = MFI_CMD_DATAIN;
1676 pass->cdb[0] = WRITE_10;
1677 flags = MFI_CMD_DATAOUT;
1680 panic("Invalid bio command");
1683 /* Cheat with the sector length to avoid a non-constant division */
1684 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1685 disk = bio->bio_driver_info;
1686 /* Fill the LBA and Transfer length in CDB */
1687 pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
1688 pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
1689 pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
1690 pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
1691 pass->cdb[7] = (blkcount & 0xff00) >> 8;
1692 pass->cdb[8] = (blkcount & 0x00ff);
1693 pass->header.target_id = disk->pd_id;
1694 pass->header.timeout = 0;
1695 pass->header.flags = 0;
1696 pass->header.scsi_status = 0;
1697 pass->header.sense_len = MFI_SENSE_LEN;
1698 pass->header.data_len = bp->b_bcount;
1699 pass->header.cdb_len = 10;
/* 64-bit kernels split the sense buffer bus address into lo/hi words. */
1700 #if defined(__x86_64__)
1701 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
1702 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
1704 pass->sense_addr_lo = cm->cm_sense_busaddr;
1705 pass->sense_addr_hi = 0;
1707 cm->cm_complete = mfi_bio_complete;
1708 cm->cm_private = bio;
1709 cm->cm_data = bp->b_data;
1710 cm->cm_len = bp->b_bcount;
1711 cm->cm_sg = &pass->sgl;
1712 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
1713 cm->cm_flags = flags;
/*
 * mfi_build_ldio: translate a bio into a native MFI_CMD_LD_READ/WRITE
 * I/O frame for a logical drive. Unlike the syspd path, the LBA is
 * carried as a full 64-bit value in lba_hi/lba_lo, and data_len is in
 * sectors (blkcount), not bytes.
 */
1717 static struct mfi_command *
1718 mfi_build_ldio(struct mfi_softc *sc,struct bio *bio)
1720 struct mfi_io_frame *io;
1722 struct mfi_disk *disk;
1723 struct mfi_command *cm;
1724 int flags, blkcount;
1725 uint32_t context = 0;
1727 if ((cm = mfi_dequeue_free(sc)) == NULL)
1730 /* Zero out the MFI frame */
1731 context = cm->cm_frame->header.context;
1732 bzero(cm->cm_frame,sizeof(union mfi_frame));
1733 cm->cm_frame->header.context = context;
1735 io = &cm->cm_frame->io;
1736 switch (bp->b_cmd & 0x03) {
1738 io->header.cmd = MFI_CMD_LD_READ;
1739 flags = MFI_CMD_DATAIN;
1742 io->header.cmd = MFI_CMD_LD_WRITE;
1743 flags = MFI_CMD_DATAOUT;
1746 panic("Invalid bio command");
1749 /* Cheat with the sector length to avoid a non-constant division */
1750 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1751 disk = bio->bio_driver_info;
1752 io->header.target_id = disk->ld_id;
1753 io->header.timeout = 0;
1754 io->header.flags = 0;
1755 io->header.scsi_status = 0;
1756 io->header.sense_len = MFI_SENSE_LEN;
1757 io->header.data_len = blkcount;
1758 #if defined(__x86_64__)
1759 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
1760 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
1762 io->sense_addr_lo = cm->cm_sense_busaddr;
1763 io->sense_addr_hi = 0;
1765 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
1766 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
1767 cm->cm_complete = mfi_bio_complete;
1768 cm->cm_private = bio;
1769 cm->cm_data = bp->b_data;
1770 cm->cm_len = bp->b_bcount;
1771 cm->cm_sg = &io->sgl;
1772 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1773 cm->cm_flags = flags;
/*
 * mfi_bio_complete: completion callback for LD/syspd I/O commands.
 * Maps firmware or SCSI status into B_ERROR on the buf, prints sense
 * data on error, then releases the command and completes the bio.
 */
1778 mfi_bio_complete(struct mfi_command *cm)
1782 struct mfi_frame_header *hdr;
1783 struct mfi_softc *sc;
1785 bio = cm->cm_private;
1787 hdr = &cm->cm_frame->header;
1790 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
1791 bp->b_flags |= B_ERROR;
1793 device_printf(sc->mfi_dev, "I/O error, status= %d "
1794 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1795 mfi_print_sense(cm->cm_sc, cm->cm_sense);
/* cm_error covers DMA/mapping failures recorded before submission. */
1796 } else if (cm->cm_error != 0) {
1797 bp->b_flags |= B_ERROR;
1800 mfi_release_command(cm);
1801 mfi_disk_complete(bio);
/*
 * mfi_startio: command dispatch loop. Pulls work in priority order —
 * ready-queued commands, then CAM passthrough CCBs, then bios — and
 * submits each via mfi_mapcmd; on failure the command is re-queued.
 * Skips entirely while the queue is frozen (MFI_FLAGS_QFRZN).
 */
1805 mfi_startio(struct mfi_softc *sc)
1807 struct mfi_command *cm;
1808 struct ccb_hdr *ccbh;
1811 /* Don't bother if we're short on resources */
1812 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1815 /* Try a command that has already been prepared */
1816 cm = mfi_dequeue_ready(sc);
1819 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1820 cm = sc->mfi_cam_start(ccbh);
1823 /* Nope, so look for work on the bioq */
1825 cm = mfi_bio_command(sc);
1827 /* No work available, so exit */
1831 /* Send the command to the controller */
1832 if (mfi_mapcmd(sc, cm) != 0) {
1833 mfi_requeue_ready(cm);
/*
 * mfi_mapcmd: DMA-map a command's data buffer (mfi_data_cb finishes the
 * SG list and sends the frame) or, for data-less commands, send the
 * frame directly. EINPROGRESS from a deferred mapping freezes the
 * queue. Caller must hold the io lock (asserted below).
 */
1840 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1844 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1846 if (cm->cm_data != NULL) {
/* Polled commands cannot tolerate a deferred (callback-later) load. */
1847 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1848 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1849 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1850 if (error == EINPROGRESS) {
1851 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1855 error = mfi_send_frame(sc, cm);
/*
 * mfi_data_cb: bus_dma load callback. Fills the command's scatter/
 * gather list in the format the controller requires (IEEE "skinny",
 * 32-bit, or 64-bit entries), sets direction flags, syncs the map,
 * accounts extra frames consumed by the SG list, and sends the frame.
 * On a load error, records cm_error and completes the command.
 */
1862 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1864 struct mfi_frame_header *hdr;
1865 struct mfi_command *cm;
1867 struct mfi_softc *sc;
1872 cm = (struct mfi_command *)arg;
1874 hdr = &cm->cm_frame->header;
1878 kprintf("error %d in callback\n", error);
1879 cm->cm_error = error;
1880 mfi_complete(sc, cm);
1884 /* Use IEEE sgl only for IO's on a SKINNY controller
1885 * For other commands on a SKINNY controller use either
1886 * sg32 or sg64 based on the sizeof(bus_addr_t).
1887 * Also calculate the total frame size based on the type
1890 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
1891 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
1892 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
1893 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
1894 for (i = 0; i < nsegs; i++) {
1895 sgl->sg_skinny[i].addr = segs[i].ds_addr;
1896 sgl->sg_skinny[i].len = segs[i].ds_len;
1897 sgl->sg_skinny[i].flag = 0;
1899 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
1901 sge_size = sizeof(struct mfi_sg_skinny);
1904 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1905 for (i = 0; i < nsegs; i++) {
1906 sgl->sg32[i].addr = segs[i].ds_addr;
1907 sgl->sg32[i].len = segs[i].ds_len;
1909 sge_size = sizeof(struct mfi_sg32);
1911 for (i = 0; i < nsegs; i++) {
1912 sgl->sg64[i].addr = segs[i].ds_addr;
1913 sgl->sg64[i].len = segs[i].ds_len;
1915 hdr->flags |= MFI_FRAME_SGL64;
1916 sge_size = sizeof(struct mfi_sg64);
1919 hdr->sg_count = nsegs;
/* Pre-sync for the direction(s) the command will transfer. */
1922 if (cm->cm_flags & MFI_CMD_DATAIN) {
1923 dir |= BUS_DMASYNC_PREREAD;
1924 hdr->flags |= MFI_FRAME_DIR_READ;
1926 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1927 dir |= BUS_DMASYNC_PREWRITE;
1928 hdr->flags |= MFI_FRAME_DIR_WRITE;
1930 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1931 cm->cm_flags |= MFI_CMD_MAPPED;
1934 * Instead of calculating the total number of frames in the
1935 * compound frame, it's already assumed that there will be at
1936 * least 1 frame, so don't compensate for the modulo of the
1937 * following division.
1939 cm->cm_total_frame_size += (sge_size * nsegs);
1940 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1942 mfi_send_frame(sc, cm);
/*
 * mfi_send_frame: hand a fully-built command to the controller via the
 * hardware-specific issue routine. Non-polled commands are timestamped
 * and put on the busy queue; polled commands are busy-waited here for
 * up to MFI_POLL_TIMEOUT_SECS, with a timeout diagnostic.
 */
1946 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1948 struct mfi_frame_header *hdr;
1949 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1951 hdr = &cm->cm_frame->header;
1953 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1954 cm->cm_timestamp = time_second;
1955 mfi_enqueue_busy(cm);
/* Polled: sentinel status lets the wait loop detect completion. */
1957 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1958 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1962 * The bus address of the command is aligned on a 64 byte boundary,
1963 * leaving the least 6 bits as zero. For whatever reason, the
1964 * hardware wants the address shifted right by three, leaving just
1965 * 3 zero bits. These three bits are then used as a prefetching
1966 * hint for the hardware to predict how many frames need to be
1967 * fetched across the bus. If a command has more than 8 frames
1968 * then the 3 bits are set to 0x7 and the firmware uses other
1969 * information in the command to determine the total amount to fetch.
1970 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1971 * is enough for both 32bit and 64bit systems.
1973 if (cm->cm_extra_frames > 7)
1974 cm->cm_extra_frames = 7;
1976 sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames);
1978 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1981 /* This is a polled command, so busy-wait for it to complete. */
1982 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1989 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1990 device_printf(sc->mfi_dev, "Frame %p timed out "
1991 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * mfi_complete: finish a command — post-sync and unload its DMA map if
 * one was attached, mark it completed, and invoke its cm_complete
 * callback when set.
 */
1999 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2003 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2005 if (cm->cm_flags & MFI_CMD_DATAIN)
2006 dir |= BUS_DMASYNC_POSTREAD;
2007 if (cm->cm_flags & MFI_CMD_DATAOUT)
2008 dir |= BUS_DMASYNC_POSTWRITE;
2010 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2011 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2012 cm->cm_flags &= ~MFI_CMD_MAPPED;
2015 cm->cm_flags |= MFI_CMD_COMPLETED;
2017 if (cm->cm_complete != NULL)
2018 cm->cm_complete(cm);
/*
 * mfi_abort: issue a polled MFI_CMD_ABORT frame targeting cm_abort
 * (identified by its context and bus address), then wait — up to 5
 * lksleep periods — for the aborted AEN command slot to clear (the
 * wakeup comes from mfi_aen_complete). Caller holds the io lock.
 */
2024 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2026 struct mfi_command *cm;
2027 struct mfi_abort_frame *abort;
2029 uint32_t context = 0;
2031 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2033 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2037 /* Zero out the MFI frame */
2038 context = cm->cm_frame->header.context;
2039 bzero(cm->cm_frame, sizeof(union mfi_frame));
2040 cm->cm_frame->header.context = context;
2042 abort = &cm->cm_frame->abort;
2043 abort->header.cmd = MFI_CMD_ABORT;
2044 abort->header.flags = 0;
2045 abort->header.scsi_status = 0;
2046 abort->abort_context = cm_abort->cm_frame->header.context;
2047 #if defined(__x86_64__)
2048 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr & 0xFFFFFFFF;
2049 abort->abort_mfi_addr_hi = (cm_abort->cm_frame_busaddr & 0xFFFFFFFF00000000 ) >> 32 ;
2051 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
2052 abort->abort_mfi_addr_hi = 0;
2055 cm->cm_flags = MFI_CMD_POLLED;
/* Flag the AEN command so its completion path treats it as aborted. */
2057 sc->mfi_aen_cm->cm_aen_abort = 1;
2059 mfi_release_command(cm);
2061 while (i < 5 && sc->mfi_aen_cm != NULL) {
2062 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
/*
 * mfi_dump_blocks: polled single-shot LD write used by the kernel dump
 * path — writes 'len' bytes from 'virt' at 64-bit LBA 'lba' on logical
 * drive 'id' via an MFI_CMD_LD_WRITE frame, then unmaps and releases
 * the command.
 */
2070 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
2072 struct mfi_command *cm;
2073 struct mfi_io_frame *io;
2075 uint32_t context = 0;
2077 if ((cm = mfi_dequeue_free(sc)) == NULL)
2080 /* Zero out the MFI frame */
2081 context = cm->cm_frame->header.context;
2082 bzero(cm->cm_frame, sizeof(union mfi_frame));
2083 cm->cm_frame->header.context = context;
2085 io = &cm->cm_frame->io;
2086 io->header.cmd = MFI_CMD_LD_WRITE;
2087 io->header.target_id = id;
2088 io->header.timeout = 0;
2089 io->header.flags = 0;
2090 io->header.scsi_status = 0;
2091 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is in sectors for LD I/O frames. */
2092 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2093 #if defined(__x86_64__)
2094 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2095 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000 ) >> 32;
2097 io->sense_addr_lo = cm->cm_sense_busaddr;
2098 io->sense_addr_hi = 0;
2100 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2101 io->lba_lo = lba & 0xffffffff;
2104 cm->cm_sg = &io->sgl;
2105 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2106 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2108 error = mfi_mapcmd(sc, cm);
2109 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2110 BUS_DMASYNC_POSTWRITE);
2111 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2112 mfi_release_command(cm);
/*
 * mfi_dump_syspd_blocks: kernel-dump write to a system physical drive —
 * a polled WRITE_10 SCSI passthrough built the same way as
 * mfi_build_syspdio (32-bit LBA in the CDB, byte count in data_len).
 */
2118 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2121 struct mfi_command *cm;
2122 struct mfi_pass_frame *pass;
2126 if ((cm = mfi_dequeue_free(sc)) == NULL)
2129 pass = &cm->cm_frame->pass;
2130 bzero(pass->cdb, 16);
2131 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2132 pass->cdb[0] = WRITE_10;
/* WRITE_10 carries only the low 32 bits of 'lba'. */
2133 pass->cdb[2] = (lba & 0xff000000) >> 24;
2134 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2135 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2136 pass->cdb[5] = (lba & 0x000000ff);
2137 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2138 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2139 pass->cdb[8] = (blkcount & 0x00ff);
2140 pass->header.target_id = id;
2141 pass->header.timeout = 0;
2142 pass->header.flags = 0;
2143 pass->header.scsi_status = 0;
2144 pass->header.sense_len = MFI_SENSE_LEN;
2145 pass->header.data_len = len;
2146 pass->header.cdb_len = 10;
2147 #if defined(__x86_64__)
2148 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2149 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000 ) >> 32;
2151 pass->sense_addr_lo = cm->cm_sense_busaddr;
2152 pass->sense_addr_hi = 0;
2156 cm->cm_sg = &pass->sgl;
2157 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2158 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2160 error = mfi_mapcmd(sc, cm);
2161 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2162 BUS_DMASYNC_POSTWRITE);
2163 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2164 mfi_release_command(cm);
/*
 * mfi_open: character-device open entry point. Refuses opens while the
 * driver is detaching; otherwise marks the softc open.
 */
2170 mfi_open(struct dev_open_args *ap)
2172 cdev_t dev = ap->a_head.a_dev;
2173 struct mfi_softc *sc;
2178 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2179 if (sc->mfi_detaching)
2182 sc->mfi_flags |= MFI_FLAGS_OPEN;
2185 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_close: character-device close entry point. Clears the open flag
 * and removes any AEN listener registrations belonging to the closing
 * process.
 */
2191 mfi_close(struct dev_close_args *ap)
2193 cdev_t dev = ap->a_head.a_dev;
2194 struct mfi_softc *sc;
2195 struct mfi_aen *mfi_aen_entry, *tmp;
2199 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2200 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2202 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2203 if (mfi_aen_entry->p == curproc) {
2204 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2206 kfree(mfi_aen_entry, M_MFIBUF);
2209 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_config_lock: take the config lock for DCMD opcodes that change
 * the array configuration (LD delete, config add/clear); returns a
 * flag for mfi_config_unlock indicating whether the lock was taken.
 */
2214 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2218 case MFI_DCMD_LD_DELETE:
2219 case MFI_DCMD_CFG_ADD:
2220 case MFI_DCMD_CFG_CLEAR:
2221 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
/* mfi_config_unlock: release the config lock iff mfi_config_lock took it. */
2229 mfi_config_unlock(struct mfi_softc *sc, int locked)
2233 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2236 /* Perform pre-issue checks on commands from userland and possibly veto them. */
/*
 * mfi_check_command_pre: before running a userland DCMD, disable the
 * in-kernel disk object(s) it will destroy or transition (LD delete,
 * config clear, PD state set to unconfigured-good), vetoing the
 * command (nonzero return) if a disk cannot be disabled.
 * Caller holds the io lock (asserted below).
 */
2238 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2240 struct mfi_disk *ld, *ld2;
2242 struct mfi_system_pd *syspd = NULL;
2246 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2248 switch (cm->cm_frame->dcmd.opcode) {
2249 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE. */
2250 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2251 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2257 error = mfi_disk_disable(ld);
2259 case MFI_DCMD_CFG_CLEAR:
/* CFG_CLEAR affects every LD; roll back enables on failure. */
2260 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2261 error = mfi_disk_disable(ld);
2266 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2269 mfi_disk_enable(ld2);
2273 case MFI_DCMD_PD_STATE_SET:
2274 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2276 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2277 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2278 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2279 if (syspd->pd_id == syspd_id)
2287 error = mfi_syspd_disable(syspd);
2295 /* Perform post-issue checks on commands from userland. */
/*
 * mfi_check_command_post: after a userland DCMD completes, reconcile
 * driver state with the outcome — delete child devices on a
 * successful LD delete / config clear (re-enabling disks on failure),
 * probe for new drives after a config add or foreign import, and
 * re-enable a syspd whose state transition failed.
 */
2297 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2299 struct mfi_disk *ld, *ldn;
2300 struct mfi_system_pd *syspd = NULL;
2304 switch (cm->cm_frame->dcmd.opcode) {
2305 case MFI_DCMD_LD_DELETE:
2306 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2307 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2310 KASSERT(ld != NULL, ("volume dissappeared"));
2311 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* Drop the io lock around device_delete_child (may sleep). */
2312 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2314 device_delete_child(sc->mfi_dev, ld->ld_dev);
2316 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2318 mfi_disk_enable(ld);
2320 case MFI_DCMD_CFG_CLEAR:
2321 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2322 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2324 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2325 device_delete_child(sc->mfi_dev, ld->ld_dev);
2328 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2330 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2331 mfi_disk_enable(ld);
2334 case MFI_DCMD_CFG_ADD:
2337 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2340 case MFI_DCMD_PD_STATE_SET:
2341 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2343 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2344 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2345 TAILQ_FOREACH(syspd,&sc->mfi_syspd_tqh,pd_link) {
2346 if (syspd->pd_id == syspd_id)
2353 /* If the transition fails then enable the syspd again */
2354 if(syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2355 mfi_syspd_enable(syspd);
/*
 * mfi_user_command: execute a userland DCMD passthrough (ioctl path).
 * Copies in the user buffer, runs pre-issue checks, waits for the
 * command, runs post-issue checks, then copies the frame and buffer
 * back out. Data direction is unknown, so both DATAIN and DATAOUT are
 * set (noted in the comment at line 2403).
 */
2361 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2363 struct mfi_command *cm;
2364 struct mfi_dcmd_frame *dcmd;
2365 void *ioc_buf = NULL;
2367 int error = 0, locked;
2370 if (ioc->buf_size > 0) {
/* NOTE(review): M_WAITOK cannot return NULL; the check below looks vestigial. */
2371 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2372 if (ioc_buf == NULL) {
2375 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2377 device_printf(sc->mfi_dev, "failed to copyin\n");
2378 kfree(ioc_buf, M_MFIBUF);
2383 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2385 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2386 while ((cm = mfi_dequeue_free(sc)) == NULL)
2387 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2389 /* Save context for later */
2390 context = cm->cm_frame->header.context;
2392 dcmd = &cm->cm_frame->dcmd;
2393 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2395 cm->cm_sg = &dcmd->sgl;
2396 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2397 cm->cm_data = ioc_buf;
2398 cm->cm_len = ioc->buf_size;
2400 /* restore context */
2401 cm->cm_frame->header.context = context;
2403 /* Cheat since we don't know if we're writing or reading */
2404 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2406 error = mfi_check_command_pre(sc, cm);
2410 error = mfi_wait_command(sc, cm);
2412 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2415 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2416 mfi_check_command_post(sc, cm);
2418 mfi_release_command(cm);
2419 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2420 mfi_config_unlock(sc, locked);
2421 if (ioc->buf_size > 0)
2422 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2424 kfree(ioc_buf, M_MFIBUF);
/* PTRIN: convert a (possibly 32-bit) user pointer value to a kernel void *. */
2429 #define PTRIN(p) ((void *)(uintptr_t)(p))
2431 #define PTRIN(p) (p)
/*
 * mfi_check_for_sscd: detect whether a userland CFG_ADD or LD_DELETE
 * command targets an SSCD (CacheCade) volume — for LD_DELETE this
 * requires an extra LD_GET_INFO round-trip to inspect isSSCD.
 */
2435 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2437 struct mfi_config_data *conf_data = cm->cm_data;
2438 struct mfi_command *ld_cm = NULL;
2439 struct mfi_ld_info *ld_info = NULL;
2442 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2443 (conf_data->ld[0].params.isSSCD == 1)) {
2445 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2446 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2447 (void **)&ld_info, sizeof(*ld_info));
2449 device_printf(sc->mfi_dev,"Failed to allocate "
2450 "MFI_DCMD_LD_GET_INFO %d", error);
2452 kfree(ld_info, M_MFIBUF);
2455 ld_cm->cm_flags = MFI_CMD_DATAIN;
/* Query the same LD id the delete command targets (mbox[0]). */
2456 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2457 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2458 if (mfi_wait_command(sc, ld_cm) != 0) {
2459 device_printf(sc->mfi_dev, "failed to get log drv\n");
2460 mfi_release_command(ld_cm);
2461 kfree(ld_info, M_MFIBUF);
2465 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2466 kfree(ld_info, M_MFIBUF);
2467 mfi_release_command(ld_cm);
2470 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2473 if (ld_info->ld_config.params.isSSCD == 1)
2476 mfi_release_command(ld_cm);
2477 kfree(ld_info, M_MFIBUF);
/*
 * mfi_ioctl: ioctl entry point for the mfi(4) control device.
 * Dispatches statistics queries, disk name queries, raw firmware
 * command packets, AEN registration, the Linux ioctl shims, and user
 * passthru frames.  NOTE(review): many interior lines (case labels,
 * braces, error paths) are elided from this view; the comments below
 * cover only what is visible.
 */
2483 mfi_ioctl(struct dev_ioctl_args *ap)
2485 cdev_t dev = ap->a_head.a_dev;
2486 u_long cmd = ap->a_cmd;
2487 int flag = ap->a_fflag;
2488 caddr_t arg = ap->a_data;
2489 struct mfi_softc *sc;
2490 union mfi_statrequest *ms;
2491 struct mfi_ioc_packet *ioc;
2493 struct mfi_ioc_packet32 *ioc32;
2495 struct mfi_ioc_aen *aen;
2496 struct mfi_command *cm = NULL;
2498 union mfi_sense_ptr sense_ptr;
2499 uint8_t *data = NULL, *temp, skip_pre_post = 0;
2501 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2503 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2504 struct mfi_ioc_passthru iop_swab;
/* Statistics request: hand back the selected queue's counters. */
2513 ms = (union mfi_statrequest *)arg;
2514 switch (ms->ms_item) {
2519 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2520 sizeof(struct mfi_qstat));
/* Map an array id to its mfid device name and report open state. */
2527 case MFIIO_QUERY_DISK:
2529 struct mfi_query_disk *qd;
2530 struct mfi_disk *ld;
2532 qd = (struct mfi_query_disk *)arg;
2533 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2534 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2535 if (ld->ld_id == qd->array_id)
2540 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2544 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2546 bzero(qd->devname, SPECNAMELEN + 1);
2547 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2548 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Raw firmware command packet (case labels elided from this view).
 * A packet sent to unit 0 but addressed to another adapter is
 * redirected to that adapter's softc.
 */
2556 devclass_t devclass;
2557 ioc = (struct mfi_ioc_packet *)arg;
2560 adapter = ioc->mfi_adapter_no;
2561 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2562 devclass = devclass_find("mfi");
2563 sc = devclass_get_softc(devclass, adapter);
2565 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2566 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2567 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2570 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2574 * save off original context since copying from user
2575 * will clobber some data
2577 context = cm->cm_frame->header.context;
2579 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2580 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2581 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2582 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2583 cm->cm_frame->header.scsi_status = 0;
2584 cm->cm_frame->header.pad0 = 0;
2585 if (ioc->mfi_sge_count) {
2587 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Translate frame DATAIN/DATAOUT flags into driver command flags. */
2590 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2591 cm->cm_flags |= MFI_CMD_DATAIN;
2592 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2593 cm->cm_flags |= MFI_CMD_DATAOUT;
2594 /* Legacy app shim */
2595 if (cm->cm_flags == 0)
2596 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2597 cm->cm_len = cm->cm_frame->header.data_len;
2599 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2600 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2602 if (cm->cm_data == NULL) {
2603 device_printf(sc->mfi_dev, "Malloc failed\n");
2610 /* restore header context */
2611 cm->cm_frame->header.context = context;
/* Gather user data: copy each S/G segment into the bounce buffer. */
2614 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2615 for (i = 0; i < ioc->mfi_sge_count; i++) {
2617 if (cmd == MFI_CMD) {
2619 error = copyin(ioc->mfi_sgl[i].iov_base,
2621 ioc->mfi_sgl[i].iov_len);
/* 32-bit packet variant: pointers need PTRIN widening. */
2625 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2627 PTRIN(ioc32->mfi_sgl[i].iov_base);
2628 error = copyin(temp_convert,
2630 ioc32->mfi_sgl[i].iov_len);
2633 error = copyin(ioc->mfi_sgl[i].iov_base,
2635 ioc->mfi_sgl[i].iov_len);
2638 device_printf(sc->mfi_dev,
2639 "Copy in failed\n");
2642 temp = &temp[ioc->mfi_sgl[i].iov_len];
/* DCMDs that change configuration must take the config lock. */
2646 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2647 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Point the firmware at this command's DMA-able sense buffer. */
2649 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2650 #if defined(__x86_64__)
2651 cm->cm_frame->pass.sense_addr_lo =
2652 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2653 cm->cm_frame->pass.sense_addr_hi =
2654 (cm->cm_sense_busaddr& 0xFFFFFFFF00000000) >> 32;
2656 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2657 cm->cm_frame->pass.sense_addr_hi = 0;
/* Issue the command and wait for completion under the io lock. */
2661 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2662 skip_pre_post = mfi_check_for_sscd(sc, cm);
2663 if (!skip_pre_post) {
2664 error = mfi_check_command_pre(sc, cm);
2666 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2671 if ((error = mfi_wait_command(sc, cm)) != 0) {
2672 device_printf(sc->mfi_dev,
2673 "Controller polled failed\n");
2674 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2679 mfi_check_command_post(sc, cm);
2680 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter results: copy the bounce buffer back to each segment. */
2683 if (cm->cm_flags & MFI_CMD_DATAIN) {
2684 for (i = 0; i < ioc->mfi_sge_count; i++) {
2686 if (cmd == MFI_CMD) {
2688 error = copyout(temp,
2689 ioc->mfi_sgl[i].iov_base,
2690 ioc->mfi_sgl[i].iov_len);
2694 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2696 PTRIN(ioc32->mfi_sgl[i].iov_base);
2697 error = copyout(temp,
2699 ioc32->mfi_sgl[i].iov_len);
2702 error = copyout(temp,
2703 ioc->mfi_sgl[i].iov_base,
2704 ioc->mfi_sgl[i].iov_len);
2707 device_printf(sc->mfi_dev,
2708 "Copy out failed\n");
2711 temp = &temp[ioc->mfi_sgl[i].iov_len];
2715 if (ioc->mfi_sense_len) {
2716 /* get user-space sense ptr then copy out sense */
2717 bcopy(&((struct mfi_ioc_packet*)arg)
2718 ->mfi_frame.raw[ioc->mfi_sense_off],
2719 &sense_ptr.sense_ptr_data[0],
2720 sizeof(sense_ptr.sense_ptr_data));
2722 if (cmd != MFI_CMD) {
2724 * not 64bit native so zero out any address
2726 sense_ptr.addr.high = 0;
2729 error = copyout(cm->cm_sense, sense_ptr.user_space,
2730 ioc->mfi_sense_len);
2732 device_printf(sc->mfi_dev,
2733 "Copy out failed\n");
/* Hand the final firmware status back in the user's packet. */
2738 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2740 mfi_config_unlock(sc, locked);
2742 kfree(data, M_MFIBUF);
2744 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2745 mfi_release_command(cm);
2746 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* AEN registration (case label elided from this view). */
2752 aen = (struct mfi_ioc_aen *)arg;
2753 error = mfi_aen_register(sc, aen->aen_seq_num,
2754 aen->aen_class_locale);
2757 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2759 devclass_t devclass;
2760 struct mfi_linux_ioc_packet l_ioc;
2763 devclass = devclass_find("mfi");
2764 if (devclass == NULL)
2767 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Re-dispatch to the addressed adapter's Linux ioctl handler. */
2770 adapter = l_ioc.lioc_adapter_no;
2771 sc = devclass_get_softc(devclass, adapter);
2774 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2778 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2780 devclass_t devclass;
2781 struct mfi_linux_ioc_aen l_aen;
2784 devclass = devclass_find("mfi");
2785 if (devclass == NULL)
2788 error = copyin(arg, &l_aen, sizeof(l_aen));
2791 adapter = l_aen.laen_adapter_no;
2792 sc = devclass_get_softc(devclass, adapter);
2795 return (mfi_linux_ioctl_int(sc->mfi_cdev,
/* 32-bit passthru: convert into the native layout, then fall into
 * the common MFIIO_PASSTHRU path. */
2800 case MFIIO_PASSTHRU32:
2801 iop_swab.ioc_frame = iop32->ioc_frame;
2802 iop_swab.buf_size = iop32->buf_size;
2803 iop_swab.buf = PTRIN(iop32->buf);
2807 case MFIIO_PASSTHRU:
2808 error = mfi_user_command(sc, iop);
/* Propagate the completed frame back to the 32-bit caller's layout. */
2810 if (cmd == MFIIO_PASSTHRU32)
2811 iop32->ioc_frame = iop_swab.ioc_frame;
2815 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_linux_ioctl_int: backend for the Linux MegaRAID ioctl shim.
 * Mirrors the native raw-packet path in mfi_ioctl() but works on the
 * 32-bit Linux packet layout (struct mfi_linux_ioc_packet), and also
 * implements Linux AEN registration.  NOTE(review): many interior
 * lines (case labels, braces, error paths) are elided from this view;
 * comments cover only what is visible.
 */
2824 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
2826 struct mfi_softc *sc;
2827 struct mfi_linux_ioc_packet l_ioc;
2828 struct mfi_linux_ioc_aen l_aen;
2829 struct mfi_command *cm = NULL;
2830 struct mfi_aen *mfi_aen_entry;
2831 union mfi_sense_ptr sense_ptr;
2833 uint8_t *data = NULL, *temp;
2840 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2841 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Reject packets whose S/G list exceeds the shim's fixed bound. */
2845 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2849 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2850 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2851 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2854 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2858 * save off original context since copying from user
2859 * will clobber some data
2861 context = cm->cm_frame->header.context;
2863 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2864 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2865 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2866 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
2867 cm->cm_frame->header.scsi_status = 0;
2868 cm->cm_frame->header.pad0 = 0;
2869 if (l_ioc.lioc_sge_count)
2871 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
/* Translate frame DATAIN/DATAOUT flags into driver command flags. */
2873 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2874 cm->cm_flags |= MFI_CMD_DATAIN;
2875 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2876 cm->cm_flags |= MFI_CMD_DATAOUT;
2877 cm->cm_len = cm->cm_frame->header.data_len;
2879 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2880 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2882 if (cm->cm_data == NULL) {
2883 device_printf(sc->mfi_dev, "Malloc failed\n");
2890 /* restore header context */
2891 cm->cm_frame->header.context = context;
/* Gather user data: copy each S/G segment into the bounce buffer. */
2894 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2895 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2896 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
2898 l_ioc.lioc_sgl[i].iov_len);
2900 device_printf(sc->mfi_dev,
2901 "Copy in failed\n");
2904 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
/* DCMDs that change configuration must take the config lock. */
2908 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2909 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Point the firmware at this command's DMA-able sense buffer. */
2911 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2912 #if defined(__x86_64__)
2913 cm->cm_frame->pass.sense_addr_lo =
2914 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2915 cm->cm_frame->pass.sense_addr_hi =
2916 (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2918 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2919 cm->cm_frame->pass.sense_addr_hi = 0;
/* Issue the command and wait for completion under the io lock. */
2923 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2924 error = mfi_check_command_pre(sc, cm);
2926 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2930 if ((error = mfi_wait_command(sc, cm)) != 0) {
2931 device_printf(sc->mfi_dev,
2932 "Controller polled failed\n");
2933 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2937 mfi_check_command_post(sc, cm);
2938 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter results: copy the bounce buffer back to each segment. */
2941 if (cm->cm_flags & MFI_CMD_DATAIN) {
2942 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2943 error = copyout(temp,
2944 PTRIN(l_ioc.lioc_sgl[i].iov_base),
2945 l_ioc.lioc_sgl[i].iov_len);
2947 device_printf(sc->mfi_dev,
2948 "Copy out failed\n");
2951 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2955 if (l_ioc.lioc_sense_len) {
2956 /* get user-space sense ptr then copy out sense */
2957 bcopy(&((struct mfi_linux_ioc_packet*)arg)
2958 ->lioc_frame.raw[l_ioc.lioc_sense_off],
2959 &sense_ptr.sense_ptr_data[0],
2960 sizeof(sense_ptr.sense_ptr_data));
2963 * only 32bit Linux support so zero out any
2964 * address over 32bit
2966 sense_ptr.addr.high = 0;
2968 error = copyout(cm->cm_sense, sense_ptr.user_space,
2969 l_ioc.lioc_sense_len);
2971 device_printf(sc->mfi_dev,
2972 "Copy out failed\n");
/* Write the firmware status directly into the user's packet. */
2977 error = copyout(&cm->cm_frame->header.cmd_status,
2978 &((struct mfi_linux_ioc_packet*)arg)
2979 ->lioc_frame.hdr.cmd_status,
2982 device_printf(sc->mfi_dev,
2983 "Copy out failed\n");
2988 mfi_config_unlock(sc, locked);
2990 kfree(data, M_MFIBUF);
2992 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2993 mfi_release_command(cm);
2994 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2998 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2999 error = copyin(arg, &l_aen, sizeof(l_aen));
3002 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
/* Track the registering process so it can be notified/cleaned up. */
3003 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3005 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3006 if (mfi_aen_entry != NULL) {
3007 mfi_aen_entry->p = curproc;
3008 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3011 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3012 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
3015 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3017 kfree(mfi_aen_entry, M_MFIBUF);
3019 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3023 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_kqfilter: attach a knote to this controller's kqueue list so a
 * process can poll for AEN activity.  EVFILT_READ/EVFILT_WRITE get the
 * matching filterops; anything else is rejected with EOPNOTSUPP.
 * NOTE(review): the case labels and some braces are elided from this
 * view.
 */
3032 mfi_kqfilter(struct dev_kqfilter_args *ap)
3034 cdev_t dev = ap->a_head.a_dev;
3035 struct knote *kn = ap->a_kn;
3036 struct mfi_softc *sc;
3037 struct klist *klist;
3042 switch (kn->kn_filter) {
/* Read branch (case label elided). */
3044 kn->kn_fop = &mfi_read_filterops;
3045 kn->kn_hook = (caddr_t)sc;
/* Write branch (case label elided). */
3048 kn->kn_fop = &mfi_write_filterops;
3049 kn->kn_hook = (caddr_t)sc;
3052 ap->a_result = EOPNOTSUPP;
3056 klist = &sc->mfi_kq.ki_note;
3057 knote_insert(klist, kn);
/*
 * mfi_filter_detach: remove a previously attached knote from the
 * softc's note list (the softc was stashed in kn_hook at attach time).
 */
3063 mfi_filter_detach(struct knote *kn)
3065 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3066 struct klist *klist = &sc->mfi_kq.ki_note;
3068 knote_remove(klist, kn);
/*
 * mfi_filter_read: EVFILT_READ event test.  Reports ready when an AEN
 * has triggered, consuming the trigger flag; if no trigger is pending
 * and no AEN command is outstanding, the knote is flagged EV_ERROR.
 * Otherwise notes that a poller is waiting.  NOTE(review): the return
 * statements are elided from this view.
 */
3072 mfi_filter_read(struct knote *kn, long hint)
3074 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3077 if (sc->mfi_aen_triggered != 0) {
/* Consume the trigger so the next poll blocks again. */
3079 sc->mfi_aen_triggered = 0;
3081 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3082 kn->kn_flags |= EV_ERROR;
3085 sc->mfi_poll_waiting = 1;
/* EVFILT_WRITE event test; its body is elided from this view. */
3091 mfi_filter_write(struct knote *kn, long hint)
/*
 * Debug dump helper (its name/signature line is elided from this
 * view): iterates over every attached mfi softc and reports busy
 * commands that have exceeded the command timeout.
 */
3099 struct mfi_softc *sc;
3100 struct mfi_command *cm;
3106 dc = devclass_find("mfi");
3108 kprintf("No mfi dev class\n");
/* Walk softc unit numbers until devclass_get_softc runs out. */
3112 for (i = 0; ; i++) {
3113 sc = devclass_get_softc(dc, i);
3116 device_printf(sc->mfi_dev, "Dumping\n\n");
/* Anything started before this wall-clock point has timed out. */
3118 deadline = time_second - MFI_CMD_TIMEOUT;
3119 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3120 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3121 if (cm->cm_timestamp < deadline) {
3122 device_printf(sc->mfi_dev,
3123 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3124 (int)(time_second - cm->cm_timestamp));
3135 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_timeout: periodic watchdog callout.  Scans the busy queue for
 * commands older than MFI_CMD_TIMEOUT seconds -- skipping the
 * long-lived AEN command, which legitimately stays outstanding -- and
 * reports/validates them, then re-arms itself.  NOTE(review): the tail
 * of the function is elided from this view.
 */
3142 mfi_timeout(void *data)
3144 struct mfi_softc *sc = (struct mfi_softc *)data;
3145 struct mfi_command *cm;
/* Anything started before this wall-clock point has timed out. */
3149 deadline = time_second - MFI_CMD_TIMEOUT;
3150 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3151 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
/* The AEN command waits indefinitely by design; never time it out. */
3152 if (sc->mfi_aen_cm == cm)
3154 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3155 device_printf(sc->mfi_dev,
3156 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3157 (int)(time_second - cm->cm_timestamp));
3159 MFI_VALIDATE_CMD(sc, cm);
3169 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Re-arm the watchdog for the next scan interval. */
3171 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,