2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.57 2011/07/14 20:20:33 jhb Exp $
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/sysctl.h>
60 #include <sys/malloc.h>
61 #include <sys/kernel.h>
63 #include <sys/eventhandler.h>
65 #include <sys/bus_dma.h>
67 #include <sys/ioccom.h>
70 #include <sys/signalvar.h>
71 #include <sys/device.h>
72 #include <sys/mplock2.h>
74 #include <dev/raid/mfi/mfireg.h>
75 #include <dev/raid/mfi/mfi_ioctl.h>
76 #include <dev/raid/mfi/mfivar.h>
/*
 * Forward declarations for the driver-internal helpers defined below.
 * The *_xscale and *_ppc variants implement the same operations for the
 * two controller register layouts; one set is selected at attach time.
 */
78 static int mfi_alloc_commands(struct mfi_softc *);
79 static int mfi_comms_init(struct mfi_softc *);
80 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
81 static int mfi_get_controller_info(struct mfi_softc *);
82 static int mfi_get_log_state(struct mfi_softc *,
83 struct mfi_evt_log_state **);
84 static int mfi_parse_entries(struct mfi_softc *, int, int);
85 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
86 uint32_t, void **, size_t);
87 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
88 static void mfi_startup(void *arg);
89 static void mfi_intr(void *arg);
90 static void mfi_ldprobe(struct mfi_softc *sc);
91 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
92 static void mfi_aen_complete(struct mfi_command *);
93 static int mfi_aen_setup(struct mfi_softc *, uint32_t);
94 static int mfi_add_ld(struct mfi_softc *sc, int);
95 static void mfi_add_ld_complete(struct mfi_command *);
96 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
97 static void mfi_bio_complete(struct mfi_command *);
98 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
99 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
100 static void mfi_complete(struct mfi_softc *, struct mfi_command *);
101 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
102 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
103 static void mfi_timeout(void *);
104 static int mfi_user_command(struct mfi_softc *,
105 struct mfi_ioc_passthru *);
106 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
107 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
108 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
109 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
110 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
111 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
112 static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
113 static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
114 static void mfi_filter_detach(struct knote *);
115 static int mfi_filter_read(struct knote *, long);
116 static int mfi_filter_write(struct knote *, long);
/*
 * Global driver tunables, settable from loader.conf (TUNABLE_INT) and
 * visible under the hw.mfi sysctl tree.
 */
118 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
/* Which event locales to subscribe to when registering for AENs. */
119 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
120 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
121 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
122 0, "event message locale");
/* Minimum event class (severity) reported by the firmware event log. */
124 static int mfi_event_class = MFI_EVT_CLASS_INFO;
125 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
126 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
127 0, "event message class");
/*
 * Upper bound on the driver's command pool; the actual pool size is the
 * minimum of this and the firmware-reported maximum (see
 * mfi_alloc_commands()).  Read-only once booted.
 */
129 static int mfi_max_cmds = 128;
130 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
131 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
134 /* Management interface */
135 static d_open_t mfi_open;
136 static d_close_t mfi_close;
137 static d_ioctl_t mfi_ioctl;
138 static d_kqfilter_t mfi_kqfilter;
/*
 * Character-device entry points for the /dev/mfi%d management node used
 * by userland tools (e.g. MegaCli).
 * NOTE(review): the listing appears to have dropped the .d_open line and
 * name/version fields of this initializer.
 */
140 static struct dev_ops mfi_ops = {
143 .d_close = mfi_close,
144 .d_ioctl = mfi_ioctl,
145 .d_kqfilter = mfi_kqfilter,
/* kqueue filters used to notify userland of asynchronous events (AENs). */
148 static struct filterops mfi_read_filterops =
149 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
150 static struct filterops mfi_write_filterops =
151 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
153 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
155 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/*
 * Enable controller interrupts on xscale (1064R-class) hardware by
 * writing the outbound interrupt mask register.
 */
158 mfi_enable_intr_xscale(struct mfi_softc *sc)
160 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable controller interrupts on ppc-style hardware (1078 / GEN2).
 * Clears any pending doorbell bits first, then unmasks everything except
 * the chip-specific "enable interrupt mask" bit.
 */
164 mfi_enable_intr_ppc(struct mfi_softc *sc)
166 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
167 if (sc->mfi_flags & MFI_FLAGS_1078) {
168 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
169 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
170 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
/* Return the raw firmware status word (xscale: outbound message reg 0). */
175 mfi_read_fw_status_xscale(struct mfi_softc *sc)
177 return MFI_READ4(sc, MFI_OMSG0);
/* Return the raw firmware status word (ppc: outbound scratchpad reg 0). */
181 mfi_read_fw_status_ppc(struct mfi_softc *sc)
183 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether this controller raised the interrupt and acknowledge it.
 * Returns nonzero (not ours) when the valid bit is clear; otherwise the
 * status is written back to clear the interrupt.
 */
187 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
191 status = MFI_READ4(sc, MFI_OSTS);
192 if ((status & MFI_OSTS_INTR_VALID) == 0)
195 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * ppc-style variant of the interrupt check/ack: the "reply message"
 * bit to test depends on whether this is a 1078 or GEN2 controller, and
 * the ack goes through the outbound doorbell clear register.
 */
200 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
204 status = MFI_READ4(sc, MFI_OSTS);
205 if (sc->mfi_flags & MFI_FLAGS_1078) {
206 if (!(status & MFI_1078_RM)) {
209 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
210 if (!(status & MFI_GEN2_RM)) {
215 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to the firmware (xscale): the inbound queue port takes
 * the frame bus address shifted right by 3 with the extra frame count
 * OR-ed into the low bits.
 */
220 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
222 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
/*
 * Post a command to the firmware (ppc): frame count goes in bits 1..,
 * and bit 0 is always set — a different inbound queue encoding than the
 * xscale variant above.
 */
226 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
228 MFI_WRITE4(sc, MFI_IQP, (bus_add |frame_cnt <<1)|1 );
/*
 * Drive the firmware from whatever state it is currently in to READY.
 * Loops reading the firmware state; for each non-READY state it either
 * kicks the firmware with the appropriate inbound-doorbell write
 * (handshake clear, ready, hotplug) or just waits for transient init
 * states to pass.  After acting it polls for the state to change; if the
 * firmware stays stuck in the same state the attach fails.
 * NOTE(review): the listing appears to have dropped several lines here
 * (switch header, waits, returns); comments added only.
 */
232 mfi_transition_firmware(struct mfi_softc *sc)
234 uint32_t fw_state, cur_state;
237 fw_state = sc->mfi_read_fw_status(sc)& MFI_FWSTATE_MASK;
238 while (fw_state != MFI_FWSTATE_READY) {
240 device_printf(sc->mfi_dev, "Waiting for firmware to "
242 cur_state = fw_state;
244 case MFI_FWSTATE_FAULT:
245 device_printf(sc->mfi_dev, "Firmware fault\n");
247 case MFI_FWSTATE_WAIT_HANDSHAKE:
248 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
251 case MFI_FWSTATE_OPERATIONAL:
252 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
255 case MFI_FWSTATE_UNDEFINED:
256 case MFI_FWSTATE_BB_INIT:
259 case MFI_FWSTATE_FW_INIT:
260 case MFI_FWSTATE_DEVICE_SCAN:
261 case MFI_FWSTATE_FLUSH_CACHE:
264 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
265 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
269 device_printf(sc->mfi_dev,"Unknown firmware state %#x\n",
/* Poll (10 samples per unit of max_wait) for the state to change. */
273 for (i = 0; i < (max_wait * 10); i++) {
274 fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
275 if (fw_state == cur_state)
/* Still in the same state after the full poll window: give up. */
280 if (fw_state == cur_state) {
281 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the caller-supplied pointer (arg).  Used for the comms, frame,
 * and sense DMA areas, which are each loaded as one segment.
 */
290 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
295 *addr = segs[0].ds_addr;
/*
 * Main attach routine.  Ordering:
 *   1. init locks/queues and pick the register-access method by chip;
 *   2. bring the firmware to READY (mfi_transition_firmware);
 *   3. size and allocate the DMA areas (comms queues, command frames,
 *      sense buffers) and the command pool;
 *   4. initialize the firmware comms queues and query controller info;
 *   5. set up AENs, the interrupt handler, config intrhook, shutdown
 *      eventhandler, management cdev, sysctl nodes and the watchdog.
 * Returns 0 on success or errno on failure.
 * NOTE(review): the listing appears to have dropped error-path lines
 * (gotos/returns) throughout this function; comments added only.
 */
299 mfi_attach(struct mfi_softc *sc)
302 int error, commsz, framessz, sensesz;
303 int frames, unit, max_fw_sge;
305 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.00 \n");
307 lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
308 lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
309 TAILQ_INIT(&sc->mfi_ld_tqh);
310 TAILQ_INIT(&sc->mfi_aen_pids);
311 TAILQ_INIT(&sc->mfi_cam_ccbq);
/* Select register accessors: 1064R uses xscale, everything else ppc. */
318 if (sc->mfi_flags & MFI_FLAGS_1064R) {
319 sc->mfi_enable_intr = mfi_enable_intr_xscale;
320 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
321 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
322 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
325 sc->mfi_enable_intr = mfi_enable_intr_ppc;
326 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
327 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
328 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
332 /* Before we get too far, see if the firmware is working */
333 if ((error = mfi_transition_firmware(sc)) != 0) {
334 device_printf(sc->mfi_dev, "Firmware not in READY state, "
335 "error %d\n", error);
340 * Get information needed for sizing the contiguous memory for the
341 * frame pool. Size down the sgl parameter since we know that
342 * we will never need more than what's required for MAXPHYS.
343 * It would be nice if these constants were available at runtime
344 * instead of compile time.
346 status = sc->mfi_read_fw_status(sc);
347 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
348 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
349 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
352 * Create the dma tag for data buffers. Used both for block I/O
353 * and for various internal data queries.
355 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
356 1, 0, /* algnmnt, boundary */
357 BUS_SPACE_MAXADDR, /* lowaddr */
358 BUS_SPACE_MAXADDR, /* highaddr */
359 NULL, NULL, /* filter, filterarg */
360 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
361 sc->mfi_max_sge, /* nsegments */
362 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
363 BUS_DMA_ALLOCNOW, /* flags */
364 &sc->mfi_buffer_dmat)) {
365 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
370 * Allocate DMA memory for the comms queues. Keep it under 4GB for
371 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
372 * entry, so the calculated size here will be will be 1 more than
373 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
375 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
376 sizeof(struct mfi_hwcomms);
377 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
378 1, 0, /* algnmnt, boundary */
379 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
380 BUS_SPACE_MAXADDR, /* highaddr */
381 NULL, NULL, /* filter, filterarg */
382 commsz, /* maxsize */
384 commsz, /* maxsegsize */
386 &sc->mfi_comms_dmat)) {
387 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
390 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
391 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
392 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
395 bzero(sc->mfi_comms, commsz);
396 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
397 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
400 * Allocate DMA memory for the command frames. Keep them in the
401 * lower 4GB for efficiency. Calculate the size of the commands at
402 * the same time; each command is one 64 byte frame plus a set of
403 * additional frames for holding sg lists or other data.
404 * The assumption here is that the SG list will start at the second
405 * frame and not use the unused bytes in the first frame. While this
406 * isn't technically correct, it simplifies the calculation and allows
407 * for command frames that might be larger than an mfi_io_frame.
409 if (sizeof(bus_addr_t) == 8) {
410 sc->mfi_sge_size = sizeof(struct mfi_sg64);
411 sc->mfi_flags |= MFI_FLAGS_SG64;
413 sc->mfi_sge_size = sizeof(struct mfi_sg32);
415 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
416 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
417 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
418 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
419 64, 0, /* algnmnt, boundary */
420 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
421 BUS_SPACE_MAXADDR, /* highaddr */
422 NULL, NULL, /* filter, filterarg */
423 framessz, /* maxsize */
425 framessz, /* maxsegsize */
427 &sc->mfi_frames_dmat)) {
428 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
431 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
432 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
433 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
436 bzero(sc->mfi_frames, framessz);
437 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
438 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
441 * Allocate DMA memory for the frame sense data. Keep them in the
442 * lower 4GB for efficiency
444 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
445 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
446 4, 0, /* algnmnt, boundary */
447 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
448 BUS_SPACE_MAXADDR, /* highaddr */
449 NULL, NULL, /* filter, filterarg */
450 sensesz, /* maxsize */
452 sensesz, /* maxsegsize */
454 &sc->mfi_sense_dmat)) {
455 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
458 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
459 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
460 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
463 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
464 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
466 if ((error = mfi_alloc_commands(sc)) != 0)
469 if ((error = mfi_comms_init(sc)) != 0)
472 if ((error = mfi_get_controller_info(sc)) != 0)
475 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * NOTE(review): the ", 0" comma operator makes this condition compare
 * 0 != 0, which is always false — mfi_aen_setup() failures are silently
 * ignored.  This quirk matches the upstream FreeBSD source; confirm
 * whether it is intentional before "fixing" it.
 */
476 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
477 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
480 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
483 * Set up the interrupt handler. XXX This should happen in
487 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
488 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
489 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
492 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
493 mfi_intr, sc, &sc->mfi_intr, NULL)) {
494 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
498 /* Register a config hook to probe the bus for arrays */
499 sc->mfi_ich.ich_func = mfi_startup;
500 sc->mfi_ich.ich_arg = sc;
501 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
502 device_printf(sc->mfi_dev, "Cannot establish configuration "
508 * Register a shutdown handler.
510 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
511 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
512 device_printf(sc->mfi_dev, "Warning: shutdown event "
513 "registration failed\n");
517 * Create the control device for doing management
519 unit = device_get_unit(sc->mfi_dev);
520 sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
521 0640, "mfi%d", unit);
523 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
524 if (sc->mfi_cdev != NULL)
525 sc->mfi_cdev->si_drv1 = sc;
526 sysctl_ctx_init(&sc->mfi_sysctl_ctx);
527 sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
528 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
529 device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
530 if (sc->mfi_sysctl_tree == NULL) {
531 device_printf(sc->mfi_dev, "can't add sysctl node\n");
534 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
535 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
536 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
537 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
538 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
539 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
540 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
541 &sc->mfi_keep_deleted_volumes, 0,
542 "Don't detach the mfid device for a busy volume that is deleted");
/* Attach the pass-through (mfip) child bus. */
544 device_add_child(sc->mfi_dev, "mfip", -1);
545 bus_generic_attach(sc->mfi_dev);
547 /* Start the timeout watchdog */
548 callout_init(&sc->mfi_watchdog_callout);
549 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
/*
 * Allocate the driver's command pool: min(mfi_max_cmds tunable,
 * firmware-reported max).  Each mfi_command is pointed at its slice of
 * the preallocated frame and sense DMA areas; the frame's context field
 * is set to the command index so completions can be mapped back.
 */
556 mfi_alloc_commands(struct mfi_softc *sc)
558 struct mfi_command *cm;
562 * XXX Should we allocate all the commands up front, or allocate on
563 * demand later like 'aac' does?
565 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
567 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
568 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
570 sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
573 for (i = 0; i < ncmds; i++) {
574 cm = &sc->mfi_commands[i];
575 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
576 sc->mfi_cmd_size * i);
577 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
578 sc->mfi_cmd_size * i;
/* Context identifies this command slot in completion interrupts. */
579 cm->cm_frame->header.context = i;
580 cm->cm_sense = &sc->mfi_sense[i];
581 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
/*
 * On successful dmamap creation the command is released onto the
 * free list (bus_dmamap_create() returns 0 on success).
 * NOTE(review): the failure handling appears to be in lines
 * dropped from this listing.
 */
584 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
585 &cm->cm_dmamap) == 0)
586 mfi_release_command(cm);
589 sc->mfi_total_cmds++;
/*
 * Return a command to the free pool.  Clears the frame header fields
 * word-by-word (skipping word 2/3 so the context survives), resets the
 * first S/G entry if data was attached, and zeroes the per-command
 * bookkeeping before enqueueing on the free list.
 */
596 mfi_release_command(struct mfi_command *cm)
598 struct mfi_frame_header *hdr;
602 * Zero out the important fields of the frame, but make sure the
603 * context field is preserved. For efficiency, handle the fields
604 * as 32 bit words. Clear out the first S/G entry too for safety.
606 hdr = &cm->cm_frame->header;
607 if (cm->cm_data != NULL && hdr->sg_count) {
608 cm->cm_sg->sg32[0].len = 0;
609 cm->cm_sg->sg32[0].addr = 0;
612 hdr_data = (uint32_t *)cm->cm_frame;
613 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
614 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
615 hdr_data[4] = 0; /* flags, timeout */
616 hdr_data[5] = 0; /* data_len */
618 cm->cm_extra_frames = 0;
620 cm->cm_complete = NULL;
621 cm->cm_private = NULL;
624 cm->cm_total_frame_size = 0;
626 mfi_enqueue_free(cm);
/*
 * Build a DCMD (direct command) frame for the given opcode.  Optionally
 * allocates a data buffer of 'bufsize' bytes when *bufp is NULL, wiring
 * it up as cm_data/cm_private so callers (and completion paths) can free
 * it.  The caller must hold mfi_io_lock and is responsible for setting
 * cm_flags and submitting the command.  Returns 0 with *cmp set, or
 * errno on failure.
 */
630 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
631 void **bufp, size_t bufsize)
633 struct mfi_command *cm;
634 struct mfi_dcmd_frame *dcmd;
637 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
639 cm = mfi_dequeue_free(sc);
643 if ((bufsize > 0) && (bufp != NULL)) {
645 buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
/* Allocation failed: give the command back before bailing out. */
647 mfi_release_command(cm);
656 dcmd = &cm->cm_frame->dcmd;
657 bzero(dcmd->mbox, MFI_MBOX_SIZE);
658 dcmd->header.cmd = MFI_CMD_DCMD;
659 dcmd->header.timeout = 0;
660 dcmd->header.flags = 0;
661 dcmd->header.data_len = bufsize;
662 dcmd->opcode = opcode;
663 cm->cm_sg = &dcmd->sgl;
664 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
667 cm->cm_private = buf;
668 cm->cm_len = bufsize;
/* Hand a newly-allocated buffer back to the caller. */
671 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the INIT command that tells the firmware where the reply queue
 * and producer/consumer indices live.  The mfi_init_qinfo structure is
 * built in the command's second frame (the S/G area) and referenced by
 * bus address.  Issued polled, before interrupts are enabled.
 */
677 mfi_comms_init(struct mfi_softc *sc)
679 struct mfi_command *cm;
680 struct mfi_init_frame *init;
681 struct mfi_init_qinfo *qinfo;
684 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
685 if ((cm = mfi_dequeue_free(sc)) == NULL)
689 * Abuse the SG list area of the frame to hold the init_qinfo
692 init = &cm->cm_frame->init;
693 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
695 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One extra reply slot beyond max_fw_cmds, per hardware requirement. */
696 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
697 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
698 offsetof(struct mfi_hwcomms, hw_reply_q);
699 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
700 offsetof(struct mfi_hwcomms, hw_pi);
701 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
702 offsetof(struct mfi_hwcomms, hw_ci);
704 init->header.cmd = MFI_CMD_INIT;
705 init->header.data_len = sizeof(struct mfi_init_qinfo);
/* qinfo lives one frame past the command frame's bus address. */
706 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
708 cm->cm_flags = MFI_CMD_POLLED;
710 if ((error = mfi_mapcmd(sc, cm)) != 0) {
711 device_printf(sc->mfi_dev, "failed to send init command\n");
712 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
715 mfi_release_command(cm);
716 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Query controller info (MFI_DCMD_CTRL_GETINFO) and derive the maximum
 * I/O size from the reported stripe size and request limits.  On
 * failure, falls back to a conservative max based on the S/G limit.
 */
722 mfi_get_controller_info(struct mfi_softc *sc)
724 struct mfi_command *cm = NULL;
725 struct mfi_ctrl_info *ci = NULL;
726 uint32_t max_sectors_1, max_sectors_2;
729 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
730 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
731 (void **)&ci, sizeof(*ci));
734 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
736 if ((error = mfi_mapcmd(sc, cm)) != 0) {
737 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: size I/O by the S/G limit instead of firmware info. */
738 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
744 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
745 BUS_DMASYNC_POSTREAD);
746 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
748 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
749 max_sectors_2 = ci->max_request_size;
750 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
756 mfi_release_command(cm);
757 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch the firmware event-log state (sequence numbers) via a polled
 * MFI_DCMD_CTRL_EVENT_GETINFO.  On success *log_state points at an
 * M_MFIBUF allocation the caller must free.  Caller holds mfi_io_lock
 * (mfi_dcmd_command asserts it).
 */
762 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
764 struct mfi_command *cm = NULL;
767 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
768 (void **)log_state, sizeof(**log_state));
771 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
773 if ((error = mfi_mapcmd(sc, cm)) != 0) {
774 device_printf(sc->mfi_dev, "Failed to get log state\n");
778 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
779 BUS_DMASYNC_POSTREAD);
780 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
784 mfi_release_command(cm);
/*
 * Arm asynchronous event notification (AEN).  When starting from
 * sequence 0, first read the firmware log state, report any events that
 * fired between shutdown and now, and resume from the newest sequence
 * number; otherwise resume from the caller-supplied seq_start.
 */
790 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
792 struct mfi_evt_log_state *log_state = NULL;
793 union mfi_evt class_locale;
797 class_locale.members.reserved = 0;
798 class_locale.members.locale = mfi_event_locale;
799 class_locale.members.evt_class = mfi_event_class;
801 if (seq_start == 0) {
802 error = mfi_get_log_state(sc, &log_state);
805 kfree(log_state, M_MFIBUF);
810 * Walk through any events that fired since the last
813 mfi_parse_entries(sc, log_state->shutdown_seq_num,
814 log_state->newest_seq_num);
815 seq = log_state->newest_seq_num;
818 mfi_aen_register(sc, seq, class_locale.word);
819 if (log_state != NULL)
820 kfree(log_state, M_MFIBUF);
/*
 * Submit a command and sleep (lksleep on the command, dropping
 * mfi_io_lock) until it completes.  Returns cm_error.  Caller must hold
 * mfi_io_lock.
 */
826 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
829 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
830 cm->cm_complete = NULL;
834 * MegaCli can issue a DCMD of 0. In this case do nothing
835 * and return 0 to it as status
837 if (cm->cm_frame->dcmd.opcode == 0) {
838 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
840 return (cm->cm_error);
842 mfi_enqueue_ready(cm);
844 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
845 lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
846 return (cm->cm_error);
/*
 * Tear down everything mfi_attach() created, in reverse order: watchdog
 * callout, cdev, command pool dmamaps, interrupt, then each DMA area
 * (unload map, free memory, destroy tag), the sysctl tree and finally
 * the locks.  Each resource is guarded by a NULL/zero check so this is
 * safe to call from a partially-failed attach.
 */
850 mfi_free(struct mfi_softc *sc)
852 struct mfi_command *cm;
855 callout_stop(&sc->mfi_watchdog_callout); /* XXX callout_drain() */
857 if (sc->mfi_cdev != NULL)
858 destroy_dev(sc->mfi_cdev);
859 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
861 if (sc->mfi_total_cmds != 0) {
862 for (i = 0; i < sc->mfi_total_cmds; i++) {
863 cm = &sc->mfi_commands[i];
864 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
866 kfree(sc->mfi_commands, M_MFIBUF);
870 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
871 if (sc->mfi_irq != NULL)
872 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
875 if (sc->mfi_sense_busaddr != 0)
876 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
877 if (sc->mfi_sense != NULL)
878 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
879 sc->mfi_sense_dmamap);
880 if (sc->mfi_sense_dmat != NULL)
881 bus_dma_tag_destroy(sc->mfi_sense_dmat);
883 if (sc->mfi_frames_busaddr != 0)
884 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
885 if (sc->mfi_frames != NULL)
886 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
887 sc->mfi_frames_dmamap);
888 if (sc->mfi_frames_dmat != NULL)
889 bus_dma_tag_destroy(sc->mfi_frames_dmat);
891 if (sc->mfi_comms_busaddr != 0)
892 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
893 if (sc->mfi_comms != NULL)
894 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
895 sc->mfi_comms_dmamap);
896 if (sc->mfi_comms_dmat != NULL)
897 bus_dma_tag_destroy(sc->mfi_comms_dmat);
899 if (sc->mfi_buffer_dmat != NULL)
900 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
901 if (sc->mfi_parent_dmat != NULL)
902 bus_dma_tag_destroy(sc->mfi_parent_dmat);
904 if (sc->mfi_sysctl_tree != NULL)
905 sysctl_ctx_free(&sc->mfi_sysctl_ctx);
907 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
909 if (mtx_initialized(&sc->mfi_io_lock)) {
910 lockuninit(&sc->mfi_io_lock);
911 sx_destroy(&sc->mfi_config_lock);
915 lockuninit(&sc->mfi_io_lock);
916 lockuninit(&sc->mfi_config_lock);
/*
 * Config-intrhook callback, run once interrupts are available at boot:
 * disestablish the hook, enable controller interrupts, and probe for
 * logical disks under both the config and I/O locks.
 * NOTE(review): the mfi_ldprobe() call appears to be in a line dropped
 * from this listing (between taking and releasing the locks).
 */
922 mfi_startup(void *arg)
924 struct mfi_softc *sc;
926 sc = (struct mfi_softc *)arg;
928 config_intrhook_disestablish(&sc->mfi_ich);
930 sc->mfi_enable_intr(sc);
931 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
932 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
934 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
935 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Interrupt handler (mfi_intr — its header line appears to have been
 * dropped from this listing).  After check/ack via the chip-specific
 * handler, walk the reply queue from the consumer index to the producer
 * index, completing each command identified by its context, then write
 * the updated consumer index back and unfreeze deferred I/O.
 */
941 struct mfi_softc *sc;
942 struct mfi_command *cm;
943 uint32_t pi, ci, context;
945 sc = (struct mfi_softc *)arg;
/* Not our interrupt (or nothing pending): bail early. */
947 if (sc->mfi_check_clear_intr(sc))
950 pi = sc->mfi_comms->hw_pi;
951 ci = sc->mfi_comms->hw_ci;
952 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
954 context = sc->mfi_comms->hw_reply_q[ci];
955 if (context < sc->mfi_max_fw_cmds) {
956 cm = &sc->mfi_commands[context];
959 mfi_complete(sc, cm);
/* Reply queue has mfi_max_fw_cmds + 1 slots; wrap the index. */
961 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
966 sc->mfi_comms->hw_ci = ci;
968 /* Give defered I/O a chance to run */
969 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
970 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
972 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * shutdown_final eventhandler: abort any outstanding AEN command, then
 * send a polled MFI_DCMD_CTRL_SHUTDOWN so the controller flushes its
 * cache before the system goes down.
 */
978 mfi_shutdown(struct mfi_softc *sc)
980 struct mfi_dcmd_frame *dcmd;
981 struct mfi_command *cm;
984 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
985 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
987 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* The AEN command is long-lived; abort it so shutdown can proceed. */
991 if (sc->mfi_aen_cm != NULL)
992 mfi_abort(sc, sc->mfi_aen_cm);
994 dcmd = &cm->cm_frame->dcmd;
995 dcmd->header.flags = MFI_FRAME_DIR_NONE;
996 cm->cm_flags = MFI_CMD_POLLED;
999 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1000 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1003 mfi_release_command(cm);
1004 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Probe the controller for logical disks (MFI_DCMD_LD_GET_LIST) and
 * attach an mfid child for each target not already on mfi_ld_tqh.
 * Caller must hold both mfi_config_lock and mfi_io_lock.
 */
1009 mfi_ldprobe(struct mfi_softc *sc)
1011 struct mfi_frame_header *hdr;
1012 struct mfi_command *cm = NULL;
1013 struct mfi_ld_list *list = NULL;
1014 struct mfi_disk *ld;
1017 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1018 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1020 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1021 (void **)&list, sizeof(*list));
1025 cm->cm_flags = MFI_CMD_DATAIN;
1026 if (mfi_wait_command(sc, cm) != 0) {
1027 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1031 hdr = &cm->cm_frame->header;
1032 if (hdr->cmd_status != MFI_STAT_OK) {
1033 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Add only logical disks we have not already attached. */
1038 for (i = 0; i < list->ld_count; i++) {
1039 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1040 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1043 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1048 kfree(list, M_MFIBUF);
1050 mfi_release_command(cm);
/*
 * Format a firmware event timestamp for logging.  Returns a pointer to
 * a static buffer, so this is not reentrant — fine for the single
 * logging call site in mfi_decode_evt().
 */
1056 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1057 * the bits in 24-31 are all set, then it is the number of seconds since
1061 format_timestamp(uint32_t timestamp)
1063 static char buffer[32];
1065 if ((timestamp & 0xff000000) == 0xff000000)
1066 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1069 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an event class code to a human-readable string; unknown codes are
 * formatted numerically into a static buffer (not reentrant, same
 * caveat as format_timestamp()).
 */
1074 format_class(int8_t class)
1076 static char buffer[6];
1079 case MFI_EVT_CLASS_DEBUG:
1081 case MFI_EVT_CLASS_PROGRESS:
1082 return ("progress");
1083 case MFI_EVT_CLASS_INFO:
1085 case MFI_EVT_CLASS_WARNING:
1087 case MFI_EVT_CLASS_CRITICAL:
1089 case MFI_EVT_CLASS_FATAL:
1091 case MFI_EVT_CLASS_DEAD:
1094 ksnprintf(buffer, sizeof(buffer), "%d", class);
/* Log one firmware event: sequence, timestamp, locale, class, text. */
1100 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1103 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1104 format_timestamp(detail->time), detail->evt_class.members.locale,
1105 format_class(detail->evt_class.members.evt_class), detail->description);
/*
 * Register (or re-register) the long-running EVENT_WAIT command with the
 * firmware.  If an AEN command is already outstanding and its
 * class/locale already covers the request, do nothing; otherwise merge
 * the locales, take the lower (more verbose) class, and abort the old
 * command before issuing the new one.  The command completes via
 * mfi_aen_complete() when an event fires.
 */
1109 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1111 struct mfi_command *cm;
1112 struct mfi_dcmd_frame *dcmd;
1113 union mfi_evt current_aen, prior_aen;
1114 struct mfi_evt_detail *ed = NULL;
1117 current_aen.word = locale;
1118 if (sc->mfi_aen_cm != NULL) {
/* Recover the class/locale the outstanding AEN was armed with. */
1120 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1121 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1122 !((prior_aen.members.locale & current_aen.members.locale)
1123 ^current_aen.members.locale)) {
/* Widen the subscription to the union of old and new. */
1126 prior_aen.members.locale |= current_aen.members.locale;
1127 if (prior_aen.members.evt_class
1128 < current_aen.members.evt_class)
1129 current_aen.members.evt_class =
1130 prior_aen.members.evt_class;
1131 mfi_abort(sc, sc->mfi_aen_cm);
1135 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1136 (void **)&ed, sizeof(*ed));
1141 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale word. */
1142 ((uint32_t *)&dcmd->mbox)[0] = seq;
1143 ((uint32_t *)&dcmd->mbox)[1] = locale;
1144 cm->cm_flags = MFI_CMD_DATAIN;
1145 cm->cm_complete = mfi_aen_complete;
1147 sc->mfi_aen_cm = cm;
1149 mfi_enqueue_ready(cm);
/*
 * Completion handler for the EVENT_WAIT command.  Unless the command
 * was aborted, decode and log the event, signal (SIGIO) every process
 * registered for AEN delivery, and kick any kqueue pollers.  Finally
 * the command is torn down and mfi_aen_setup() re-arms event waiting
 * from the next sequence number.
 */
1157 mfi_aen_complete(struct mfi_command *cm)
1159 struct mfi_frame_header *hdr;
1160 struct mfi_softc *sc;
1161 struct mfi_evt_detail *detail;
1162 struct mfi_aen *mfi_aen_entry, *tmp;
1163 int seq = 0, aborted = 0;
1166 hdr = &cm->cm_frame->header;
1168 if (sc->mfi_aen_cm == NULL)
/* Aborted or invalid: skip decoding, just clean up below. */
1171 if (sc->mfi_aen_cm->cm_aen_abort ||
1172 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1173 sc->mfi_aen_cm->cm_aen_abort = 0;
1176 sc->mfi_aen_triggered = 1;
1177 if (sc->mfi_poll_waiting) {
1178 sc->mfi_poll_waiting = 0;
1179 KNOTE(&sc->mfi_kq.ki_note, 0);
1181 detail = cm->cm_data;
1183 * XXX If this function is too expensive or is recursive, then
1184 * events should be put onto a queue and processed later.
1186 mfi_decode_evt(sc, detail);
1187 seq = detail->seq + 1;
/* Notify and drop every process that asked for AEN signals. */
1188 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1189 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1191 lwkt_gettoken(&proc_token);
1192 ksignal(mfi_aen_entry->p, SIGIO);
1193 lwkt_reltoken(&proc_token);
1194 kfree(mfi_aen_entry, M_MFIBUF);
1198 kfree(cm->cm_data, M_MFIBUF);
1199 sc->mfi_aen_cm = NULL;
/* Wake anyone (e.g. mfi_abort waiters) sleeping on the AEN slot. */
1200 wakeup(&sc->mfi_aen_cm);
1201 mfi_release_command(cm);
1203 /* set it up again so the driver can catch more events */
1205 mfi_aen_setup(sc, seq);
1209 #define MAX_EVENTS 15
/*
 * Replay the controller's event log from start_seq up to (but not
 * including) stop_seq, decoding each entry via mfi_decode_evt().
 * Fetches MAX_EVENTS entries per polled MFI_DCMD_CTRL_EVENT_GET
 * command and follows the log's circular-buffer semantics.
 */
1212 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1214 struct mfi_command *cm;
1215 struct mfi_dcmd_frame *dcmd;
1216 struct mfi_evt_list *el;
1217 union mfi_evt class_locale;
1218 int error, i, seq, size;
/* Filter built from the driver's tunable event class/locale. */
1220 class_locale.members.reserved = 0;
1221 class_locale.members.locale = mfi_event_locale;
1222 class_locale.members.evt_class = mfi_event_class;
1224 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1226 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
/* One polled EVENT_GET per iteration, advancing 'seq' each pass. */
1230 for (seq = start_seq;;) {
1231 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1232 kfree(el, M_MFIBUF);
1236 dcmd = &cm->cm_frame->dcmd;
1237 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1238 dcmd->header.cmd = MFI_CMD_DCMD;
1239 dcmd->header.timeout = 0;
1240 dcmd->header.data_len = size;
1241 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
/* mbox[0] = starting sequence number, mbox[1] = class/locale filter. */
1242 ((uint32_t *)&dcmd->mbox)[0] = seq;
1243 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1244 cm->cm_sg = &dcmd->sgl;
1245 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1246 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1250 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1251 device_printf(sc->mfi_dev,
1252 "Failed to get controller entries\n");
1253 mfi_release_command(cm);
1257 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1258 BUS_DMASYNC_POSTREAD);
1259 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we ran off the end of the log -- normal termination. */
1261 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1262 mfi_release_command(cm);
1265 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1266 device_printf(sc->mfi_dev,
1267 "Error %d fetching controller entries\n",
1268 dcmd->header.cmd_status);
1269 mfi_release_command(cm);
1272 mfi_release_command(cm);
1274 for (i = 0; i < el->count; i++) {
1276 * If this event is newer than 'stop_seq' then
1277 * break out of the loop. Note that the log
1278 * is a circular buffer so we have to handle
1279 * the case that our stop point is earlier in
1280 * the buffer than our start point.
1282 if (el->event[i].seq >= stop_seq) {
1283 if (start_seq <= stop_seq)
1285 else if (el->event[i].seq < start_seq)
1288 mfi_decode_evt(sc, &el->event[i]);
/* Continue the next fetch just past the last entry we saw. */
1290 seq = el->event[el->count - 1].seq + 1;
1293 kfree(el, M_MFIBUF);
/*
 * Query the controller for logical drive 'id' (MFI_DCMD_LD_GET_INFO)
 * and, on success, hand the result to mfi_add_ld_complete() which
 * attaches the mfid child device.  Caller must hold mfi_io_lock
 * (asserted below).  ld_info ownership passes to the completion path.
 */
1298 mfi_add_ld(struct mfi_softc *sc, int id)
1300 struct mfi_command *cm;
1301 struct mfi_dcmd_frame *dcmd = NULL;
1302 struct mfi_ld_info *ld_info = NULL;
1305 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1307 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1308 (void **)&ld_info, sizeof(*ld_info));
1310 device_printf(sc->mfi_dev,
1311 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1313 kfree(ld_info, M_MFIBUF);
1316 cm->cm_flags = MFI_CMD_DATAIN;
/* NOTE(review): dcmd is assigned but its use is elided in this view. */
1317 dcmd = &cm->cm_frame->dcmd;
1319 if (mfi_wait_command(sc, cm) != 0) {
1320 device_printf(sc->mfi_dev,
1321 "Failed to get logical drive: %d\n", id);
1322 kfree(ld_info, M_MFIBUF);
1326 mfi_add_ld_complete(cm);
/*
 * Second half of logical-drive attach: consumes the LD_GET_INFO
 * command, creates an "mfid" child device with the ld_info as ivars,
 * and triggers bus attach.  Drops mfi_io_lock around the newbus calls
 * (device_add_child/bus_generic_attach must not run under it) and
 * reacquires it before returning.
 */
1331 mfi_add_ld_complete(struct mfi_command *cm)
1333 struct mfi_frame_header *hdr;
1334 struct mfi_ld_info *ld_info;
1335 struct mfi_softc *sc;
1339 hdr = &cm->cm_frame->header;
1340 ld_info = cm->cm_private;
/* Firmware rejected the query: free the info buffer and bail. */
1342 if (hdr->cmd_status != MFI_STAT_OK) {
1343 kfree(ld_info, M_MFIBUF);
1344 mfi_release_command(cm);
1347 mfi_release_command(cm);
1349 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1351 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1352 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1353 kfree(ld_info, M_MFIBUF);
1355 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* ld_info ownership transfers to the child device via ivars. */
1359 device_set_ivars(child, ld_info);
1360 device_set_desc(child, "MFI Logical Disk");
1361 bus_generic_attach(sc->mfi_dev);
1363 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Convert the next queued bio into an MFI LD read/write command.
 * Returns a ready-to-map command, or NULL when either no free command
 * or no pending bio is available (each resource is returned if the
 * other is missing).
 */
1366 static struct mfi_command *
1367 mfi_bio_command(struct mfi_softc *sc)
1369 struct mfi_io_frame *io;
1370 struct mfi_command *cm;
1373 struct mfi_disk *disk;
1374 int flags, blkcount;
1376 if ((cm = mfi_dequeue_free(sc)) == NULL)
1379 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1380 mfi_release_command(cm);
1385 io = &cm->cm_frame->io;
/* Only BUF_CMD_READ/WRITE are expected here; anything else is fatal. */
1386 switch (bp->b_cmd & 0x03) {
1388 io->header.cmd = MFI_CMD_LD_READ;
1389 flags = MFI_CMD_DATAIN;
1392 io->header.cmd = MFI_CMD_LD_WRITE;
1393 flags = MFI_CMD_DATAOUT;
1396 panic("Invalid bio command");
1399 /* Cheat with the sector length to avoid a non-constant division */
1400 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1401 disk = bio->bio_driver_info;
1402 io->header.target_id = disk->ld_id;
1403 io->header.timeout = 0;
1404 io->header.flags = 0;
1405 io->header.sense_len = MFI_SENSE_LEN;
/* data_len for LD I/O frames is in sectors, not bytes. */
1406 io->header.data_len = blkcount;
1407 io->sense_addr_lo = cm->cm_sense_busaddr;
1408 io->sense_addr_hi = 0;
/* 64-bit LBA split across the hi/lo frame fields. */
1409 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
1410 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
1411 cm->cm_complete = mfi_bio_complete;
1412 cm->cm_private = bio;
1413 cm->cm_data = bp->b_data;
1414 cm->cm_len = bp->b_bcount;
1415 cm->cm_sg = &io->sgl;
1416 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1417 cm->cm_flags = flags;
/*
 * Completion handler for bio-originated LD I/O: translate firmware or
 * driver errors into B_ERROR on the buf, log sense data on firmware
 * failure, then release the command and finish the bio.
 */
1422 mfi_bio_complete(struct mfi_command *cm)
1426 struct mfi_frame_header *hdr;
1427 struct mfi_softc *sc;
1429 bio = cm->cm_private;
1431 hdr = &cm->cm_frame->header;
/* Firmware-reported failure: flag the buf and dump the sense buffer. */
1434 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
1435 bp->b_flags |= B_ERROR;
1437 device_printf(sc->mfi_dev, "I/O error, status= %d "
1438 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1439 mfi_print_sense(cm->cm_sc, cm->cm_sense);
/* Driver-level error (e.g. DMA load failure recorded in cm_error). */
1440 } else if (cm->cm_error != 0) {
1441 bp->b_flags |= B_ERROR;
1444 mfi_release_command(cm);
1445 mfi_disk_complete(bio);
/*
 * Pump the I/O queues: repeatedly pick work (ready queue first, then
 * CAM ccb queue, then the bio queue) and hand it to mfi_mapcmd().
 * Stops early when resources are frozen (MFI_FLAGS_QFRZN) or when a
 * map attempt fails, in which case the command is requeued.
 */
1449 mfi_startio(struct mfi_softc *sc)
1451 struct mfi_command *cm;
1452 struct ccb_hdr *ccbh;
1455 /* Don't bother if we're short on resources */
1456 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1459 /* Try a command that has already been prepared */
1460 cm = mfi_dequeue_ready(sc);
/* Next preference: a pending CAM ccb, started via the registered hook. */
1463 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1464 cm = sc->mfi_cam_start(ccbh);
1467 /* Nope, so look for work on the bioq */
1469 cm = mfi_bio_command(sc);
1471 /* No work available, so exit */
1475 /* Send the command to the controller */
1476 if (mfi_mapcmd(sc, cm) != 0) {
1477 mfi_requeue_ready(cm);
/*
 * DMA-map a command's data buffer (if any) and issue it.  For commands
 * with data the busdma callback (mfi_data_cb) finishes setup and sends
 * the frame; data-less commands are sent directly.  An EINPROGRESS
 * deferred load freezes the queue (MFI_FLAGS_QFRZN) until the callback
 * fires.  Caller must hold mfi_io_lock (asserted).
 */
1484 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1488 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1490 if (cm->cm_data != NULL) {
/* Polled commands must not block waiting for DMA resources. */
1491 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1492 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1493 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1494 if (error == EINPROGRESS) {
1495 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1499 error = mfi_send_frame(sc, cm);
/*
 * busdma load callback: populate the command's scatter/gather list
 * (32- or 64-bit entries depending on MFI_FLAGS_SG64), set the DMA
 * direction flags, pre-sync the map, account for extra frames consumed
 * by the SG list, and send the frame.  On a load error the command is
 * completed immediately with cm_error set.
 */
1506 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1508 struct mfi_frame_header *hdr;
1509 struct mfi_command *cm;
1511 struct mfi_softc *sc;
1514 cm = (struct mfi_command *)arg;
1516 hdr = &cm->cm_frame->header;
/* Load failed: record the error and complete the command now. */
1520 kprintf("error %d in callback\n", error);
1521 cm->cm_error = error;
1522 mfi_complete(sc, cm);
/* Fill 32-bit or 64-bit SG entries from the busdma segment list. */
1526 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1527 for (i = 0; i < nsegs; i++) {
1528 sgl->sg32[i].addr = segs[i].ds_addr;
1529 sgl->sg32[i].len = segs[i].ds_len;
1532 for (i = 0; i < nsegs; i++) {
1533 sgl->sg64[i].addr = segs[i].ds_addr;
1534 sgl->sg64[i].len = segs[i].ds_len;
1536 hdr->flags |= MFI_FRAME_SGL64;
1538 hdr->sg_count = nsegs;
/* Mirror the command's data direction into both sync op and frame flags. */
1541 if (cm->cm_flags & MFI_CMD_DATAIN) {
1542 dir |= BUS_DMASYNC_PREREAD;
1543 hdr->flags |= MFI_FRAME_DIR_READ;
1545 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1546 dir |= BUS_DMASYNC_PREWRITE;
1547 hdr->flags |= MFI_FRAME_DIR_WRITE;
1549 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1550 cm->cm_flags |= MFI_CMD_MAPPED;
1553 * Instead of calculating the total number of frames in the
1554 * compound frame, it's already assumed that there will be at
1555 * least 1 frame, so don't compensate for the modulo of the
1556 * following division.
1558 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
1559 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1561 mfi_send_frame(sc, cm);
/*
 * Hand a fully-built command to the hardware via the controller's
 * issue hook.  Non-polled commands go onto the busy queue and complete
 * via interrupt; polled commands busy-wait here until the firmware
 * overwrites the sentinel cmd_status, timing out after
 * MFI_POLL_TIMEOUT_SECS.
 */
1567 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1569 struct mfi_frame_header *hdr;
1570 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1572 hdr = &cm->cm_frame->header;
1574 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
/* Timestamp for the watchdog; interrupt path will complete it. */
1575 cm->cm_timestamp = time_second;
1576 mfi_enqueue_busy(cm);
/* Polled: sentinel status lets us detect completion by polling memory. */
1578 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1579 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1583 * The bus address of the command is aligned on a 64 byte boundary,
1584 * leaving the least 6 bits as zero. For whatever reason, the
1585 * hardware wants the address shifted right by three, leaving just
1586 * 3 zero bits. These three bits are then used as a prefetching
1587 * hint for the hardware to predict how many frames need to be
1588 * fetched across the bus. If a command has more than 8 frames
1589 * then the 3 bits are set to 0x7 and the firmware uses other
1590 * information in the command to determine the total amount to fetch.
1591 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1592 * is enough for both 32bit and 64bit systems.
1594 if (cm->cm_extra_frames > 7)
1595 cm->cm_extra_frames = 7;
1597 sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames);
1599 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1602 /* This is a polled command, so busy-wait for it to complete. */
1603 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1610 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1611 device_printf(sc->mfi_dev, "Frame %p timed out "
1612 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Generic command completion: post-sync and unload the DMA map if the
 * command was mapped, mark it completed, and invoke its cm_complete
 * callback when one is set.
 */
1620 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1624 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* Sync direction mirrors the flags set when the buffer was mapped. */
1626 if (cm->cm_flags & MFI_CMD_DATAIN)
1627 dir |= BUS_DMASYNC_POSTREAD;
1628 if (cm->cm_flags & MFI_CMD_DATAOUT)
1629 dir |= BUS_DMASYNC_POSTWRITE;
1631 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1632 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1633 cm->cm_flags &= ~MFI_CMD_MAPPED;
1636 cm->cm_flags |= MFI_CMD_COMPLETED;
1638 if (cm->cm_complete != NULL)
1639 cm->cm_complete(cm);
/*
 * Issue a polled MFI_CMD_ABORT against 'cm_abort' (in practice the
 * outstanding AEN command), then wait up to 5 short lksleep intervals
 * for mfi_aen_complete() to clear sc->mfi_aen_cm and wake us.
 * Caller must hold mfi_io_lock (asserted).
 */
1645 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1647 struct mfi_command *cm;
1648 struct mfi_abort_frame *abort;
1651 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1653 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1657 abort = &cm->cm_frame->abort;
1658 abort->header.cmd = MFI_CMD_ABORT;
1659 abort->header.flags = 0;
/* Identify the victim by its frame context and bus address. */
1660 abort->abort_context = cm_abort->cm_frame->header.context;
1661 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1662 abort->abort_mfi_addr_hi = 0;
1664 cm->cm_flags = MFI_CMD_POLLED;
/* Tell the AEN completion path this was an intentional abort. */
1666 sc->mfi_aen_cm->cm_aen_abort = 1;
1668 mfi_release_command(cm);
/* Bounded wait for the aborted AEN command to drain. */
1670 while (i < 5 && sc->mfi_aen_cm != NULL) {
1671 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
/*
 * Write 'len' bytes from 'virt' to sector 'lba' of logical drive 'id'
 * using a fully polled LD WRITE -- presumably the kernel crash-dump
 * path, which cannot rely on interrupts (TODO confirm against caller).
 * Returns the mfi_mapcmd() error after syncing and unloading the map.
 */
1679 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1681 struct mfi_command *cm;
1682 struct mfi_io_frame *io;
1685 if ((cm = mfi_dequeue_free(sc)) == NULL)
1688 io = &cm->cm_frame->io;
1689 io->header.cmd = MFI_CMD_LD_WRITE;
1690 io->header.target_id = id;
1691 io->header.timeout = 0;
1692 io->header.flags = 0;
1693 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is in sectors; round the byte count up. */
1694 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1695 io->sense_addr_lo = cm->cm_sense_busaddr;
1696 io->sense_addr_hi = 0;
1697 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1698 io->lba_lo = lba & 0xffffffff;
1701 cm->cm_sg = &io->sgl;
1702 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1703 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1705 error = mfi_mapcmd(sc, cm);
1706 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1707 BUS_DMASYNC_POSTWRITE);
1708 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1709 mfi_release_command(cm);
/*
 * Character-device open: refuse while the driver is detaching,
 * otherwise mark the control device open (MFI_FLAGS_OPEN).
 */
1715 mfi_open(struct dev_open_args *ap)
1717 cdev_t dev = ap->a_head.a_dev;
1718 struct mfi_softc *sc;
1723 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1724 if (sc->mfi_detaching)
1727 sc->mfi_flags |= MFI_FLAGS_OPEN;
1730 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Character-device close: clear MFI_FLAGS_OPEN and drop any AEN
 * registrations belonging to the closing process.
 */
1736 mfi_close(struct dev_close_args *ap)
1738 cdev_t dev = ap->a_head.a_dev;
1739 struct mfi_softc *sc;
1740 struct mfi_aen *mfi_aen_entry, *tmp;
1744 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1745 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* Remove this process's AEN listener entries (if any). */
1747 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1748 if (mfi_aen_entry->p == curproc) {
1749 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1751 kfree(mfi_aen_entry, M_MFIBUF);
1754 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Take the configuration lock for DCMD opcodes that change the array
 * configuration (LD delete, config add/clear); other opcodes need no
 * lock.  Presumably returns nonzero when the lock was taken, for the
 * matching mfi_config_unlock() -- return statements are elided here.
 */
1759 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
1763 case MFI_DCMD_LD_DELETE:
1764 case MFI_DCMD_CFG_ADD:
1765 case MFI_DCMD_CFG_CLEAR:
1766 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
/* Release the configuration lock if mfi_config_lock() reported taking it. */
1774 mfi_config_unlock(struct mfi_softc *sc, int locked)
1778 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1781 /* Perform pre-issue checks on commands from userland and possibly veto them. */
/*
 * For destructive opcodes, disable the affected mfid disk device(s)
 * before letting the command reach the firmware; a busy disk vetoes
 * the command.  For CFG_CLEAR, a failure part-way re-enables the disks
 * already disabled.  Caller must hold mfi_io_lock (asserted).
 */
1783 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
1785 struct mfi_disk *ld, *ld2;
1788 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1790 switch (cm->cm_frame->dcmd.opcode) {
1791 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE. */
1792 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1793 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
1799 error = mfi_disk_disable(ld);
1801 case MFI_DCMD_CFG_CLEAR:
1802 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1803 error = mfi_disk_disable(ld);
/* Roll back: re-enable disks disabled before the failure. */
1808 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
1811 mfi_disk_enable(ld2);
1823 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
1825 struct mfi_disk *ld, *ldn;
1827 switch (cm->cm_frame->dcmd.opcode) {
1828 case MFI_DCMD_LD_DELETE:
1829 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1830 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
1833 KASSERT(ld != NULL, ("volume dissappeared"));
1834 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
1835 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1837 device_delete_child(sc->mfi_dev, ld->ld_dev);
1839 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1841 mfi_disk_enable(ld);
1843 case MFI_DCMD_CFG_CLEAR:
1844 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
1845 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1847 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
1848 device_delete_child(sc->mfi_dev, ld->ld_dev);
1851 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1853 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
1854 mfi_disk_enable(ld);
1857 case MFI_DCMD_CFG_ADD:
1860 case MFI_DCMD_CFG_FOREIGN_IMPORT:
/*
 * Execute a userland passthrough DCMD (MFIIO_PASSTHRU): copy in the
 * optional data buffer, build the frame from the user-supplied ioc
 * frame (preserving the driver's header context), run the pre/post
 * config checks around the command, then copy results and data back
 * out.  Uses both DATAIN and DATAOUT since the direction is unknown.
 */
1867 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
1869 struct mfi_command *cm;
1870 struct mfi_dcmd_frame *dcmd;
1871 void *ioc_buf = NULL;
1873 int error = 0, locked;
1876 if (ioc->buf_size > 0) {
/* NOTE(review): M_WAITOK kmalloc cannot return NULL, so the check below is dead code. */
1877 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
1878 if (ioc_buf == NULL) {
1881 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
1883 device_printf(sc->mfi_dev, "failed to copyin\n");
1884 kfree(ioc_buf, M_MFIBUF);
1889 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
1891 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Block until a free command slot is available. */
1892 while ((cm = mfi_dequeue_free(sc)) == NULL)
1893 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
1895 /* Save context for later */
1896 context = cm->cm_frame->header.context;
1898 dcmd = &cm->cm_frame->dcmd;
/* User frame overwrites the whole DCMD frame, including the context. */
1899 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
1901 cm->cm_sg = &dcmd->sgl;
1902 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1903 cm->cm_data = ioc_buf;
1904 cm->cm_len = ioc->buf_size;
1906 /* restore context */
1907 cm->cm_frame->header.context = context;
1909 /* Cheat since we don't know if we're writing or reading */
1910 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
1912 error = mfi_check_command_pre(sc, cm);
1916 error = mfi_wait_command(sc, cm);
1918 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the (possibly updated) frame, including cmd_status, to the caller. */
1921 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
1922 mfi_check_command_post(sc, cm);
1924 mfi_release_command(cm);
1925 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1926 mfi_config_unlock(sc, locked);
1927 if (ioc->buf_size > 0)
1928 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
1930 kfree(ioc_buf, M_MFIBUF);
1935 #define PTRIN(p) ((void *)(uintptr_t)(p))
1937 #define PTRIN(p) (p)
/*
 * Main control-device ioctl dispatcher.  Handles driver statistics,
 * disk queries, the generic MFI_CMD frame passthrough (native and
 * 32-bit compat layouts), AEN registration, the Linux ioctl shims
 * (which re-dispatch through mfi_linux_ioctl_int on the addressed
 * adapter), and the MFIIO_PASSTHRU[32] commands.
 */
1941 mfi_ioctl(struct dev_ioctl_args *ap)
1943 cdev_t dev = ap->a_head.a_dev;
1944 u_long cmd = ap->a_cmd;
1945 int flag = ap->a_fflag;
1946 caddr_t arg = ap->a_data;
1947 struct mfi_softc *sc;
1948 union mfi_statrequest *ms;
1949 struct mfi_ioc_packet *ioc;
1951 struct mfi_ioc_packet32 *ioc32;
1953 struct mfi_ioc_aen *aen;
1954 struct mfi_command *cm = NULL;
1956 union mfi_sense_ptr sense_ptr;
1957 uint8_t *data = NULL, *temp;
1959 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
1961 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
1962 struct mfi_ioc_passthru iop_swab;
/* MFIIO_STATS: copy out the requested per-queue statistics. */
1971 ms = (union mfi_statrequest *)arg;
1972 switch (ms->ms_item) {
1977 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1978 sizeof(struct mfi_qstat));
/* Map an array id to its mfid device name and open state. */
1985 case MFIIO_QUERY_DISK:
1987 struct mfi_query_disk *qd;
1988 struct mfi_disk *ld;
1990 qd = (struct mfi_query_disk *)arg;
1991 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1992 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1993 if (ld->ld_id == qd->array_id)
1998 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2002 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2004 bzero(qd->devname, SPECNAMELEN + 1);
2005 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2006 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Generic frame passthrough (MFI_CMD / MFI_CMD32). */
2014 devclass_t devclass;
2015 ioc = (struct mfi_ioc_packet *)arg;
/* The request may target another adapter; redirect through unit 0. */
2018 adapter = ioc->mfi_adapter_no;
2019 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2020 devclass = devclass_find("mfi");
2021 sc = devclass_get_softc(devclass, adapter);
2023 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2024 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2025 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2028 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2032 * save off original context since copying from user
2033 * will clobber some data
2035 context = cm->cm_frame->header.context;
2037 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2038 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2039 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2040 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2041 if (ioc->mfi_sge_count) {
2043 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Derive driver data-direction flags from the user frame's flags. */
2046 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2047 cm->cm_flags |= MFI_CMD_DATAIN;
2048 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2049 cm->cm_flags |= MFI_CMD_DATAOUT;
2050 /* Legacy app shim */
2051 if (cm->cm_flags == 0)
2052 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2053 cm->cm_len = cm->cm_frame->header.data_len;
2055 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2056 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2058 if (cm->cm_data == NULL) {
2059 device_printf(sc->mfi_dev, "Malloc failed\n");
2066 /* restore header context */
2067 cm->cm_frame->header.context = context;
/* Gather the user SG list into the single kernel bounce buffer. */
2070 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2071 for (i = 0; i < ioc->mfi_sge_count; i++) {
2073 if (cmd == MFI_CMD) {
2075 error = copyin(ioc->mfi_sgl[i].iov_base,
2077 ioc->mfi_sgl[i].iov_len);
/* 32-bit compat layout: pointers need PTRIN conversion. */
2081 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2083 PTRIN(ioc32->mfi_sgl[i].iov_base);
2084 error = copyin(temp_convert,
2086 ioc32->mfi_sgl[i].iov_len);
2089 error = copyin(ioc->mfi_sgl[i].iov_base,
2091 ioc->mfi_sgl[i].iov_len);
2094 device_printf(sc->mfi_dev,
2095 "Copy in failed\n");
2098 temp = &temp[ioc->mfi_sgl[i].iov_len];
2102 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2103 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Point pass-through SCSI frames at the command's sense buffer. */
2105 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2106 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2107 cm->cm_frame->pass.sense_addr_hi = 0;
2110 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2111 error = mfi_check_command_pre(sc, cm);
2113 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2117 if ((error = mfi_wait_command(sc, cm)) != 0) {
2118 device_printf(sc->mfi_dev,
2119 "Controller polled failed\n");
2120 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2124 mfi_check_command_post(sc, cm);
2125 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter the bounce buffer back out to the user SG list. */
2128 if (cm->cm_flags & MFI_CMD_DATAIN) {
2129 for (i = 0; i < ioc->mfi_sge_count; i++) {
2131 if (cmd == MFI_CMD) {
2133 error = copyout(temp,
2134 ioc->mfi_sgl[i].iov_base,
2135 ioc->mfi_sgl[i].iov_len);
2139 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2141 PTRIN(ioc32->mfi_sgl[i].iov_base);
2142 error = copyout(temp,
2144 ioc32->mfi_sgl[i].iov_len);
2147 error = copyout(temp,
2148 ioc->mfi_sgl[i].iov_base,
2149 ioc->mfi_sgl[i].iov_len);
2152 device_printf(sc->mfi_dev,
2153 "Copy out failed\n");
2156 temp = &temp[ioc->mfi_sgl[i].iov_len];
2160 if (ioc->mfi_sense_len) {
2161 /* get user-space sense ptr then copy out sense */
2162 bcopy(&((struct mfi_ioc_packet*)arg)
2163 ->mfi_frame.raw[ioc->mfi_sense_off],
2164 &sense_ptr.sense_ptr_data[0],
2165 sizeof(sense_ptr.sense_ptr_data));
2167 if (cmd != MFI_CMD) {
2169 * not 64bit native so zero out any address
2171 sense_ptr.addr.high = 0;
2174 error = copyout(cm->cm_sense, sense_ptr.user_space,
2175 ioc->mfi_sense_len);
2177 device_printf(sc->mfi_dev,
2178 "Copy out failed\n");
/* Reflect the firmware's final status back into the user packet. */
2183 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2185 mfi_config_unlock(sc, locked);
2187 kfree(data, M_MFIBUF);
2189 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2190 mfi_release_command(cm);
2191 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* AEN registration on behalf of the calling process. */
2197 aen = (struct mfi_ioc_aen *)arg;
2198 error = mfi_aen_register(sc, aen->aen_seq_num,
2199 aen->aen_class_locale);
2202 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2204 devclass_t devclass;
2205 struct mfi_linux_ioc_packet l_ioc;
2208 devclass = devclass_find("mfi");
2209 if (devclass == NULL)
2212 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Re-dispatch to the adapter named in the Linux packet. */
2215 adapter = l_ioc.lioc_adapter_no;
2216 sc = devclass_get_softc(devclass, adapter);
2219 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2223 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2225 devclass_t devclass;
2226 struct mfi_linux_ioc_aen l_aen;
2229 devclass = devclass_find("mfi");
2230 if (devclass == NULL)
2233 error = copyin(arg, &l_aen, sizeof(l_aen));
2236 adapter = l_aen.laen_adapter_no;
2237 sc = devclass_get_softc(devclass, adapter);
2240 return (mfi_linux_ioctl_int(sc->mfi_cdev,
/* 32-bit passthru: swab into the native layout, fall into PASSTHRU. */
2245 case MFIIO_PASSTHRU32:
2246 iop_swab.ioc_frame = iop32->ioc_frame;
2247 iop_swab.buf_size = iop32->buf_size;
2248 iop_swab.buf = PTRIN(iop32->buf);
2252 case MFIIO_PASSTHRU:
2253 error = mfi_user_command(sc, iop);
2255 if (cmd == MFIIO_PASSTHRU32)
2256 iop32->ioc_frame = iop_swab.ioc_frame;
2260 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Linux-compat ioctl backend: executes MegaRAID Linux-format frame
 * passthroughs (MFI_LINUX_CMD_2) and AEN registration
 * (MFI_LINUX_SET_AEN_2).  Mirrors the native MFI_CMD path in
 * mfi_ioctl() but with the 32-bit Linux packet layout and PTRIN
 * pointer conversion throughout.
 */
2269 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
2271 struct mfi_softc *sc;
2272 struct mfi_linux_ioc_packet l_ioc;
2273 struct mfi_linux_ioc_aen l_aen;
2274 struct mfi_command *cm = NULL;
2275 struct mfi_aen *mfi_aen_entry;
2276 union mfi_sense_ptr sense_ptr;
2278 uint8_t *data = NULL, *temp;
2285 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2286 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the user-supplied SG count before trusting it. */
2290 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2294 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2295 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2296 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2299 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2303 * save off original context since copying from user
2304 * will clobber some data
2306 context = cm->cm_frame->header.context;
2308 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2309 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2310 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2311 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
2312 if (l_ioc.lioc_sge_count)
2314 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
2316 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2317 cm->cm_flags |= MFI_CMD_DATAIN;
2318 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2319 cm->cm_flags |= MFI_CMD_DATAOUT;
2320 cm->cm_len = cm->cm_frame->header.data_len;
2322 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2323 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2325 if (cm->cm_data == NULL) {
2326 device_printf(sc->mfi_dev, "Malloc failed\n");
2333 /* restore header context */
2334 cm->cm_frame->header.context = context;
/* Gather the Linux SG list into the kernel bounce buffer. */
2337 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2338 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2339 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
2341 l_ioc.lioc_sgl[i].iov_len);
2343 device_printf(sc->mfi_dev,
2344 "Copy in failed\n");
2347 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2351 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2352 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2354 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2355 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2356 cm->cm_frame->pass.sense_addr_hi = 0;
2359 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2360 error = mfi_check_command_pre(sc, cm);
2362 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2366 if ((error = mfi_wait_command(sc, cm)) != 0) {
2367 device_printf(sc->mfi_dev,
2368 "Controller polled failed\n");
2369 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2373 mfi_check_command_post(sc, cm);
2374 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter results back to the caller's SG list. */
2377 if (cm->cm_flags & MFI_CMD_DATAIN) {
2378 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2379 error = copyout(temp,
2380 PTRIN(l_ioc.lioc_sgl[i].iov_base),
2381 l_ioc.lioc_sgl[i].iov_len);
2383 device_printf(sc->mfi_dev,
2384 "Copy out failed\n");
2387 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2391 if (l_ioc.lioc_sense_len) {
2392 /* get user-space sense ptr then copy out sense */
2393 bcopy(&((struct mfi_linux_ioc_packet*)arg)
2394 ->lioc_frame.raw[l_ioc.lioc_sense_off],
2395 &sense_ptr.sense_ptr_data[0],
2396 sizeof(sense_ptr.sense_ptr_data));
2399 * only 32bit Linux support so zero out any
2400 * address over 32bit
2402 sense_ptr.addr.high = 0;
2404 error = copyout(cm->cm_sense, sense_ptr.user_space,
2405 l_ioc.lioc_sense_len);
2407 device_printf(sc->mfi_dev,
2408 "Copy out failed\n");
/* Write the final cmd_status directly into the user's packet. */
2413 error = copyout(&cm->cm_frame->header.cmd_status,
2414 &((struct mfi_linux_ioc_packet*)arg)
2415 ->lioc_frame.hdr.cmd_status,
2418 device_printf(sc->mfi_dev,
2419 "Copy out failed\n");
2424 mfi_config_unlock(sc, locked);
2426 kfree(data, M_MFIBUF);
2428 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2429 mfi_release_command(cm);
2430 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2434 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2435 error = copyin(arg, &l_aen, sizeof(l_aen));
2438 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
2439 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
2441 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Record the caller so mfi_aen_complete() can SIGIO it later. */
2442 if (mfi_aen_entry != NULL) {
2443 mfi_aen_entry->p = curproc;
2444 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
2447 error = mfi_aen_register(sc, l_aen.laen_seq_num,
2448 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
2451 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2453 kfree(mfi_aen_entry, M_MFIBUF);
2455 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2459 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * kqueue attach for the control device: hook EVFILT_READ/WRITE filters
 * onto the softc's knote list; unsupported filters get EOPNOTSUPP.
 */
2468 mfi_kqfilter(struct dev_kqfilter_args *ap)
2470 cdev_t dev = ap->a_head.a_dev;
2471 struct knote *kn = ap->a_kn;
2472 struct mfi_softc *sc;
2473 struct klist *klist;
2478 switch (kn->kn_filter) {
2480 kn->kn_fop = &mfi_read_filterops;
2481 kn->kn_hook = (caddr_t)sc;
2484 kn->kn_fop = &mfi_write_filterops;
2485 kn->kn_hook = (caddr_t)sc;
2488 ap->a_result = EOPNOTSUPP;
2492 klist = &sc->mfi_kq.ki_note;
2493 knote_insert(klist, kn);
/* Detach a knote previously attached by mfi_kqfilter(). */
2499 mfi_filter_detach(struct knote *kn)
2501 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
2502 struct klist *klist = &sc->mfi_kq.ki_note;
2504 knote_remove(klist, kn);
/*
 * EVFILT_READ filter: report readable when an AEN has fired (and
 * consume the trigger).  With no trigger and no outstanding AEN
 * command the knote is flagged EV_ERROR; otherwise we note that a
 * poller is waiting so mfi_aen_complete() will KNOTE us.
 */
2508 mfi_filter_read(struct knote *kn, long hint)
2510 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
2513 if (sc->mfi_aen_triggered != 0) {
2515 sc->mfi_aen_triggered = 0;
2517 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
2518 kn->kn_flags |= EV_ERROR;
2521 sc->mfi_poll_waiting = 1;
/* EVFILT_WRITE filter (body elided in this view). */
2527 mfi_filter_write(struct knote *kn, long hint)
/*
 * NOTE(review): the following lines belong to a separate debug routine
 * (its header is elided here) that walks every mfi softc in the
 * devclass and reports busy commands older than MFI_CMD_TIMEOUT --
 * presumably a ddb/debug "dump all" helper; confirm in full source.
 */
2535 struct mfi_softc *sc;
2536 struct mfi_command *cm;
2542 dc = devclass_find("mfi");
2544 kprintf("No mfi dev class\n");
2548 for (i = 0; ; i++) {
2549 sc = devclass_get_softc(dc, i);
2552 device_printf(sc->mfi_dev, "Dumping\n\n");
2554 deadline = time_second - MFI_CMD_TIMEOUT;
2555 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Flag any busy command that has exceeded the timeout deadline. */
2556 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2557 if (cm->cm_timestamp < deadline) {
2558 device_printf(sc->mfi_dev,
2559 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2560 (int)(time_second - cm->cm_timestamp));
2571 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2578 mfi_timeout(void *data)
2580 struct mfi_softc *sc = (struct mfi_softc *)data;
2581 struct mfi_command *cm;
2585 deadline = time_second - MFI_CMD_TIMEOUT;
2586 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2587 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2588 if (sc->mfi_aen_cm == cm)
2590 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
2591 device_printf(sc->mfi_dev,
2592 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2593 (int)(time_second - cm->cm_timestamp));
2595 MFI_VALIDATE_CMD(sc, cm);
2605 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2607 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,