2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * Redistribution and use in source and binary forms, with or without
54 * modification, are permitted provided that the following conditions
57 * Copyright 1994-2009 The FreeBSD Project.
58 * All rights reserved.
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
66 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
68 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
69 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
70 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
71 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
72 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
73 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
74 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
75 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
76 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
78 * The views and conclusions contained in the software and documentation
79 * are those of the authors and should not be interpreted as representing
80 * official policies, either expressed or implied, of the FreeBSD Project.
82 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.57 2011/07/14 20:20:33 jhb Exp $
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/sysctl.h>
90 #include <sys/malloc.h>
91 #include <sys/kernel.h>
93 #include <sys/eventhandler.h>
95 #include <sys/bus_dma.h>
97 #include <sys/ioccom.h>
100 #include <sys/signalvar.h>
101 #include <sys/device.h>
102 #include <sys/mplock2.h>
104 #include <bus/cam/scsi/scsi_all.h>
106 #include <bus/pci/pcivar.h>
108 #include <dev/raid/mfi/mfireg.h>
109 #include <dev/raid/mfi/mfi_ioctl.h>
110 #include <dev/raid/mfi/mfivar.h>
/*
 * Forward declarations for the driver-internal helpers defined below.
 * All are file-scoped (static); the public driver entry points are
 * declared in mfivar.h.
 */
112 static int mfi_alloc_commands(struct mfi_softc *);
113 static int mfi_comms_init(struct mfi_softc *);
114 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
115 static int mfi_get_controller_info(struct mfi_softc *);
116 static int mfi_get_log_state(struct mfi_softc *,
117 struct mfi_evt_log_state **);
118 static int mfi_parse_entries(struct mfi_softc *, int, int);
119 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
120 uint32_t, void **, size_t);
121 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
122 static void mfi_startup(void *arg);
123 static void mfi_intr(void *arg);
124 static void mfi_ldprobe(struct mfi_softc *sc);
125 static void mfi_syspdprobe(struct mfi_softc *sc);
126 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
127 static void mfi_aen_complete(struct mfi_command *);
128 static int mfi_aen_setup(struct mfi_softc *, uint32_t);
129 static int mfi_add_ld(struct mfi_softc *sc, int);
130 static void mfi_add_ld_complete(struct mfi_command *);
131 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
132 static void mfi_add_sys_pd_complete(struct mfi_command *);
133 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
134 static void mfi_bio_complete(struct mfi_command *);
135 static struct mfi_command * mfi_build_ldio(struct mfi_softc *,struct bio*);
136 static struct mfi_command * mfi_build_syspdio(struct mfi_softc *,struct bio*);
137 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
138 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
139 static void mfi_complete(struct mfi_softc *, struct mfi_command *);
140 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
141 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
142 static void mfi_timeout(void *);
143 static int mfi_user_command(struct mfi_softc *,
144 struct mfi_ioc_passthru *);
/* Per-chipset register access methods (xscale = 1064R, ppc = 1078/GEN2/skinny). */
145 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
146 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
147 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
148 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
149 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
150 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
151 static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
152 static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
153 static void mfi_filter_detach(struct knote *);
154 static int mfi_filter_read(struct knote *, long);
155 static int mfi_filter_write(struct knote *, long);
/*
 * Loader tunables and sysctls under hw.mfi.*: AEN event locale/class
 * filters, driver command-pool depth, and MSI enablement.
 */
157 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
158 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
159 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
160 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
161 0, "event message locale");
163 static int mfi_event_class = MFI_EVT_CLASS_INFO;
164 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
165 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
166 0, "event message class");
/* Upper bound on driver command pool; clamped to firmware max in mfi_alloc_commands(). */
168 static int mfi_max_cmds = 128;
169 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
170 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
173 static int mfi_msi_enable = 1;
174 TUNABLE_INT("hw.mfi.msi.enable", &mfi_msi_enable);
176 /* Management interface */
/*
 * Character-device entry points for the /dev/mfi%d control node used by
 * management tools (MegaCLI etc.), plus the kqueue filters for AEN
 * delivery to userland.
 */
177 static d_open_t mfi_open;
178 static d_close_t mfi_close;
179 static d_ioctl_t mfi_ioctl;
180 static d_kqfilter_t mfi_kqfilter;
182 static struct dev_ops mfi_ops = {
185 .d_close = mfi_close,
186 .d_ioctl = mfi_ioctl,
187 .d_kqfilter = mfi_kqfilter,
190 static struct filterops mfi_read_filterops =
191 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
192 static struct filterops mfi_write_filterops =
193 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
195 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
197 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/* Enable firmware->host interrupts on xscale (1064R) controllers. */
200 mfi_enable_intr_xscale(struct mfi_softc *sc)
202 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on ppc-class controllers (1078, GEN2, skinny):
 * clear any pending doorbell status, then unmask only the interrupt
 * bits relevant to the chip generation.
 */
206 mfi_enable_intr_ppc(struct mfi_softc *sc)
208 if (sc->mfi_flags & MFI_FLAGS_1078) {
209 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
210 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
211 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
212 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
213 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
214 } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
215 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
/* Reaching here means the adapter flags matched no known family. */
217 panic("unknown adapter type");
/* Read the raw firmware status word (xscale: outbound message reg 0). */
222 mfi_read_fw_status_xscale(struct mfi_softc *sc)
224 return MFI_READ4(sc, MFI_OMSG0);
/* Read the raw firmware status word (ppc-class: outbound scratchpad 0). */
228 mfi_read_fw_status_ppc(struct mfi_softc *sc)
230 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether the pending interrupt belongs to us (xscale); if so,
 * acknowledge it by writing the status back.
 */
234 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
238 status = MFI_READ4(sc, MFI_OSTS);
239 if ((status & MFI_OSTS_INTR_VALID) == 0)
242 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check/ack an interrupt on ppc-class controllers. Returns nonzero if
 * the interrupt was not ours (reply-message bit for the chip family not
 * set). Skinny acks via MFI_OSTS, the others via the doorbell-clear reg.
 */
247 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
251 status = MFI_READ4(sc, MFI_OSTS);
252 if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
253 ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
254 ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
257 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
258 MFI_WRITE4(sc, MFI_OSTS, status);
260 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to the firmware inbound queue (xscale): the frame bus
 * address is shifted and the extra-frame count OR'ed into the low bits.
 */
265 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
267 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
/*
 * Post a command to the firmware inbound queue (ppc-class). Skinny uses
 * the 64-bit IQPL/IQPH pair (high word zero); other families use the
 * single 32-bit IQP register.
 */
271 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
273 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
274 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
275 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
277 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
/*
 * Drive the firmware state machine toward MFI_FWSTATE_READY, issuing the
 * appropriate inbound-doorbell command for each intermediate state and
 * polling for progress (up to max_wait seconds per state, 10 polls/sec).
 * DEVICE_SCAN gets a longer budget and is additionally judged by whether
 * the raw status register value is still changing.
 */
282 mfi_transition_firmware(struct mfi_softc *sc)
284 uint32_t fw_state, cur_state;
286 uint32_t cur_abs_reg_val = 0;
287 uint32_t prev_abs_reg_val = 0;
288 bus_space_handle_t idb;
290 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
291 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
/* Skinny controllers use a different inbound doorbell register. */
292 idb = sc->mfi_flags & MFI_FLAGS_SKINNY ? MFI_SKINNY_IDB : MFI_IDB;
293 while (fw_state != MFI_FWSTATE_READY) {
295 device_printf(sc->mfi_dev, "Waiting for firmware to "
297 cur_state = fw_state;
299 case MFI_FWSTATE_FAULT:
300 device_printf(sc->mfi_dev, "Firmware fault\n");
302 case MFI_FWSTATE_WAIT_HANDSHAKE:
303 MFI_WRITE4(sc, idb, MFI_FWINIT_CLEAR_HANDSHAKE);
306 case MFI_FWSTATE_OPERATIONAL:
307 MFI_WRITE4(sc, idb, MFI_FWINIT_READY);
310 case MFI_FWSTATE_UNDEFINED:
311 case MFI_FWSTATE_BB_INIT:
314 case MFI_FWSTATE_FW_INIT:
315 case MFI_FWSTATE_FLUSH_CACHE:
318 case MFI_FWSTATE_DEVICE_SCAN:
319 max_wait = 180; /* wait for 180 seconds */
320 prev_abs_reg_val = cur_abs_reg_val;
322 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
323 MFI_WRITE4(sc, idb, MFI_FWINIT_HOTPLUG);
327 device_printf(sc->mfi_dev,"Unknown firmware state %#x\n",
/* Poll max_wait seconds (100ms granularity) for a state change. */
331 for (i = 0; i < (max_wait * 10); i++) {
332 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
333 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
334 if (fw_state == cur_state)
339 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
340 /* Check the device scanning progress */
341 if (prev_abs_reg_val != cur_abs_reg_val)
344 if (fw_state == cur_state) {
345 device_printf(sc->mfi_dev, "Firmware stuck in state "
353 #if defined(__x86_64__)
/*
 * busdma load callbacks: stash the single segment's bus address into the
 * caller-provided pointer (64-bit variant on x86_64, 32-bit otherwise).
 */
355 mfi_addr64_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
360 *addr = segs[0].ds_addr;
364 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
369 *addr = segs[0].ds_addr;
/*
 * Main attach routine. Initializes locks and queues, selects the
 * per-chipset register methods, waits for firmware READY, sizes and
 * allocates the DMA areas (comms queues, command frames, sense buffers),
 * builds the command pool, initializes firmware comms, registers the
 * interrupt handler, AEN machinery, shutdown hook, control device and
 * sysctls, and finally attaches child busses and starts the watchdog.
 * Returns 0 on success or an errno on failure.
 */
374 mfi_attach(struct mfi_softc *sc)
377 int error, commsz, framessz, sensesz;
378 int frames, unit, max_fw_sge;
381 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.981\n");
383 lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
384 lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
385 TAILQ_INIT(&sc->mfi_ld_tqh);
386 TAILQ_INIT(&sc->mfi_syspd_tqh);
387 TAILQ_INIT(&sc->mfi_aen_pids);
388 TAILQ_INIT(&sc->mfi_cam_ccbq);
/* Select register-access methods by controller family. */
395 if (sc->mfi_flags & MFI_FLAGS_1064R) {
396 sc->mfi_enable_intr = mfi_enable_intr_xscale;
397 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
398 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
399 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
401 sc->mfi_enable_intr = mfi_enable_intr_ppc;
402 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
403 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
404 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
408 /* Before we get too far, see if the firmware is working */
409 if ((error = mfi_transition_firmware(sc)) != 0) {
410 device_printf(sc->mfi_dev, "Firmware not in READY state, "
411 "error %d\n", error);
416 * Get information needed for sizing the contiguous memory for the
417 * frame pool. Size down the sgl parameter since we know that
418 * we will never need more than what's required for MAXPHYS.
419 * It would be nice if these constants were available at runtime
420 * instead of compile time.
422 status = sc->mfi_read_fw_status(sc);
423 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
424 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
425 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
428 * Create the dma tag for data buffers. Used both for block I/O
429 * and for various internal data queries.
431 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
432 1, 0, /* algnmnt, boundary */
433 BUS_SPACE_MAXADDR, /* lowaddr */
434 BUS_SPACE_MAXADDR, /* highaddr */
435 NULL, NULL, /* filter, filterarg */
436 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
437 sc->mfi_max_sge, /* nsegments */
438 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
439 BUS_DMA_ALLOCNOW, /* flags */
440 &sc->mfi_buffer_dmat)) {
441 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
446 * Allocate DMA memory for the comms queues. Keep it under 4GB for
447 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
448 * entry, so the calculated size here will be will be 1 more than
449 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
451 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
452 sizeof(struct mfi_hwcomms);
453 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
454 1, 0, /* algnmnt, boundary */
455 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
456 BUS_SPACE_MAXADDR, /* highaddr */
457 NULL, NULL, /* filter, filterarg */
458 commsz, /* maxsize */
460 commsz, /* maxsegsize */
462 &sc->mfi_comms_dmat)) {
463 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
466 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
467 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
468 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
471 bzero(sc->mfi_comms, commsz);
472 #if defined(__x86_64__)
473 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
474 sc->mfi_comms, commsz, mfi_addr64_cb, &sc->mfi_comms_busaddr, 0);
476 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
477 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
481 * Allocate DMA memory for the command frames. Keep them in the
482 * lower 4GB for efficiency. Calculate the size of the commands at
483 * the same time; each command is one 64 byte frame plus a set of
484 * additional frames for holding sg lists or other data.
485 * The assumption here is that the SG list will start at the second
486 * frame and not use the unused bytes in the first frame. While this
487 * isn't technically correct, it simplifies the calculation and allows
488 * for command frames that might be larger than an mfi_io_frame.
490 if (sizeof(bus_addr_t) == 8) {
491 sc->mfi_sge_size = sizeof(struct mfi_sg64);
492 sc->mfi_flags |= MFI_FLAGS_SG64;
494 sc->mfi_sge_size = sizeof(struct mfi_sg32);
496 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
497 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
498 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
499 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
500 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
501 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
502 64, 0, /* algnmnt, boundary */
503 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
504 BUS_SPACE_MAXADDR, /* highaddr */
505 NULL, NULL, /* filter, filterarg */
506 framessz, /* maxsize */
508 framessz, /* maxsegsize */
510 &sc->mfi_frames_dmat)) {
511 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
514 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
515 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
516 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
519 bzero(sc->mfi_frames, framessz);
520 #if defined(__x86_64__)
521 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
522 sc->mfi_frames, framessz, mfi_addr64_cb, &sc->mfi_frames_busaddr,0);
524 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
525 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
529 * Allocate DMA memory for the frame sense data. Keep them in the
530 * lower 4GB for efficiency
532 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
533 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
534 4, 0, /* algnmnt, boundary */
535 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
536 BUS_SPACE_MAXADDR, /* highaddr */
537 NULL, NULL, /* filter, filterarg */
538 sensesz, /* maxsize */
540 sensesz, /* maxsegsize */
542 &sc->mfi_sense_dmat)) {
543 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
546 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
547 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
548 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
551 #if defined(__x86_64__)
552 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
553 sc->mfi_sense, sensesz, mfi_addr64_cb, &sc->mfi_sense_busaddr, 0);
555 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
556 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
559 if ((error = mfi_alloc_commands(sc)) != 0)
562 if ((error = mfi_comms_init(sc)) != 0)
565 if ((error = mfi_get_controller_info(sc)) != 0)
568 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * NOTE(review): the ", 0" comma operator below makes the condition
 * always compare 0 != 0, so an mfi_aen_setup() failure is silently
 * ignored. Looks like a latent bug inherited from upstream — confirm
 * against FreeBSD sys/dev/mfi/mfi.c before changing.
 */
569 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
570 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
573 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
576 * Set up the interrupt handler. XXX This should happen in
580 sc->mfi_irq_type = pci_alloc_1intr(sc->mfi_dev, mfi_msi_enable,
581 &sc->mfi_irq_rid, &irq_flags);
582 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
583 &sc->mfi_irq_rid, irq_flags)) == NULL) {
584 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
587 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
588 mfi_intr, sc, &sc->mfi_intr, NULL)) {
589 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
593 /* Register a config hook to probe the bus for arrays */
594 sc->mfi_ich.ich_func = mfi_startup;
595 sc->mfi_ich.ich_arg = sc;
596 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
597 device_printf(sc->mfi_dev, "Cannot establish configuration "
603 * Register a shutdown handler.
605 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
606 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
607 device_printf(sc->mfi_dev, "Warning: shutdown event "
608 "registration failed\n");
612 * Create the control device for doing management
614 unit = device_get_unit(sc->mfi_dev);
615 sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
616 0640, "mfi%d", unit);
618 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
619 if (sc->mfi_cdev != NULL)
620 sc->mfi_cdev->si_drv1 = sc;
621 sysctl_ctx_init(&sc->mfi_sysctl_ctx);
622 sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
623 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
624 device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
625 if (sc->mfi_sysctl_tree == NULL) {
626 device_printf(sc->mfi_dev, "can't add sysctl node\n");
629 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
630 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
631 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
632 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
633 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
634 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
635 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
636 &sc->mfi_keep_deleted_volumes, 0,
637 "Don't detach the mfid device for a busy volume that is deleted");
639 device_add_child(sc->mfi_dev, "mfip", -1);
640 bus_generic_attach(sc->mfi_dev);
642 /* Start the timeout watchdog */
643 callout_init(&sc->mfi_watchdog_callout);
644 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
/*
 * Allocate the driver command pool: min(tunable, firmware max) commands,
 * each wired to its slice of the preallocated frame and sense DMA areas.
 * The frame header context is set to the pool index so completions can
 * map a reply back to its mfi_command. Commands whose per-command busdma
 * map is created successfully are released onto the free queue.
 */
651 mfi_alloc_commands(struct mfi_softc *sc)
653 struct mfi_command *cm;
657 * XXX Should we allocate all the commands up front, or allocate on
658 * demand later like 'aac' does?
660 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
662 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
663 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
665 sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
668 for (i = 0; i < ncmds; i++) {
669 cm = &sc->mfi_commands[i];
670 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
671 sc->mfi_cmd_size * i);
672 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
673 sc->mfi_cmd_size * i;
674 cm->cm_frame->header.context = i;
675 cm->cm_sense = &sc->mfi_sense[i];
676 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
/* Only commands with a usable dmamap enter the free pool. */
679 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
680 &cm->cm_dmamap) == 0)
681 mfi_release_command(cm);
684 sc->mfi_total_cmds++;
/*
 * Reset a command for reuse and return it to the free queue. Clears the
 * frame header fields (as 32-bit words, preserving the context word) and
 * the first S/G entry, then wipes the per-command bookkeeping.
 */
691 mfi_release_command(struct mfi_command *cm)
693 struct mfi_frame_header *hdr;
697 * Zero out the important fields of the frame, but make sure the
698 * context field is preserved. For efficiency, handle the fields
699 * as 32 bit words. Clear out the first S/G entry too for safety.
701 hdr = &cm->cm_frame->header;
702 if (cm->cm_data != NULL && hdr->sg_count) {
703 cm->cm_sg->sg32[0].len = 0;
704 cm->cm_sg->sg32[0].addr = 0;
707 hdr_data = (uint32_t *)cm->cm_frame;
708 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
709 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
710 hdr_data[4] = 0; /* flags, timeout */
711 hdr_data[5] = 0; /* data_len */
713 cm->cm_extra_frames = 0;
715 cm->cm_complete = NULL;
716 cm->cm_private = NULL;
719 cm->cm_total_frame_size = 0;
721 mfi_enqueue_free(cm);
/*
 * Build a DCMD (direct command) frame. Dequeues a free command, zeroes
 * its frame (preserving the context word), optionally allocates a data
 * buffer of bufsize bytes (returned through *bufp when the caller did
 * not supply one), and fills in the dcmd header and opcode.
 * Caller must hold mfi_io_lock (asserted below) and is responsible for
 * mapping/submitting the command and releasing it afterward.
 */
725 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
726 void **bufp, size_t bufsize)
728 struct mfi_command *cm;
729 struct mfi_dcmd_frame *dcmd;
731 uint32_t context = 0;
733 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
735 cm = mfi_dequeue_free(sc);
739 /* Zero out the MFI frame */
740 context = cm->cm_frame->header.context;
741 bzero(cm->cm_frame, sizeof(union mfi_frame));
742 cm->cm_frame->header.context = context;
744 if ((bufsize > 0) && (bufp != NULL)) {
746 buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
748 mfi_release_command(cm);
757 dcmd = &cm->cm_frame->dcmd;
758 bzero(dcmd->mbox, MFI_MBOX_SIZE);
759 dcmd->header.cmd = MFI_CMD_DCMD;
760 dcmd->header.timeout = 0;
761 dcmd->header.flags = 0;
762 dcmd->header.data_len = bufsize;
763 dcmd->header.scsi_status = 0;
764 dcmd->opcode = opcode;
765 cm->cm_sg = &dcmd->sgl;
766 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
769 cm->cm_private = buf;
770 cm->cm_len = bufsize;
/* Hand the freshly allocated buffer back to the caller if requested. */
773 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the INIT command that tells the firmware where the host reply
 * queue and producer/consumer indices live. The mfi_init_qinfo struct
 * is placed in the S/G area of the command's second frame (see comment
 * below) and referenced by bus address. Issued polled, under mfi_io_lock.
 */
779 mfi_comms_init(struct mfi_softc *sc)
781 struct mfi_command *cm;
782 struct mfi_init_frame *init;
783 struct mfi_init_qinfo *qinfo;
785 uint32_t context = 0;
787 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
788 if ((cm = mfi_dequeue_free(sc)) == NULL)
791 /* Zero out the MFI frame */
792 context = cm->cm_frame->header.context;
793 bzero(cm->cm_frame, sizeof(union mfi_frame));
794 cm->cm_frame->header.context = context;
797 * Abuse the SG list area of the frame to hold the init_qinfo
800 init = &cm->cm_frame->init;
801 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
803 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One more reply slot than commands — a hardware requirement. */
804 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
805 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
806 offsetof(struct mfi_hwcomms, hw_reply_q);
807 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
808 offsetof(struct mfi_hwcomms, hw_pi);
809 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
810 offsetof(struct mfi_hwcomms, hw_ci);
812 init->header.cmd = MFI_CMD_INIT;
813 init->header.data_len = sizeof(struct mfi_init_qinfo);
814 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
816 cm->cm_flags = MFI_CMD_POLLED;
818 if ((error = mfi_mapcmd(sc, cm)) != 0) {
819 device_printf(sc->mfi_dev, "failed to send init command\n");
820 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
823 mfi_release_command(cm);
824 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Query the controller via MFI_DCMD_CTRL_GETINFO (polled DATAIN) and
 * derive sc->mfi_max_io from the returned stripe/request limits. On
 * failure, falls back to a conservative value computed from the S/G
 * limit. The ci buffer is kmalloc'ed by mfi_dcmd_command and freed by
 * the (not visible here) cleanup path.
 */
830 mfi_get_controller_info(struct mfi_softc *sc)
832 struct mfi_command *cm = NULL;
833 struct mfi_ctrl_info *ci = NULL;
834 uint32_t max_sectors_1, max_sectors_2;
837 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
838 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
839 (void **)&ci, sizeof(*ci));
842 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
844 if ((error = mfi_mapcmd(sc, cm)) != 0) {
845 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: size max I/O from the S/G entry budget. */
846 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
852 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
853 BUS_DMASYNC_POSTREAD);
854 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
856 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
857 max_sectors_2 = ci->max_request_size;
858 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
864 mfi_release_command(cm);
865 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch the firmware event-log state (sequence numbers) via a polled
 * MFI_DCMD_CTRL_EVENT_GETINFO. *log_state is allocated by
 * mfi_dcmd_command; the caller owns and frees it (see mfi_aen_setup).
 */
870 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
872 struct mfi_command *cm = NULL;
875 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
876 (void **)log_state, sizeof(**log_state));
879 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
881 if ((error = mfi_mapcmd(sc, cm)) != 0) {
882 device_printf(sc->mfi_dev, "Failed to get log state\n");
886 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
887 BUS_DMASYNC_POSTREAD);
888 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
892 mfi_release_command(cm);
/*
 * Initialize asynchronous event notification (AEN). When starting from
 * sequence 0, reads the firmware log state, replays events that fired
 * between the last shutdown and the newest entry, and continues from the
 * newest sequence number. Finally registers an AEN listener filtered by
 * the tunable locale/class.
 */
898 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
900 struct mfi_evt_log_state *log_state = NULL;
901 union mfi_evt class_locale;
905 class_locale.members.reserved = 0;
906 class_locale.members.locale = mfi_event_locale;
907 class_locale.members.evt_class = mfi_event_class;
909 if (seq_start == 0) {
910 error = mfi_get_log_state(sc, &log_state);
913 kfree(log_state, M_MFIBUF);
918 * Walk through any events that fired since the last
921 mfi_parse_entries(sc, log_state->shutdown_seq_num,
922 log_state->newest_seq_num);
923 seq = log_state->newest_seq_num;
926 mfi_aen_register(sc, seq, class_locale.word);
927 if (log_state != NULL)
928 kfree(log_state, M_MFIBUF);
/*
 * Queue a command and sleep until the interrupt path marks it completed.
 * Caller must hold mfi_io_lock (lksleep drops/reacquires it). A DCMD
 * opcode of 0 (issued by MegaCli) is short-circuited to MFI_STAT_OK
 * without touching the hardware.
 */
934 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
937 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
938 cm->cm_complete = NULL;
942 * MegaCli can issue a DCMD of 0. In this case do nothing
943 * and return 0 to it as status
945 if (cm->cm_frame->dcmd.opcode == 0) {
946 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
948 return (cm->cm_error);
950 mfi_enqueue_ready(cm);
952 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
953 lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
954 return (cm->cm_error);
/*
 * Tear down everything mfi_attach() created, in reverse order: watchdog
 * callout, control device, per-command dmamaps and the command array,
 * interrupt resources (including MSI), then each DMA area (sense,
 * frames, comms — unload map, free memory, destroy tag), the data and
 * parent tags, the sysctl context, and finally the locks. Each step is
 * guarded so a partially attached softc can be freed safely.
 */
958 mfi_free(struct mfi_softc *sc)
960 struct mfi_command *cm;
963 callout_stop(&sc->mfi_watchdog_callout); /* XXX callout_drain() */
965 if (sc->mfi_cdev != NULL)
966 destroy_dev(sc->mfi_cdev);
967 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
969 if (sc->mfi_total_cmds != 0) {
970 for (i = 0; i < sc->mfi_total_cmds; i++) {
971 cm = &sc->mfi_commands[i];
972 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
974 kfree(sc->mfi_commands, M_MFIBUF);
978 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
979 if (sc->mfi_irq != NULL)
980 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
982 if (sc->mfi_irq_type == PCI_INTR_TYPE_MSI)
983 pci_release_msi(sc->mfi_dev);
984 if (sc->mfi_sense_busaddr != 0)
985 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
986 if (sc->mfi_sense != NULL)
987 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
988 sc->mfi_sense_dmamap);
989 if (sc->mfi_sense_dmat != NULL)
990 bus_dma_tag_destroy(sc->mfi_sense_dmat);
992 if (sc->mfi_frames_busaddr != 0)
993 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
994 if (sc->mfi_frames != NULL)
995 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
996 sc->mfi_frames_dmamap);
997 if (sc->mfi_frames_dmat != NULL)
998 bus_dma_tag_destroy(sc->mfi_frames_dmat);
1000 if (sc->mfi_comms_busaddr != 0)
1001 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1002 if (sc->mfi_comms != NULL)
1003 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1004 sc->mfi_comms_dmamap);
1005 if (sc->mfi_comms_dmat != NULL)
1006 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1008 if (sc->mfi_buffer_dmat != NULL)
1009 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1010 if (sc->mfi_parent_dmat != NULL)
1011 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1013 if (sc->mfi_sysctl_tree != NULL)
1014 sysctl_ctx_free(&sc->mfi_sysctl_ctx);
1016 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1018 if (mtx_initialized(&sc->mfi_io_lock)) {
1019 lockuninit(&sc->mfi_io_lock);
1020 sx_destroy(&sc->mfi_config_lock);
1024 lockuninit(&sc->mfi_io_lock);
1025 lockuninit(&sc->mfi_config_lock);
/*
 * Config-intrhook callback: runs once interrupts are available. Removes
 * the hook, enables controller interrupts, and (under the config and I/O
 * locks) probes for logical disks and — on skinny controllers — system
 * physical disks.
 */
1031 mfi_startup(void *arg)
1033 struct mfi_softc *sc;
1035 sc = (struct mfi_softc *)arg;
1037 config_intrhook_disestablish(&sc->mfi_ich);
1039 sc->mfi_enable_intr(sc);
1040 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1041 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1043 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1045 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1046 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Interrupt handler (mfi_intr; the signature line is not visible in this
 * extract). Acks the interrupt, then walks the reply queue from the
 * consumer index to the producer index, mapping each context word back
 * to its mfi_command and completing it. The consumer index wraps at
 * mfi_max_fw_cmds + 1 (one extra reply slot). Afterward, unfreezes the
 * deferred-I/O queue if it was frozen.
 */
1052 struct mfi_softc *sc;
1053 struct mfi_command *cm;
1054 uint32_t pi, ci, context;
1056 sc = (struct mfi_softc *)arg;
1058 if (sc->mfi_check_clear_intr(sc))
1061 pi = sc->mfi_comms->hw_pi;
1062 ci = sc->mfi_comms->hw_ci;
1063 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1065 context = sc->mfi_comms->hw_reply_q[ci];
1066 if (context < sc->mfi_max_fw_cmds) {
1067 cm = &sc->mfi_commands[context];
1068 mfi_remove_busy(cm);
1070 mfi_complete(sc, cm);
1072 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1077 sc->mfi_comms->hw_ci = ci;
1079 /* Give defered I/O a chance to run */
1080 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1081 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1083 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * shutdown_final eventhandler: aborts any outstanding AEN command, then
 * sends a polled MFI_DCMD_CTRL_SHUTDOWN so the controller can flush its
 * cache before power-off.
 */
1089 mfi_shutdown(struct mfi_softc *sc)
1091 struct mfi_dcmd_frame *dcmd;
1092 struct mfi_command *cm;
1095 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1096 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1098 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1102 if (sc->mfi_aen_cm != NULL)
1103 mfi_abort(sc, sc->mfi_aen_cm);
1105 dcmd = &cm->cm_frame->dcmd;
1106 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1107 cm->cm_flags = MFI_CMD_POLLED;
1110 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1111 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1114 mfi_release_command(cm);
1115 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Probe system physical drives (PDs exposed directly to the host).
 * Issues a polled MFI_DCMD_PD_LIST_QUERY, attaches any PD in the reply
 * that is not yet on mfi_syspd_tqh, and detaches PDs that no longer
 * appear in the firmware's list.  Caller must hold both the config and
 * io locks (asserted below).
 */
1119 mfi_syspdprobe(struct mfi_softc *sc)
1121 struct mfi_frame_header *hdr;
1122 struct mfi_command *cm = NULL;
1123 struct mfi_pd_list *pdlist = NULL;
1124 struct mfi_system_pd *syspd;
1127 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1128 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1129 /* Add SYSTEM PD's */
1130 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1131 (void **)&pdlist, sizeof(*pdlist));
1133 device_printf(sc->mfi_dev,"Error while forming syspd list\n");
/* Poll the query so we do not sleep with the locks held. */
1137 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1138 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1139 cm->cm_frame->dcmd.mbox[1] = 0;
1140 if (mfi_mapcmd(sc, cm) != 0) {
1141 device_printf(sc->mfi_dev, "Failed to get syspd device list\n");
1144 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1145 BUS_DMASYNC_POSTREAD);
1146 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1147 hdr = &cm->cm_frame->header;
1148 if (hdr->cmd_status != MFI_STAT_OK) {
1149 device_printf(sc->mfi_dev, "MFI_DCMD_PD_LIST_QUERY failed %x\n",
/* Pass 1: attach every newly reported PD. */
1153 for (i = 0; i < pdlist->count; i++) {
/* device_id == encl_device_id marks an enclosure entry, not a drive. */
1154 if (pdlist->addr[i].device_id == pdlist->addr[i].encl_device_id)
1155 goto skip_sys_pd_add;
1156 /* Get each PD and add it to the system */
1157 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
1158 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
1159 if (syspd->pd_id == pdlist->addr[i].device_id)
1160 goto skip_sys_pd_add;
1163 mfi_add_sys_pd(sc,pdlist->addr[i].device_id);
/* Pass 2: detach PDs that vanished from the firmware's list. */
1167 /* Delete SYSPD's whose state has been changed */
1168 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
1169 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
1170 for (i=0;i<pdlist->count;i++) {
1171 if (syspd->pd_id == pdlist->addr[i].device_id)
1172 goto skip_sys_pd_delete;
1175 device_delete_child(sc->mfi_dev,syspd->pd_dev);
1183 kfree(pdlist, M_MFIBUF);
1185 mfi_release_command(cm);
/*
 * Probe logical drives: fetch MFI_DCMD_LD_GET_LIST from the firmware and
 * attach (via mfi_add_ld) every target id not already on mfi_ld_tqh.
 * Unlike mfi_syspdprobe this waits for completion (mfi_wait_command)
 * rather than polling.  Caller must hold both locks (asserted).
 */
1189 mfi_ldprobe(struct mfi_softc *sc)
1191 struct mfi_frame_header *hdr;
1192 struct mfi_command *cm = NULL;
1193 struct mfi_ld_list *list = NULL;
1194 struct mfi_disk *ld;
1197 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1198 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1200 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1201 (void **)&list, sizeof(*list));
1205 cm->cm_flags = MFI_CMD_DATAIN;
1206 if (mfi_wait_command(sc, cm) != 0) {
1207 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1211 hdr = &cm->cm_frame->header;
1212 if (hdr->cmd_status != MFI_STAT_OK) {
1213 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Attach any logical drive we do not already know about. */
1218 for (i = 0; i < list->ld_count; i++) {
1219 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1220 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1223 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1228 kfree(list, M_MFIBUF);
1230 mfi_release_command(cm);
1236 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1237 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp into a static buffer (NOT thread-safe; the
 * single caller is the event-decode path).  Returns "boot + Ns" when the
 * high byte is all-ones, plain "Ns" otherwise.
 */
1241 format_timestamp(uint32_t timestamp)
1243 static char buffer[32];
1245 if ((timestamp & 0xff000000) == 0xff000000)
1246 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1249 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a short human-readable name, again via
 * a static buffer (single-threaded use only).  Unknown classes fall
 * through to a numeric rendering.
 * NOTE(review): several "return" lines are elided in this excerpt.
 */
1254 format_class(int8_t class)
1256 static char buffer[6];
1259 case MFI_EVT_CLASS_DEBUG:
1261 case MFI_EVT_CLASS_PROGRESS:
1262 return ("progress");
1263 case MFI_EVT_CLASS_INFO:
1265 case MFI_EVT_CLASS_WARNING:
1267 case MFI_EVT_CLASS_CRITICAL:
1269 case MFI_EVT_CLASS_FATAL:
1271 case MFI_EVT_CLASS_DEAD:
/* default: print the raw class number. */
1274 ksnprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log one firmware event to the console as
 * "seq (timestamp/locale/class) - description".
 */
1280 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1283 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1284 format_timestamp(detail->time), detail->evt_class.members.locale,
1285 format_class(detail->evt_class.members.evt_class), detail->description);
/*
 * Register an asynchronous-event-notification (AEN) listener with the
 * firmware via MFI_DCMD_CTRL_EVENT_WAIT.  If an AEN command is already
 * outstanding: when the existing registration's class/locale already
 * covers the request, do nothing; otherwise widen the locale mask /
 * lower the class and abort the old command so it can be re-issued.
 * The command completes asynchronously through mfi_aen_complete.
 */
1289 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1291 struct mfi_command *cm;
1292 struct mfi_dcmd_frame *dcmd;
1293 union mfi_evt current_aen, prior_aen;
1294 struct mfi_evt_detail *ed = NULL;
1297 current_aen.word = locale;
1298 if (sc->mfi_aen_cm != NULL) {
/* mbox word 1 of the outstanding command holds its class/locale word. */
1300 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Covered already: class is at least as verbose and locale is a subset. */
1301 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1302 !((prior_aen.members.locale & current_aen.members.locale)
1303 ^current_aen.members.locale)) {
/* Merge the requests, keeping the more verbose (lower) class. */
1306 prior_aen.members.locale |= current_aen.members.locale;
1307 if (prior_aen.members.evt_class
1308 < current_aen.members.evt_class)
1309 current_aen.members.evt_class =
1310 prior_aen.members.evt_class;
1311 mfi_abort(sc, sc->mfi_aen_cm);
1315 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1316 (void **)&ed, sizeof(*ed));
/* mbox[0] = starting sequence number, mbox[1] = class/locale filter. */
1321 dcmd = &cm->cm_frame->dcmd;
1322 ((uint32_t *)&dcmd->mbox)[0] = seq;
1323 ((uint32_t *)&dcmd->mbox)[1] = locale;
1324 cm->cm_flags = MFI_CMD_DATAIN;
1325 cm->cm_complete = mfi_aen_complete;
1327 sc->mfi_aen_cm = cm;
1329 mfi_enqueue_ready(cm);
/*
 * Completion handler for the AEN command: decode and log the event,
 * signal (SIGIO) any registered listener processes, wake pollers, then
 * free the event buffer and re-arm via mfi_aen_setup with the next
 * sequence number.  Aborted/invalid completions skip the decode.
 */
1337 mfi_aen_complete(struct mfi_command *cm)
1339 struct mfi_frame_header *hdr;
1340 struct mfi_softc *sc;
1341 struct mfi_evt_detail *detail;
1342 struct mfi_aen *mfi_aen_entry, *tmp;
1343 int seq = 0, aborted = 0;
1346 hdr = &cm->cm_frame->header;
1348 if (sc->mfi_aen_cm == NULL)
/* Command was aborted (e.g. re-registration) or never really completed. */
1351 if (sc->mfi_aen_cm->cm_aen_abort ||
1352 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1353 sc->mfi_aen_cm->cm_aen_abort = 0;
/* Wake anything kevent/poll-waiting on controller events. */
1356 sc->mfi_aen_triggered = 1;
1357 if (sc->mfi_poll_waiting) {
1358 sc->mfi_poll_waiting = 0;
1359 KNOTE(&sc->mfi_kq.ki_note, 0);
1361 detail = cm->cm_data;
1363 * XXX If this function is too expensive or is recursive, then
1364 * events should be put onto a queue and processed later.
1366 mfi_decode_evt(sc, detail);
1367 seq = detail->seq + 1;
/* Notify and drop each registered listener (one-shot SIGIO). */
1368 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1369 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1371 lwkt_gettoken(&proc_token);
1372 ksignal(mfi_aen_entry->p, SIGIO);
1373 lwkt_reltoken(&proc_token);
1374 kfree(mfi_aen_entry, M_MFIBUF);
1378 kfree(cm->cm_data, M_MFIBUF);
/* Clear before wakeup: mfi_abort sleeps on &sc->mfi_aen_cm. */
1379 sc->mfi_aen_cm = NULL;
1380 wakeup(&sc->mfi_aen_cm);
1381 mfi_release_command(cm);
1383 /* set it up again so the driver can catch more events */
1385 mfi_aen_setup(sc, seq);
1389 #define MAX_EVENTS 15
/*
 * Replay the firmware event log from start_seq up to (not including)
 * stop_seq, decoding each entry to the console.  Fetches in batches of
 * MAX_EVENTS via polled MFI_DCMD_CTRL_EVENT_GET commands.  The log is a
 * circular buffer, so the stop point may numerically precede the start.
 * Returns 0 on success, an errno otherwise (error paths partly elided
 * in this excerpt).
 */
1392 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1394 struct mfi_command *cm;
1395 struct mfi_dcmd_frame *dcmd;
1396 struct mfi_evt_list *el;
1397 union mfi_evt class_locale;
1398 int error, i, seq, size;
1399 uint32_t context = 0;
1401 class_locale.members.reserved = 0;
1402 class_locale.members.locale = mfi_event_locale;
1403 class_locale.members.evt_class = mfi_event_class;
/* One header plus MAX_EVENTS-ish details; exact multiplier elided here. */
1405 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1407 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1411 for (seq = start_seq;;) {
1412 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1413 kfree(el, M_MFIBUF);
1417 /* Zero out the MFI frame, preserving the pre-assigned context id. */
1418 context = cm->cm_frame->header.context;
1419 bzero(cm->cm_frame, sizeof(union mfi_frame));
1420 cm->cm_frame->header.context = context;
1422 dcmd = &cm->cm_frame->dcmd;
1423 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1424 dcmd->header.cmd = MFI_CMD_DCMD;
1425 dcmd->header.timeout = 0;
1426 dcmd->header.data_len = size;
1427 dcmd->header.scsi_status = 0;
1428 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
/* mbox[0] = first sequence wanted, mbox[1] = class/locale filter. */
1429 ((uint32_t *)&dcmd->mbox)[0] = seq;
1430 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1431 cm->cm_sg = &dcmd->sgl;
1432 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1433 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1437 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1438 device_printf(sc->mfi_dev,
1439 "Failed to get controller entries\n");
1440 mfi_release_command(cm);
1444 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1445 BUS_DMASYNC_POSTREAD);
1446 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we walked off the end of the log: normal termination. */
1448 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1449 mfi_release_command(cm);
1452 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1453 device_printf(sc->mfi_dev,
1454 "Error %d fetching controller entries\n",
1455 dcmd->header.cmd_status);
1456 mfi_release_command(cm);
1459 mfi_release_command(cm);
1461 for (i = 0; i < el->count; i++) {
1463 * If this event is newer than 'stop_seq' then
1464 * break out of the loop. Note that the log
1465 * is a circular buffer so we have to handle
1466 * the case that our stop point is earlier in
1467 * the buffer than our start point.
1469 if (el->event[i].seq >= stop_seq) {
1470 if (start_seq <= stop_seq)
1472 else if (el->event[i].seq < start_seq)
1475 mfi_decode_evt(sc, &el->event[i]);
/* Continue the next batch just past the last entry we received. */
1477 seq = el->event[el->count - 1].seq + 1;
1480 kfree(el, M_MFIBUF);
/*
 * Attach one logical drive by target id: fetch MFI_DCMD_LD_GET_INFO and,
 * unless the drive is an SSCD (CacheCade SSD cache volume, which gets no
 * disk device), hand the command to mfi_add_ld_complete which creates
 * the mfid child device.  io lock must be held (asserted).
 */
1485 mfi_add_ld(struct mfi_softc *sc, int id)
1487 struct mfi_command *cm;
1488 struct mfi_dcmd_frame *dcmd = NULL;
1489 struct mfi_ld_info *ld_info = NULL;
1492 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1494 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1495 (void **)&ld_info, sizeof(*ld_info));
1497 device_printf(sc->mfi_dev,
1498 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1500 kfree(ld_info, M_MFIBUF);
1503 cm->cm_flags = MFI_CMD_DATAIN;
1504 dcmd = &cm->cm_frame->dcmd;
1506 if (mfi_wait_command(sc, cm) != 0) {
1507 device_printf(sc->mfi_dev,
1508 "Failed to get logical drive: %d\n", id);
1509 kfree(ld_info, M_MFIBUF);
/* Non-SSCD drives: completion path consumes cm and ld_info. */
1512 if (ld_info->ld_config.params.isSSCD != 1) {
1513 mfi_add_ld_complete(cm);
1515 mfi_release_command(cm);
1516 if(ld_info) /* for SSCD drives, ld_info is freed here instead */
1517 kfree(ld_info, M_MFIBUF);
/*
 * Second half of logical-drive attach: on MFI_STAT_OK, drop the io lock
 * (newbus attach may sleep), create an "mfid" child carrying ld_info as
 * ivars, run bus_generic_attach, and retake the lock.  On any failure
 * ld_info is freed here.  Ownership of ld_info passes to the child on
 * success.
 */
1523 mfi_add_ld_complete(struct mfi_command *cm)
1525 struct mfi_frame_header *hdr;
1526 struct mfi_ld_info *ld_info;
1527 struct mfi_softc *sc;
1531 hdr = &cm->cm_frame->header;
1532 ld_info = cm->cm_private;
1534 if (hdr->cmd_status != MFI_STAT_OK) {
1535 kfree(ld_info, M_MFIBUF);
1536 mfi_release_command(cm);
1539 mfi_release_command(cm);
/* device_add_child/bus_generic_attach must not run under the io lock. */
1541 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1543 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1544 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1545 kfree(ld_info, M_MFIBUF);
1547 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1551 device_set_ivars(child, ld_info);
1552 device_set_desc(child, "MFI Logical Disk");
1553 bus_generic_attach(sc->mfi_dev);
1555 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Attach one system physical drive by device id: issue a polled
 * MFI_DCMD_PD_GET_INFO and hand the finished command to
 * mfi_add_sys_pd_complete.  io lock must be held (asserted).
 * NOTE(review): the "Failed to allocated" message typo is a runtime
 * string and is left untouched here.
 */
1559 mfi_add_sys_pd(struct mfi_softc *sc,int id)
1561 struct mfi_command *cm;
1562 struct mfi_dcmd_frame *dcmd = NULL;
1563 struct mfi_pd_info *pd_info = NULL;
1566 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1568 error = mfi_dcmd_command(sc,&cm,MFI_DCMD_PD_GET_INFO,
1569 (void **)&pd_info, sizeof(*pd_info));
1571 device_printf(sc->mfi_dev,
1572 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n", error);
1574 kfree(pd_info,M_MFIBUF);
1577 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1578 dcmd = &cm->cm_frame->dcmd;
1580 dcmd->header.scsi_status = 0;
1581 dcmd->header.pad0 = 0;
1582 if (mfi_mapcmd(sc, cm) != 0) {
1583 device_printf(sc->mfi_dev,
1584 "Failed to get physical drive info %d\n", id);
1585 kfree(pd_info,M_MFIBUF);
1588 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1589 BUS_DMASYNC_POSTREAD);
1590 bus_dmamap_unload(sc->mfi_buffer_dmat,cm->cm_dmamap);
/* Completion path consumes cm and pd_info. */
1591 mfi_add_sys_pd_complete(cm);
/*
 * Second half of system-PD attach: verify the drive's firmware state is
 * MFI_PD_STATE_SYSTEM, then (lock dropped, as newbus may sleep) create
 * an "mfisyspd" child with pd_info as ivars and attach it.  pd_info is
 * freed on every failure path; on success ownership passes to the child.
 */
1596 mfi_add_sys_pd_complete(struct mfi_command *cm)
1598 struct mfi_frame_header *hdr;
1599 struct mfi_pd_info *pd_info;
1600 struct mfi_softc *sc;
1604 hdr = &cm->cm_frame->header;
1605 pd_info = cm->cm_private;
1607 if (hdr->cmd_status != MFI_STAT_OK) {
1608 kfree(pd_info, M_MFIBUF);
1609 mfi_release_command(cm);
/* Only drives in SYSTEM state get an mfisyspd device. */
1612 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1613 device_printf(sc->mfi_dev,"PD=%x is not SYSTEM PD\n",
1614 pd_info->ref.v.device_id);
1615 kfree(pd_info, M_MFIBUF);
1616 mfi_release_command(cm);
1619 mfi_release_command(cm);
1621 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1623 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1624 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1625 kfree(pd_info, M_MFIBUF);
1627 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1631 device_set_ivars(child, pd_info);
1632 device_set_desc(child, "MFI System PD");
1633 bus_generic_attach(sc->mfi_dev);
1635 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * Pull the next bio off the driver's bio queue and turn it into an MFI
 * command: a pass-through frame for system PDs, an LD I/O frame for
 * logical disks.  Returns NULL when out of resources; a bio that could
 * not get a command is requeued.  Keeps two free commands in reserve so
 * ioctls are never starved.
 */
1638 static struct mfi_command *
1639 mfi_bio_command(struct mfi_softc *sc)
1642 struct mfi_command *cm = NULL;
1643 struct mfi_disk *mfid;
1645 /* reserving two commands to avoid starvation for IOCTL */
1646 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1648 if ((bio = mfi_dequeue_bio(sc)) == NULL)
1650 mfid = bio->bio_driver_info;
1651 if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1652 cm = mfi_build_syspdio(sc, bio);
1654 cm = mfi_build_ldio(sc, bio);
/* Build failed (no free frame): put the bio back for later. */
1656 mfi_enqueue_bio(sc,bio);
/*
 * Translate a bio aimed at a system PD into an MFI SCSI pass-through
 * frame carrying a 10-byte READ/WRITE CDB.  The LBA is derived from
 * bio_offset in MFI_SECTOR_LEN units; only 32-bit LBAs fit in a
 * READ_10/WRITE_10 CDB.  Returns NULL if no free command is available.
 */
1660 static struct mfi_command *
1661 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1663 struct mfi_command *cm;
1665 struct mfi_system_pd *disk;
1666 struct mfi_pass_frame *pass;
1667 int flags = 0,blkcount = 0;
1668 uint32_t context = 0;
1670 if ((cm = mfi_dequeue_free(sc)) == NULL)
1673 /* Zero out the MFI frame, preserving the pre-assigned context id. */
1674 context = cm->cm_frame->header.context;
1675 bzero(cm->cm_frame, sizeof(union mfi_frame));
1676 cm->cm_frame->header.context = context;
1678 pass = &cm->cm_frame->pass;
1679 bzero(pass->cdb, 16);
1680 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
1681 switch (bp->b_cmd & 0x03) {
1683 pass->cdb[0] = READ_10;
1684 flags = MFI_CMD_DATAIN;
1687 pass->cdb[0] = WRITE_10;
1688 flags = MFI_CMD_DATAOUT;
1691 panic("Invalid bio command");
1694 /* Cheat with the sector length to avoid a non-constant division */
1695 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1696 disk = bio->bio_driver_info;
1697 /* Fill the LBA (big-endian, bytes 2-5) and transfer length in the CDB */
1698 pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
1699 pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
1700 pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
1701 pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
1702 pass->cdb[7] = (blkcount & 0xff00) >> 8;
1703 pass->cdb[8] = (blkcount & 0x00ff);
1704 pass->header.target_id = disk->pd_id;
1705 pass->header.timeout = 0;
1706 pass->header.flags = 0;
1707 pass->header.scsi_status = 0;
1708 pass->header.sense_len = MFI_SENSE_LEN;
1709 pass->header.data_len = bp->b_bcount;
1710 pass->header.cdb_len = 10;
/* Split the 64-bit sense buffer bus address on 64-bit platforms. */
1711 #if defined(__x86_64__)
1712 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
1713 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
1715 pass->sense_addr_lo = cm->cm_sense_busaddr;
1716 pass->sense_addr_hi = 0;
1718 cm->cm_complete = mfi_bio_complete;
1719 cm->cm_private = bio;
1720 cm->cm_data = bp->b_data;
1721 cm->cm_len = bp->b_bcount;
1722 cm->cm_sg = &pass->sgl;
1723 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
1724 cm->cm_flags = flags;
/*
 * Translate a bio aimed at a logical disk into a native MFI LD
 * READ/WRITE frame.  Unlike the pass-through path, the LBA is a full
 * 64-bit value split across lba_hi/lba_lo and data_len is a block
 * count, not a byte count.  Returns NULL if no free command is
 * available.
 */
1728 static struct mfi_command *
1729 mfi_build_ldio(struct mfi_softc *sc,struct bio *bio)
1731 struct mfi_io_frame *io;
1733 struct mfi_disk *disk;
1734 struct mfi_command *cm;
1735 int flags, blkcount;
1736 uint32_t context = 0;
1738 if ((cm = mfi_dequeue_free(sc)) == NULL)
1741 /* Zero out the MFI frame, preserving the pre-assigned context id. */
1742 context = cm->cm_frame->header.context;
1743 bzero(cm->cm_frame,sizeof(union mfi_frame));
1744 cm->cm_frame->header.context = context;
1746 io = &cm->cm_frame->io;
1747 switch (bp->b_cmd & 0x03) {
1749 io->header.cmd = MFI_CMD_LD_READ;
1750 flags = MFI_CMD_DATAIN;
1753 io->header.cmd = MFI_CMD_LD_WRITE;
1754 flags = MFI_CMD_DATAOUT;
1757 panic("Invalid bio command");
1760 /* Cheat with the sector length to avoid a non-constant division */
1761 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1762 disk = bio->bio_driver_info;
1763 io->header.target_id = disk->ld_id;
1764 io->header.timeout = 0;
1765 io->header.flags = 0;
1766 io->header.scsi_status = 0;
1767 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is in sectors for LD I/O frames. */
1768 io->header.data_len = blkcount;
/* Split the 64-bit sense buffer bus address on 64-bit platforms. */
1769 #if defined(__x86_64__)
1770 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
1771 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
1773 io->sense_addr_lo = cm->cm_sense_busaddr;
1774 io->sense_addr_hi = 0;
1776 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
1777 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
1778 cm->cm_complete = mfi_bio_complete;
1779 cm->cm_private = bio;
1780 cm->cm_data = bp->b_data;
1781 cm->cm_len = bp->b_bcount;
1782 cm->cm_sg = &io->sgl;
1783 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1784 cm->cm_flags = flags;
/*
 * Completion handler for bio-derived commands: flag B_ERROR on firmware
 * or SCSI failure (printing the sense data) and on driver-level errors,
 * then release the command and complete the bio toward the disk layer.
 */
1789 mfi_bio_complete(struct mfi_command *cm)
1793 struct mfi_frame_header *hdr;
1794 struct mfi_softc *sc;
1796 bio = cm->cm_private;
1798 hdr = &cm->cm_frame->header;
1801 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
1802 bp->b_flags |= B_ERROR;
1804 device_printf(sc->mfi_dev, "I/O error, status= %d "
1805 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1806 mfi_print_sense(cm->cm_sc, cm->cm_sense);
/* cm_error records bus_dma/driver failures separate from firmware status. */
1807 } else if (cm->cm_error != 0) {
1808 bp->b_flags |= B_ERROR;
1811 mfi_release_command(cm);
1812 mfi_disk_complete(bio);
/*
 * Main dispatch loop: feed the controller as long as work and resources
 * last.  Sources, in priority order: already-prepared ready commands,
 * queued CAM ccbs, then the bio queue.  A command that fails to map is
 * requeued and the loop stops (queue is effectively frozen).
 */
1816 mfi_startio(struct mfi_softc *sc)
1818 struct mfi_command *cm;
1819 struct ccb_hdr *ccbh;
1822 /* Don't bother if we're short on resources */
1823 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1826 /* Try a command that has already been prepared */
1827 cm = mfi_dequeue_ready(sc);
1830 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1831 cm = sc->mfi_cam_start(ccbh);
1834 /* Nope, so look for work on the bioq */
1836 cm = mfi_bio_command(sc);
1838 /* No work available, so exit */
1842 /* Send the command to the controller */
1843 if (mfi_mapcmd(sc, cm) != 0) {
1844 mfi_requeue_ready(cm);
/*
 * DMA-map a command's data buffer (if any) and send its frame to the
 * controller.  For commands with data, bus_dmamap_load invokes
 * mfi_data_cb, which fills the scatter/gather list and itself calls
 * mfi_send_frame; EINPROGRESS (deferred mapping) freezes the queue.
 * Commands without data go straight to mfi_send_frame.  io lock held
 * (asserted).
 */
1851 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1855 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1857 if (cm->cm_data != NULL) {
/* Polled commands cannot tolerate a deferred (callback-later) load. */
1858 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1859 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1860 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1861 if (error == EINPROGRESS) {
1862 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1866 error = mfi_send_frame(sc, cm);
/*
 * bus_dma load callback: build the frame's scatter/gather list from the
 * DMA segments, pick the SG format (IEEE "skinny", 32-bit, or 64-bit),
 * set the frame direction flags, sync the buffer for the transfer, and
 * finally submit the frame.  On a load error the command is completed
 * with cm_error set instead.
 */
1873 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1875 struct mfi_frame_header *hdr;
1876 struct mfi_command *cm;
1878 struct mfi_softc *sc;
1883 cm = (struct mfi_command *)arg;
1885 hdr = &cm->cm_frame->header;
1889 kprintf("error %d in callback\n", error);
1890 cm->cm_error = error;
1891 mfi_complete(sc, cm);
1895 /* Use IEEE sgl only for IO's on a SKINNY controller
1896 * For other commands on a SKINNY controller use either
1897 * sg32 or sg64 based on the sizeof(bus_addr_t).
1898 * Also calculate the total frame size based on the type
1901 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
1902 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
1903 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
1904 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
1905 for (i = 0; i < nsegs; i++) {
1906 sgl->sg_skinny[i].addr = segs[i].ds_addr;
1907 sgl->sg_skinny[i].len = segs[i].ds_len;
1908 sgl->sg_skinny[i].flag = 0;
1910 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
1912 sge_size = sizeof(struct mfi_sg_skinny);
1915 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1916 for (i = 0; i < nsegs; i++) {
1917 sgl->sg32[i].addr = segs[i].ds_addr;
1918 sgl->sg32[i].len = segs[i].ds_len;
1920 sge_size = sizeof(struct mfi_sg32);
1922 for (i = 0; i < nsegs; i++) {
1923 sgl->sg64[i].addr = segs[i].ds_addr;
1924 sgl->sg64[i].len = segs[i].ds_len;
1926 hdr->flags |= MFI_FRAME_SGL64;
1927 sge_size = sizeof(struct mfi_sg64);
1930 hdr->sg_count = nsegs;
/* Direction flags drive both the DMA sync op and the frame header. */
1933 if (cm->cm_flags & MFI_CMD_DATAIN) {
1934 dir |= BUS_DMASYNC_PREREAD;
1935 hdr->flags |= MFI_FRAME_DIR_READ;
1937 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1938 dir |= BUS_DMASYNC_PREWRITE;
1939 hdr->flags |= MFI_FRAME_DIR_WRITE;
1941 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1942 cm->cm_flags |= MFI_CMD_MAPPED;
1945 * Instead of calculating the total number of frames in the
1946 * compound frame, it's already assumed that there will be at
1947 * least 1 frame, so don't compensate for the modulo of the
1948 * following division.
1950 cm->cm_total_frame_size += (sge_size * nsegs);
1951 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1953 mfi_send_frame(sc, cm);
/*
 * Hand a fully built frame to the hardware.  Interrupt-driven commands
 * are timestamped and put on the busy queue; polled commands get their
 * status pre-set to INVALID and are busy-waited (up to
 * MFI_POLL_TIMEOUT_SECS) for the firmware to overwrite it.
 */
1957 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1959 struct mfi_frame_header *hdr;
1960 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1962 hdr = &cm->cm_frame->header;
1964 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1965 cm->cm_timestamp = time_second;
1966 mfi_enqueue_busy(cm);
/* Polled path: sentinel status, and keep the reply off the reply queue. */
1968 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1969 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1973 * The bus address of the command is aligned on a 64 byte boundary,
1974 * leaving the least 6 bits as zero. For whatever reason, the
1975 * hardware wants the address shifted right by three, leaving just
1976 * 3 zero bits. These three bits are then used as a prefetching
1977 * hint for the hardware to predict how many frames need to be
1978 * fetched across the bus. If a command has more than 8 frames
1979 * then the 3 bits are set to 0x7 and the firmware uses other
1980 * information in the command to determine the total amount to fetch.
1981 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1982 * is enough for both 32bit and 64bit systems.
1984 if (cm->cm_extra_frames > 7)
1985 cm->cm_extra_frames = 7;
1987 sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames);
1989 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1992 /* This is a polled command, so busy-wait for it to complete. */
1993 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2000 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2001 device_printf(sc->mfi_dev, "Frame %p timed out "
2002 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Common command completion: undo the DMA mapping (post-sync + unload)
 * if one was made, mark the command completed, and invoke its
 * completion callback when set.
 */
2010 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2014 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2016 if (cm->cm_flags & MFI_CMD_DATAIN)
2017 dir |= BUS_DMASYNC_POSTREAD;
2018 if (cm->cm_flags & MFI_CMD_DATAOUT)
2019 dir |= BUS_DMASYNC_POSTWRITE;
2021 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2022 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2023 cm->cm_flags &= ~MFI_CMD_MAPPED;
2026 cm->cm_flags |= MFI_CMD_COMPLETED;
2028 if (cm->cm_complete != NULL)
2029 cm->cm_complete(cm);
/*
 * Issue a polled MFI_CMD_ABORT for cm_abort, identified to the firmware
 * by its context and frame bus address.  If the target is the
 * outstanding AEN command, mark it aborted and wait (bounded: 5 tries of
 * 5*hz each on &sc->mfi_aen_cm) for its completion handler to clear
 * sc->mfi_aen_cm.  io lock held (asserted).
 */
2035 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2037 struct mfi_command *cm;
2038 struct mfi_abort_frame *abort;
2040 uint32_t context = 0;
2042 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2044 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2048 /* Zero out the MFI frame, preserving the pre-assigned context id. */
2049 context = cm->cm_frame->header.context;
2050 bzero(cm->cm_frame, sizeof(union mfi_frame));
2051 cm->cm_frame->header.context = context;
2053 abort = &cm->cm_frame->abort;
2054 abort->header.cmd = MFI_CMD_ABORT;
2055 abort->header.flags = 0;
2056 abort->header.scsi_status = 0;
2057 abort->abort_context = cm_abort->cm_frame->header.context;
/* Split the 64-bit frame bus address on 64-bit platforms. */
2058 #if defined(__x86_64__)
2059 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr & 0xFFFFFFFF;
2060 abort->abort_mfi_addr_hi = (cm_abort->cm_frame_busaddr & 0xFFFFFFFF00000000 ) >> 32 ;
2062 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
2063 abort->abort_mfi_addr_hi = 0;
2066 cm->cm_flags = MFI_CMD_POLLED;
2068 sc->mfi_aen_cm->cm_aen_abort = 1;
2070 mfi_release_command(cm);
/* mfi_aen_complete() does the wakeup that ends this sleep. */
2072 while (i < 5 && sc->mfi_aen_cm != NULL) {
2073 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
/*
 * Crash-dump write path for logical disks: synchronously (polled,
 * DATAOUT) write len bytes from virt to the given 64-bit LBA of logical
 * drive id using a native LD WRITE frame.  Must not sleep — used from
 * the kernel dump code.
 */
2081 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
2083 struct mfi_command *cm;
2084 struct mfi_io_frame *io;
2086 uint32_t context = 0;
2088 if ((cm = mfi_dequeue_free(sc)) == NULL)
2091 /* Zero out the MFI frame, preserving the pre-assigned context id. */
2092 context = cm->cm_frame->header.context;
2093 bzero(cm->cm_frame, sizeof(union mfi_frame));
2094 cm->cm_frame->header.context = context;
2096 io = &cm->cm_frame->io;
2097 io->header.cmd = MFI_CMD_LD_WRITE;
2098 io->header.target_id = id;
2099 io->header.timeout = 0;
2100 io->header.flags = 0;
2101 io->header.scsi_status = 0;
2102 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is a sector count; round the byte length up. */
2103 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2104 #if defined(__x86_64__)
2105 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2106 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000 ) >> 32;
2108 io->sense_addr_lo = cm->cm_sense_busaddr;
2109 io->sense_addr_hi = 0;
2111 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2112 io->lba_lo = lba & 0xffffffff;
2115 cm->cm_sg = &io->sgl;
2116 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2117 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2119 error = mfi_mapcmd(sc, cm);
2120 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2121 BUS_DMASYNC_POSTWRITE);
2122 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2123 mfi_release_command(cm);
/*
 * Crash-dump write path for system PDs: like mfi_dump_blocks but via a
 * SCSI pass-through WRITE_10 frame, so the LBA is limited to 32 bits
 * and data_len is in bytes.  Polled; must not sleep.
 */
2129 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2132 struct mfi_command *cm;
2133 struct mfi_pass_frame *pass;
2137 if ((cm = mfi_dequeue_free(sc)) == NULL)
2140 pass = &cm->cm_frame->pass;
2141 bzero(pass->cdb, 16);
2142 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* WRITE_10 CDB: big-endian LBA in bytes 2-5, block count in 7-8. */
2143 pass->cdb[0] = WRITE_10;
2144 pass->cdb[2] = (lba & 0xff000000) >> 24;
2145 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2146 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2147 pass->cdb[5] = (lba & 0x000000ff);
2148 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2149 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2150 pass->cdb[8] = (blkcount & 0x00ff);
2151 pass->header.target_id = id;
2152 pass->header.timeout = 0;
2153 pass->header.flags = 0;
2154 pass->header.scsi_status = 0;
2155 pass->header.sense_len = MFI_SENSE_LEN;
2156 pass->header.data_len = len;
2157 pass->header.cdb_len = 10;
2158 #if defined(__x86_64__)
2159 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2160 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000 ) >> 32;
2162 pass->sense_addr_lo = cm->cm_sense_busaddr;
2163 pass->sense_addr_hi = 0;
2167 cm->cm_sg = &pass->sgl;
2168 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2169 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2171 error = mfi_mapcmd(sc, cm);
2172 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2173 BUS_DMASYNC_POSTWRITE);
2174 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2175 mfi_release_command(cm);
/*
 * Character-device open: refuse while the driver is detaching,
 * otherwise mark the control device open.
 */
2181 mfi_open(struct dev_open_args *ap)
2183 cdev_t dev = ap->a_head.a_dev;
2184 struct mfi_softc *sc;
2189 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2190 if (sc->mfi_detaching)
2193 sc->mfi_flags |= MFI_FLAGS_OPEN;
2196 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Character-device close: clear the open flag and remove any AEN
 * listener registrations belonging to the closing process.
 */
2202 mfi_close(struct dev_close_args *ap)
2204 cdev_t dev = ap->a_head.a_dev;
2205 struct mfi_softc *sc;
2206 struct mfi_aen *mfi_aen_entry, *tmp;
2210 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2211 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2213 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2214 if (mfi_aen_entry->p == curproc) {
2215 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2217 kfree(mfi_aen_entry, M_MFIBUF);
2220 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Take the config lock for DCMD opcodes that change the array
 * configuration; returns whether the lock was taken so the caller can
 * pass the result to mfi_config_unlock().
 */
2225 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2229 case MFI_DCMD_LD_DELETE:
2230 case MFI_DCMD_CFG_ADD:
2231 case MFI_DCMD_CFG_CLEAR:
2232 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
/* Release the config lock iff mfi_config_lock() reported taking it. */
2240 mfi_config_unlock(struct mfi_softc *sc, int locked)
2244 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/* Perform pre-issue checks on commands from userland and possibly veto them. */
/*
 * Before a destructive user DCMD runs, quiesce the kernel-side device:
 * LD_DELETE disables the targeted mfid disk, CFG_CLEAR disables every
 * disk (re-enabling all on partial failure), and PD_STATE_SET to
 * UNCONFIGURED_GOOD disables the matching system PD.  A non-zero return
 * vetoes the command.  io lock held (asserted).
 */
2249 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2251 struct mfi_disk *ld, *ld2;
2253 struct mfi_system_pd *syspd = NULL;
2257 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2259 switch (cm->cm_frame->dcmd.opcode) {
2260 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE. */
2261 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2262 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2268 error = mfi_disk_disable(ld);
2270 case MFI_DCMD_CFG_CLEAR:
2271 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2272 error = mfi_disk_disable(ld);
/* One disk refused to disable: roll back the ones already disabled. */
2277 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2280 mfi_disk_enable(ld2);
2284 case MFI_DCMD_PD_STATE_SET:
2285 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2287 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2288 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2289 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2290 if (syspd->pd_id == syspd_id)
2298 error = mfi_syspd_disable(syspd);
2306 /* Perform post-issue checks on commands from userland. */
/*
 * After a destructive user DCMD completes: on success, detach the
 * kernel devices it removed (LD_DELETE: one mfid child; CFG_CLEAR: all
 * of them); on failure, re-enable what mfi_check_command_pre disabled.
 * CFG_ADD / CFG_FOREIGN_IMPORT trigger a re-probe (calls elided in this
 * excerpt), and a failed PD_STATE_SET re-enables the system PD.
 */
2308 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2310 struct mfi_disk *ld, *ldn;
2311 struct mfi_system_pd *syspd = NULL;
2315 switch (cm->cm_frame->dcmd.opcode) {
2316 case MFI_DCMD_LD_DELETE:
2317 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2318 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2321 KASSERT(ld != NULL, ("volume dissappeared"));
2322 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* device_delete_child may sleep: drop the io lock around it. */
2323 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2325 device_delete_child(sc->mfi_dev, ld->ld_dev);
2327 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2329 mfi_disk_enable(ld);
2331 case MFI_DCMD_CFG_CLEAR:
2332 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2333 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2335 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2336 device_delete_child(sc->mfi_dev, ld->ld_dev);
2339 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2341 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2342 mfi_disk_enable(ld);
2345 case MFI_DCMD_CFG_ADD:
2348 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2351 case MFI_DCMD_PD_STATE_SET:
2352 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2354 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2355 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2356 TAILQ_FOREACH(syspd,&sc->mfi_syspd_tqh,pd_link) {
2357 if (syspd->pd_id == syspd_id)
2364 /* If the transition fails then enable the syspd again */
2365 if(syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2366 mfi_syspd_enable(syspd);
/*
 * Execute a userland pass-through DCMD (MFIIO_PASSTHRU): copy in the
 * optional data buffer, run the frame with pre/post configuration
 * checks around it, then copy the frame status and data back out.
 * Sleeps for a free command rather than failing when the pool is empty.
 */
2372 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2374 struct mfi_command *cm;
2375 struct mfi_dcmd_frame *dcmd;
2376 void *ioc_buf = NULL;
2378 int error = 0, locked;
2381 if (ioc->buf_size > 0) {
2382 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2383 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2385 device_printf(sc->mfi_dev, "failed to copyin\n");
2386 kfree(ioc_buf, M_MFIBUF);
/* Config-changing opcodes serialize on the config lock. */
2391 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2393 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2394 while ((cm = mfi_dequeue_free(sc)) == NULL)
2395 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2397 /* Save context for later */
2398 context = cm->cm_frame->header.context;
/* The user frame overwrites the whole DCMD frame, context included. */
2400 dcmd = &cm->cm_frame->dcmd;
2401 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2403 cm->cm_sg = &dcmd->sgl;
2404 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2405 cm->cm_data = ioc_buf;
2406 cm->cm_len = ioc->buf_size;
2408 /* restore context */
2409 cm->cm_frame->header.context = context;
2411 /* Cheat since we don't know if we're writing or reading */
2412 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2414 error = mfi_check_command_pre(sc, cm);
2418 error = mfi_wait_command(sc, cm);
2420 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the completed frame (status etc.) to the caller. */
2423 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2424 mfi_check_command_post(sc, cm);
2426 mfi_release_command(cm);
2427 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2428 mfi_config_unlock(sc, locked);
2429 if (ioc->buf_size > 0)
2430 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2432 kfree(ioc_buf, M_MFIBUF);
/*
 * PTRIN: convert a user-supplied pointer field to a kernel pointer.
 * The two definitions are alternatives selected by an #ifdef whose
 * condition is elided in this excerpt — presumably a 32-bit-compat vs
 * native build distinction; TODO confirm against the full source.
 */
2437 #define PTRIN(p) ((void *)(uintptr_t)(p))
2439 #define PTRIN(p) (p)
/*
 * Detect whether a user CFG_ADD / LD_DELETE DCMD targets an SSCD
 * (CacheCade SSD cache) volume: CFG_ADD is judged from the supplied
 * config data, LD_DELETE by fetching LD_GET_INFO for the target id.
 * The (elided) result presumably feeds special-case handling in the
 * ioctl path — confirm against the full source.
 */
2443 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2445 struct mfi_config_data *conf_data = cm->cm_data;
2446 struct mfi_command *ld_cm = NULL;
2447 struct mfi_ld_info *ld_info = NULL;
2450 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2451 (conf_data->ld[0].params.isSSCD == 1)) {
2453 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2454 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2455 (void **)&ld_info, sizeof(*ld_info));
2457 device_printf(sc->mfi_dev,"Failed to allocate "
2458 "MFI_DCMD_LD_GET_INFO %d", error);
2460 kfree(ld_info, M_MFIBUF);
/* mbox[0] of the delete request names the LD to look up. */
2463 ld_cm->cm_flags = MFI_CMD_DATAIN;
2464 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2465 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2466 if (mfi_wait_command(sc, ld_cm) != 0) {
2467 device_printf(sc->mfi_dev, "failed to get log drv\n");
2468 mfi_release_command(ld_cm);
2469 kfree(ld_info, M_MFIBUF);
2473 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2474 kfree(ld_info, M_MFIBUF);
2475 mfi_release_command(ld_cm);
2478 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2481 if (ld_info->ld_config.params.isSSCD == 1)
2484 mfi_release_command(ld_cm);
2485 kfree(ld_info, M_MFIBUF);
/*
 * Main character-device ioctl entry point for the mfi control node.
 * Dispatches on 'cmd': queue statistics, logical-disk queries, the
 * native management command path (MFI_CMD and its 32-bit variant),
 * AEN registration, the Linux ioctl shims, and user passthru frames.
 *
 * NOTE(review): case labels, braces, error branches, and several
 * declarations (error, i, adapter, locked, temp_convert) fall on
 * lines elided from this excerpt; comments describe the visible flow.
 */
2491 mfi_ioctl(struct dev_ioctl_args *ap)
2493 cdev_t dev = ap->a_head.a_dev;
2494 u_long cmd = ap->a_cmd;
2495 int flag = ap->a_fflag;
2496 caddr_t arg = ap->a_data;
2497 struct mfi_softc *sc;
2498 union mfi_statrequest *ms;
2499 struct mfi_ioc_packet *ioc;
2501 struct mfi_ioc_packet32 *ioc32;
2503 struct mfi_ioc_aen *aen;
2504 struct mfi_command *cm = NULL;
2506 union mfi_sense_ptr sense_ptr;
2507 uint8_t *data = NULL, *temp, skip_pre_post = 0;
2509 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2511 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2512 struct mfi_ioc_passthru iop_swab;
/* Statistics request: copy the selected queue's counters out. */
2521 ms = (union mfi_statrequest *)arg;
2522 switch (ms->ms_item) {
2527 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2528 sizeof(struct mfi_qstat));
/* Look up a logical disk by array id and report its device name and
 * open state to the caller. */
2535 case MFIIO_QUERY_DISK:
2537 struct mfi_query_disk *qd;
2538 struct mfi_disk *ld;
2540 qd = (struct mfi_query_disk *)arg;
2541 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2542 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2543 if (ld->ld_id == qd->array_id)
2548 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2552 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2554 bzero(qd->devname, SPECNAMELEN + 1);
2555 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2556 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Native management command path (MFI_CMD / MFI_CMD32, presumably —
 * the case labels are elided).  Requests addressed to another adapter
 * number are redirected to that adapter's softc via the devclass. */
2564 devclass_t devclass;
2565 ioc = (struct mfi_ioc_packet *)arg;
2568 adapter = ioc->mfi_adapter_no;
2569 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2570 devclass = devclass_find("mfi");
2571 sc = devclass_get_softc(devclass, adapter);
2573 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2574 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2575 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2578 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2582 * save off original context since copying from user
2583 * will clobber some data
2585 context = cm->cm_frame->header.context;
2587 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2588 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2589 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2590 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2591 cm->cm_frame->header.scsi_status = 0;
2592 cm->cm_frame->header.pad0 = 0;
2593 if (ioc->mfi_sge_count) {
2595 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Translate user frame DATAIN/DATAOUT flags into command flags. */
2598 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2599 cm->cm_flags |= MFI_CMD_DATAIN;
2600 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2601 cm->cm_flags |= MFI_CMD_DATAOUT;
2602 /* Legacy app shim */
2603 if (cm->cm_flags == 0)
2604 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2605 cm->cm_len = cm->cm_frame->header.data_len;
/* Allocate a contiguous kernel bounce buffer covering all SG entries. */
2607 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2608 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2610 if (cm->cm_data == NULL) {
2611 device_printf(sc->mfi_dev, "Malloc failed\n");
2618 /* restore header context */
2619 cm->cm_frame->header.context = context;
/* DATAOUT: gather each user SG segment into the bounce buffer; the
 * 32-bit variant widens the iov_base pointers via PTRIN(). */
2622 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2623 for (i = 0; i < ioc->mfi_sge_count; i++) {
2625 if (cmd == MFI_CMD) {
2627 error = copyin(ioc->mfi_sgl[i].iov_base,
2629 ioc->mfi_sgl[i].iov_len);
2633 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2635 PTRIN(ioc32->mfi_sgl[i].iov_base);
2636 error = copyin(temp_convert,
2638 ioc32->mfi_sgl[i].iov_len);
2641 error = copyin(ioc->mfi_sgl[i].iov_base,
2643 ioc->mfi_sgl[i].iov_len);
2646 device_printf(sc->mfi_dev,
2647 "Copy in failed\n");
2650 temp = &temp[ioc->mfi_sgl[i].iov_len];
2654 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2655 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Pass-through SCSI commands carry the sense buffer's bus address in
 * the frame; split into lo/hi on 64-bit (the #else arm serves 32-bit). */
2657 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2658 #if defined(__x86_64__)
2659 cm->cm_frame->pass.sense_addr_lo =
2660 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2661 cm->cm_frame->pass.sense_addr_hi =
2662 (cm->cm_sense_busaddr& 0xFFFFFFFF00000000) >> 32;
2664 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2665 cm->cm_frame->pass.sense_addr_hi = 0;
/* SSCD commands bypass the normal pre/post bookkeeping. */
2669 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2670 skip_pre_post = mfi_check_for_sscd(sc, cm);
2671 if (!skip_pre_post) {
2672 error = mfi_check_command_pre(sc, cm);
2674 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2679 if ((error = mfi_wait_command(sc, cm)) != 0) {
2680 device_printf(sc->mfi_dev,
2681 "Controller polled failed\n");
2682 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2687 mfi_check_command_post(sc, cm);
2688 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* DATAIN: scatter the bounce buffer back into the user SG segments. */
2691 if (cm->cm_flags & MFI_CMD_DATAIN) {
2692 for (i = 0; i < ioc->mfi_sge_count; i++) {
2694 if (cmd == MFI_CMD) {
2696 error = copyout(temp,
2697 ioc->mfi_sgl[i].iov_base,
2698 ioc->mfi_sgl[i].iov_len);
2702 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2704 PTRIN(ioc32->mfi_sgl[i].iov_base);
2705 error = copyout(temp,
2707 ioc32->mfi_sgl[i].iov_len);
2710 error = copyout(temp,
2711 ioc->mfi_sgl[i].iov_base,
2712 ioc->mfi_sgl[i].iov_len);
2715 device_printf(sc->mfi_dev,
2716 "Copy out failed\n");
2719 temp = &temp[ioc->mfi_sgl[i].iov_len];
2723 if (ioc->mfi_sense_len) {
2724 /* get user-space sense ptr then copy out sense */
2725 bcopy(&((struct mfi_ioc_packet*)arg)
2726 ->mfi_frame.raw[ioc->mfi_sense_off],
2727 &sense_ptr.sense_ptr_data[0],
2728 sizeof(sense_ptr.sense_ptr_data));
2730 if (cmd != MFI_CMD) {
2732 * not 64bit native so zero out any address
2734 sense_ptr.addr.high = 0;
2737 error = copyout(cm->cm_sense, sense_ptr.user_space,
2738 ioc->mfi_sense_len);
2740 device_printf(sc->mfi_dev,
2741 "Copy out failed\n");
/* Report firmware completion status back in the user's frame header. */
2746 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2748 mfi_config_unlock(sc, locked);
2750 kfree(data, M_MFIBUF);
2752 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2753 mfi_release_command(cm);
2754 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* AEN registration (native): subscribe the caller to async events. */
2760 aen = (struct mfi_ioc_aen *)arg;
2761 error = mfi_aen_register(sc, aen->aen_seq_num,
2762 aen->aen_class_locale);
/* Linux management shim: resolve target adapter and forward. */
2765 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2767 devclass_t devclass;
2768 struct mfi_linux_ioc_packet l_ioc;
2771 devclass = devclass_find("mfi");
2772 if (devclass == NULL)
2775 error = copyin(arg, &l_ioc, sizeof(l_ioc));
2778 adapter = l_ioc.lioc_adapter_no;
2779 sc = devclass_get_softc(devclass, adapter);
2782 return (mfi_linux_ioctl_int(sc->mfi_cdev,
/* Linux AEN shim: same adapter resolution, then forward. */
2786 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2788 devclass_t devclass;
2789 struct mfi_linux_ioc_aen l_aen;
2792 devclass = devclass_find("mfi");
2793 if (devclass == NULL)
2796 error = copyin(arg, &l_aen, sizeof(l_aen));
2799 adapter = l_aen.laen_adapter_no;
2800 sc = devclass_get_softc(devclass, adapter);
2803 return (mfi_linux_ioctl_int(sc->mfi_cdev,
/* 32-bit passthru: widen the request into iop_swab, then fall through
 * (presumably) to the common MFIIO_PASSTHRU handling. */
2808 case MFIIO_PASSTHRU32:
2809 iop_swab.ioc_frame = iop32->ioc_frame;
2810 iop_swab.buf_size = iop32->buf_size;
2811 iop_swab.buf = PTRIN(iop32->buf);
2815 case MFIIO_PASSTHRU:
2816 error = mfi_user_command(sc, iop);
2818 if (cmd == MFIIO_PASSTHRU32)
2819 iop32->ioc_frame = iop_swab.ioc_frame;
2823 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Linux-compatibility ioctl worker, invoked from mfi_ioctl() (and
 * presumably the Linux emulation layer) for MFI_LINUX_CMD_2 and
 * MFI_LINUX_SET_AEN_2.  Mirrors the native MFI_CMD path but copies the
 * whole request structure in from user space first and treats all user
 * pointers as 32-bit (widened via PTRIN()).
 *
 * NOTE(review): braces, error branches, and several declarations
 * (error, i, locked, context) are on lines elided from this excerpt.
 */
2832 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
2834 struct mfi_softc *sc;
2835 struct mfi_linux_ioc_packet l_ioc;
2836 struct mfi_linux_ioc_aen l_aen;
2837 struct mfi_command *cm = NULL;
2838 struct mfi_aen *mfi_aen_entry;
2839 union mfi_sense_ptr sense_ptr;
2841 uint8_t *data = NULL, *temp;
2848 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2849 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Reject requests with more SG entries than the shim supports. */
2853 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2857 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2858 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2859 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2862 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2866 * save off original context since copying from user
2867 * will clobber some data
2869 context = cm->cm_frame->header.context;
2871 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2872 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2873 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2874 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
2875 cm->cm_frame->header.scsi_status = 0;
2876 cm->cm_frame->header.pad0 = 0;
2877 if (l_ioc.lioc_sge_count)
2879 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
/* Translate frame DATAIN/DATAOUT flags into command flags. */
2881 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2882 cm->cm_flags |= MFI_CMD_DATAIN;
2883 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2884 cm->cm_flags |= MFI_CMD_DATAOUT;
2885 cm->cm_len = cm->cm_frame->header.data_len;
/* Contiguous kernel bounce buffer for all SG segments. */
2887 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2888 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2890 if (cm->cm_data == NULL) {
2891 device_printf(sc->mfi_dev, "Malloc failed\n");
2898 /* restore header context */
2899 cm->cm_frame->header.context = context;
/* DATAOUT: gather user SG segments (32-bit pointers) into the buffer. */
2902 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2903 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2904 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
2906 l_ioc.lioc_sgl[i].iov_len);
2908 device_printf(sc->mfi_dev,
2909 "Copy in failed\n");
2912 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2916 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2917 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Pass-through SCSI: install the sense buffer bus address, split
 * lo/hi on 64-bit builds. */
2919 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2920 #if defined(__x86_64__)
2921 cm->cm_frame->pass.sense_addr_lo =
2922 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2923 cm->cm_frame->pass.sense_addr_hi =
2924 (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2926 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2927 cm->cm_frame->pass.sense_addr_hi = 0;
2931 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2932 error = mfi_check_command_pre(sc, cm);
2934 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2938 if ((error = mfi_wait_command(sc, cm)) != 0) {
2939 device_printf(sc->mfi_dev,
2940 "Controller polled failed\n");
2941 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2945 mfi_check_command_post(sc, cm);
2946 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* DATAIN: scatter results back to the user SG segments. */
2949 if (cm->cm_flags & MFI_CMD_DATAIN) {
2950 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2951 error = copyout(temp,
2952 PTRIN(l_ioc.lioc_sgl[i].iov_base),
2953 l_ioc.lioc_sgl[i].iov_len);
2955 device_printf(sc->mfi_dev,
2956 "Copy out failed\n");
2959 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2963 if (l_ioc.lioc_sense_len) {
2964 /* get user-space sense ptr then copy out sense */
2965 bcopy(&((struct mfi_linux_ioc_packet*)arg)
2966 ->lioc_frame.raw[l_ioc.lioc_sense_off],
2967 &sense_ptr.sense_ptr_data[0],
2968 sizeof(sense_ptr.sense_ptr_data));
2971 * only 32bit Linux support so zero out any
2972 * address over 32bit
2974 sense_ptr.addr.high = 0;
2976 error = copyout(cm->cm_sense, sense_ptr.user_space,
2977 l_ioc.lioc_sense_len);
2979 device_printf(sc->mfi_dev,
2980 "Copy out failed\n");
/* Write completion status directly into the user's frame header. */
2985 error = copyout(&cm->cm_frame->header.cmd_status,
2986 &((struct mfi_linux_ioc_packet*)arg)
2987 ->lioc_frame.hdr.cmd_status,
2990 device_printf(sc->mfi_dev,
2991 "Copy out failed\n");
2996 mfi_config_unlock(sc, locked);
2998 kfree(data, M_MFIBUF);
3000 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3001 mfi_release_command(cm);
3002 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* AEN shim: record the calling process so it can be notified, then
 * register; on registration failure the entry is unlinked and freed. */
3006 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3007 error = copyin(arg, &l_aen, sizeof(l_aen));
3010 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3011 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3013 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3014 if (mfi_aen_entry != NULL) {
3015 mfi_aen_entry->p = curproc;
3016 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3019 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3020 l_aen.laen_class_locale);
3023 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3025 kfree(mfi_aen_entry, M_MFIBUF);
3027 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3031 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * kqueue attach handler for the mfi control device: select read or
 * write filter ops based on kn_filter, stash the softc in kn_hook for
 * the filter callbacks, and insert the knote into the device's klist.
 * Unsupported filter types set a_result to EOPNOTSUPP.
 */
3040 mfi_kqfilter(struct dev_kqfilter_args *ap)
3042 cdev_t dev = ap->a_head.a_dev;
3043 struct knote *kn = ap->a_kn;
3044 struct mfi_softc *sc;
3045 struct klist *klist;
3050 switch (kn->kn_filter) {
/* Presumably EVFILT_READ / EVFILT_WRITE — case labels are elided. */
3052 kn->kn_fop = &mfi_read_filterops;
3053 kn->kn_hook = (caddr_t)sc;
3056 kn->kn_fop = &mfi_write_filterops;
3057 kn->kn_hook = (caddr_t)sc;
3060 ap->a_result = EOPNOTSUPP;
3064 klist = &sc->mfi_kq.ki_note;
3065 knote_insert(klist, kn);
/*
 * kqueue detach handler: remove the knote from the softc's klist.
 * The softc was stored in kn_hook by mfi_kqfilter().
 */
3071 mfi_filter_detach(struct knote *kn)
3073 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3074 struct klist *klist = &sc->mfi_kq.ki_note;
3076 knote_remove(klist, kn);
/*
 * EVFILT_READ filter: reports readiness when an AEN (async event
 * notification) has fired, consuming the trigger flag; otherwise marks
 * the process as waiting for a poll wakeup.  Return statements are on
 * elided lines — presumably nonzero when triggered; TODO confirm.
 */
3080 mfi_filter_read(struct knote *kn, long hint)
3082 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3085 if (sc->mfi_aen_triggered != 0) {
/* Consume the one-shot AEN trigger. */
3087 sc->mfi_aen_triggered = 0;
/* No trigger pending and no AEN command outstanding: flag as error. */
3089 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3090 kn->kn_flags |= EV_ERROR;
3093 sc->mfi_poll_waiting = 1;
/*
 * EVFILT_WRITE filter — body entirely elided from this excerpt.
 */
3099 mfi_filter_write(struct knote *kn, long hint)
/*
 * NOTE(review): the lines below (3107 onward) belong to a separate
 * routine whose signature (around original line 3105, presumably a
 * dump-all / sysctl debug handler) is elided.  It walks every mfi
 * adapter via the devclass and reports busy commands that have
 * exceeded MFI_CMD_TIMEOUT seconds.
 */
3107 struct mfi_softc *sc;
3108 struct mfi_command *cm;
3114 dc = devclass_find("mfi");
3116 kprintf("No mfi dev class\n");
/* Iterate adapter units until devclass_get_softc runs out (the
 * terminating check is on an elided line). */
3120 for (i = 0; ; i++) {
3121 sc = devclass_get_softc(dc, i);
3124 device_printf(sc->mfi_dev, "Dumping\n\n");
/* Any busy command older than the deadline is reported as timed out. */
3126 deadline = time_second - MFI_CMD_TIMEOUT;
3127 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3128 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3129 if (cm->cm_timestamp < deadline) {
3130 device_printf(sc->mfi_dev,
3131 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3132 (int)(time_second - cm->cm_timestamp));
3143 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3150 mfi_timeout(void *data)
3152 struct mfi_softc *sc = (struct mfi_softc *)data;
3153 struct mfi_command *cm;
3157 deadline = time_second - MFI_CMD_TIMEOUT;
3158 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3159 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3160 if (sc->mfi_aen_cm == cm)
3162 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3163 device_printf(sc->mfi_dev,
3164 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3165 (int)(time_second - cm->cm_timestamp));
3167 MFI_VALIDATE_CMD(sc, cm);
3177 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3179 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,