2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * Redistribution and use in source and binary forms, with or without
54 * modification, are permitted provided that the following conditions
57 * Copyright 1994-2009 The FreeBSD Project.
58 * All rights reserved.
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
66 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
68 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
69 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
70 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
71 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
72 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
73 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
74 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
75 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
76 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
78 * The views and conclusions contained in the software and documentation
79 * are those of the authors and should not be interpreted as representing
80 * official policies, either expressed or implied, of the FreeBSD Project.
82 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/sysctl.h>
90 #include <sys/malloc.h>
91 #include <sys/kernel.h>
93 #include <sys/eventhandler.h>
95 #include <sys/bus_dma.h>
99 #include <sys/signalvar.h>
100 #include <sys/device.h>
101 #include <sys/mplock2.h>
103 #include <bus/cam/scsi/scsi_all.h>
105 #include <bus/pci/pcivar.h>
107 #include <dev/raid/mfi/mfireg.h>
108 #include <dev/raid/mfi/mfi_ioctl.h>
109 #include <dev/raid/mfi/mfivar.h>
/*
 * Forward declarations for the driver's file-local helpers.
 * NOTE(review): this extract appears corrupted -- original file line numbers
 * are embedded at the start of each line and some lines are missing; verify
 * against the upstream mfi.c before building.
 */
111 static int mfi_alloc_commands(struct mfi_softc *);
112 static int mfi_comms_init(struct mfi_softc *);
113 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
114 static int mfi_get_controller_info(struct mfi_softc *);
115 static int mfi_get_log_state(struct mfi_softc *,
116 struct mfi_evt_log_state **);
117 static int mfi_parse_entries(struct mfi_softc *, int, int);
118 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
119 uint32_t, void **, size_t);
120 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
121 static void mfi_startup(void *arg);
122 static void mfi_intr(void *arg);
123 static void mfi_ldprobe(struct mfi_softc *sc);
124 static void mfi_syspdprobe(struct mfi_softc *sc);
125 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
126 static void mfi_aen_complete(struct mfi_command *);
127 static int mfi_aen_setup(struct mfi_softc *, uint32_t);
128 static int mfi_add_ld(struct mfi_softc *sc, int);
129 static void mfi_add_ld_complete(struct mfi_command *);
130 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
131 static void mfi_add_sys_pd_complete(struct mfi_command *);
132 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
133 static void mfi_bio_complete(struct mfi_command *);
134 static struct mfi_command * mfi_build_ldio(struct mfi_softc *,struct bio*);
135 static struct mfi_command * mfi_build_syspdio(struct mfi_softc *,struct bio*);
136 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
137 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
138 static void mfi_complete(struct mfi_softc *, struct mfi_command *);
139 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
140 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
141 static void mfi_timeout(void *);
142 static int mfi_user_command(struct mfi_softc *,
143 struct mfi_ioc_passthru *);
144 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
145 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
146 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
147 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
148 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
149 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
150 static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
151 static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
152 static void mfi_filter_detach(struct knote *);
153 static int mfi_filter_read(struct knote *, long);
154 static int mfi_filter_write(struct knote *, long);
/*
 * Loader tunables and sysctl knobs under hw.mfi.*: event filtering
 * (locale/class), command-pool sizing, and MSI enable.
 */
156 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
157 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
158 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
159 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
160 0, "event message locale");
162 static int mfi_event_class = MFI_EVT_CLASS_INFO;
163 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
164 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
165 0, "event message class");
/* Driver command pool is the smaller of this and the firmware's maximum. */
167 static int mfi_max_cmds = 128;
168 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
169 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
172 static int mfi_msi_enable = 1;
173 TUNABLE_INT("hw.mfi.msi.enable", &mfi_msi_enable);
175 /* Management interface */
176 static d_open_t mfi_open;
177 static d_close_t mfi_close;
178 static d_ioctl_t mfi_ioctl;
179 static d_kqfilter_t mfi_kqfilter;
/*
 * Character-device entry points for the /dev/mfi%d management node.
 * NOTE(review): the initializer below appears truncated in this extract --
 * the .d_open assignment and closing brace are missing; verify upstream.
 */
181 static struct dev_ops mfi_ops = {
184 .d_close = mfi_close,
185 .d_ioctl = mfi_ioctl,
186 .d_kqfilter = mfi_kqfilter,
/* kqueue filter ops used by mfi_kqfilter for EVFILT_READ/EVFILT_WRITE. */
189 static struct filterops mfi_read_filterops =
190 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
191 static struct filterops mfi_write_filterops =
192 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
194 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
196 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
199 mfi_enable_intr_xscale(struct mfi_softc *sc)
201 MFI_WRITE4(sc, MFI_OMSK, 0x01);
205 mfi_enable_intr_ppc(struct mfi_softc *sc)
207 if (sc->mfi_flags & MFI_FLAGS_1078) {
208 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
209 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
210 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
211 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
212 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
213 } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
214 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
216 panic("unknown adapter type");
221 mfi_read_fw_status_xscale(struct mfi_softc *sc)
223 return MFI_READ4(sc, MFI_OMSG0);
227 mfi_read_fw_status_ppc(struct mfi_softc *sc)
229 return MFI_READ4(sc, MFI_OSP0);
233 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
237 status = MFI_READ4(sc, MFI_OSTS);
238 if ((status & MFI_OSTS_INTR_VALID) == 0)
241 MFI_WRITE4(sc, MFI_OSTS, status);
246 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
250 status = MFI_READ4(sc, MFI_OSTS);
251 if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
252 ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
253 ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
256 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
257 MFI_WRITE4(sc, MFI_OSTS, status);
259 MFI_WRITE4(sc, MFI_ODCR0, status);
264 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
266 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
270 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
272 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
273 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
274 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
276 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
/*
 * Drive the controller firmware from whatever state it is in toward
 * MFI_FWSTATE_READY, issuing the appropriate inbound-doorbell command for
 * each intermediate state and polling the status register until it moves.
 * NOTE(review): this extract is missing many lines of the function body
 * (opening brace, switch header, sleeps, breaks, returns); the code below
 * is NOT compilable as shown -- restore from upstream mfi.c.
 */
281 mfi_transition_firmware(struct mfi_softc *sc)
283 uint32_t fw_state, cur_state;
285 uint32_t cur_abs_reg_val = 0;
286 uint32_t prev_abs_reg_val = 0;
287 bus_space_handle_t idb;
/* SKINNY controllers expose the inbound doorbell at a different offset. */
289 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
290 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
291 idb = sc->mfi_flags & MFI_FLAGS_SKINNY ? MFI_SKINNY_IDB : MFI_IDB;
292 while (fw_state != MFI_FWSTATE_READY) {
294 device_printf(sc->mfi_dev, "Waiting for firmware to "
296 cur_state = fw_state;
298 case MFI_FWSTATE_FAULT:
299 device_printf(sc->mfi_dev, "Firmware fault\n");
301 case MFI_FWSTATE_WAIT_HANDSHAKE:
302 MFI_WRITE4(sc, idb, MFI_FWINIT_CLEAR_HANDSHAKE);
305 case MFI_FWSTATE_OPERATIONAL:
306 MFI_WRITE4(sc, idb, MFI_FWINIT_READY);
309 case MFI_FWSTATE_UNDEFINED:
310 case MFI_FWSTATE_BB_INIT:
313 case MFI_FWSTATE_FW_INIT:
314 case MFI_FWSTATE_FLUSH_CACHE:
/* Device scan can legitimately take minutes; track progress via the
 * absolute status value so a moving scan is not treated as stuck. */
317 case MFI_FWSTATE_DEVICE_SCAN:
318 max_wait = 180; /* wait for 180 seconds */
319 prev_abs_reg_val = cur_abs_reg_val;
321 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
322 MFI_WRITE4(sc, idb, MFI_FWINIT_HOTPLUG);
326 device_printf(sc->mfi_dev,"Unknown firmware state %#x\n",
/* Poll roughly 10x per second for up to max_wait seconds. */
330 for (i = 0; i < (max_wait * 10); i++) {
331 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
332 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
333 if (fw_state == cur_state)
338 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
339 /* Check the device scanning progress */
340 if (prev_abs_reg_val != cur_abs_reg_val)
343 if (fw_state == cur_state) {
344 device_printf(sc->mfi_dev, "Firmware stuck in state "
352 #if defined(__x86_64__)
354 mfi_addr64_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
359 *addr = segs[0].ds_addr;
363 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
368 *addr = segs[0].ds_addr;
/*
 * Main attach path: initialize locks/queues, select the per-generation
 * register access methods, bring the firmware to READY, size and allocate
 * the DMA areas (comms queues, command frames, sense buffers), initialize
 * the command pool and firmware comms, set up the interrupt and management
 * device node, and arm the watchdog.
 * NOTE(review): this extract is missing many lines (opening brace, error
 * gotos/returns, #else branches of the bus_dmamap_load blocks, several
 * statements); not compilable as shown -- restore from upstream mfi.c.
 */
373 mfi_attach(struct mfi_softc *sc)
376 int error, commsz, framessz, sensesz;
377 int frames, unit, max_fw_sge;
380 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.981\n");
382 lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
383 lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
384 TAILQ_INIT(&sc->mfi_ld_tqh);
385 TAILQ_INIT(&sc->mfi_syspd_tqh);
386 TAILQ_INIT(&sc->mfi_aen_pids);
387 TAILQ_INIT(&sc->mfi_cam_ccbq);
/* 1064R uses the xscale register layout; everything else the ppc one. */
394 if (sc->mfi_flags & MFI_FLAGS_1064R) {
395 sc->mfi_enable_intr = mfi_enable_intr_xscale;
396 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
397 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
398 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
400 sc->mfi_enable_intr = mfi_enable_intr_ppc;
401 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
402 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
403 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
407 /* Before we get too far, see if the firmware is working */
408 if ((error = mfi_transition_firmware(sc)) != 0) {
409 device_printf(sc->mfi_dev, "Firmware not in READY state, "
410 "error %d\n", error);
415 * Get information needed for sizing the contiguous memory for the
416 * frame pool. Size down the sgl parameter since we know that
417 * we will never need more than what's required for MAXPHYS.
418 * It would be nice if these constants were available at runtime
419 * instead of compile time.
421 status = sc->mfi_read_fw_status(sc);
422 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
423 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
424 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
427 * Create the dma tag for data buffers. Used both for block I/O
428 * and for various internal data queries.
430 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
431 1, 0, /* algnmnt, boundary */
432 BUS_SPACE_MAXADDR, /* lowaddr */
433 BUS_SPACE_MAXADDR, /* highaddr */
434 NULL, NULL, /* filter, filterarg */
435 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
436 sc->mfi_max_sge, /* nsegments */
437 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
438 BUS_DMA_ALLOCNOW, /* flags */
439 &sc->mfi_buffer_dmat)) {
440 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
445 * Allocate DMA memory for the comms queues. Keep it under 4GB for
446 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
447 * entry, so the calculated size here will be will be 1 more than
448 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
450 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
451 sizeof(struct mfi_hwcomms);
452 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
453 1, 0, /* algnmnt, boundary */
454 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
455 BUS_SPACE_MAXADDR, /* highaddr */
456 NULL, NULL, /* filter, filterarg */
457 commsz, /* maxsize */
459 commsz, /* maxsegsize */
461 &sc->mfi_comms_dmat)) {
462 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
465 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
466 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
467 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
470 bzero(sc->mfi_comms, commsz);
/* Capture the comms area's bus address via the width-appropriate callback. */
471 #if defined(__x86_64__)
472 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
473 sc->mfi_comms, commsz, mfi_addr64_cb, &sc->mfi_comms_busaddr, 0);
475 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
476 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
480 * Allocate DMA memory for the command frames. Keep them in the
481 * lower 4GB for efficiency. Calculate the size of the commands at
482 * the same time; each command is one 64 byte frame plus a set of
483 * additional frames for holding sg lists or other data.
484 * The assumption here is that the SG list will start at the second
485 * frame and not use the unused bytes in the first frame. While this
486 * isn't technically correct, it simplifies the calculation and allows
487 * for command frames that might be larger than an mfi_io_frame.
489 if (sizeof(bus_addr_t) == 8) {
490 sc->mfi_sge_size = sizeof(struct mfi_sg64);
491 sc->mfi_flags |= MFI_FLAGS_SG64;
493 sc->mfi_sge_size = sizeof(struct mfi_sg32);
495 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
496 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
497 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
498 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
499 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
500 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
501 64, 0, /* algnmnt, boundary */
502 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
503 BUS_SPACE_MAXADDR, /* highaddr */
504 NULL, NULL, /* filter, filterarg */
505 framessz, /* maxsize */
507 framessz, /* maxsegsize */
509 &sc->mfi_frames_dmat)) {
510 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
513 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
514 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
515 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
518 bzero(sc->mfi_frames, framessz);
519 #if defined(__x86_64__)
520 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
521 sc->mfi_frames, framessz, mfi_addr64_cb, &sc->mfi_frames_busaddr,0);
523 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
524 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
528 * Allocate DMA memory for the frame sense data. Keep them in the
529 * lower 4GB for efficiency
531 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
532 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
533 4, 0, /* algnmnt, boundary */
534 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
535 BUS_SPACE_MAXADDR, /* highaddr */
536 NULL, NULL, /* filter, filterarg */
537 sensesz, /* maxsize */
539 sensesz, /* maxsegsize */
541 &sc->mfi_sense_dmat)) {
542 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
545 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
546 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
547 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
550 #if defined(__x86_64__)
551 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
552 sc->mfi_sense, sensesz, mfi_addr64_cb, &sc->mfi_sense_busaddr, 0);
554 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
555 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
558 if ((error = mfi_alloc_commands(sc)) != 0)
561 if ((error = mfi_comms_init(sc)) != 0)
564 if ((error = mfi_get_controller_info(sc)) != 0)
567 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* NOTE(review): "(error = mfi_aen_setup(sc, 0), 0)" uses the comma
 * operator, so the condition is always 0 and AEN setup failure is
 * silently ignored -- looks like an upstream bug; confirm intent. */
568 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
569 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
572 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
575 * Set up the interrupt handler. XXX This should happen in
579 sc->mfi_irq_type = pci_alloc_1intr(sc->mfi_dev, mfi_msi_enable,
580 &sc->mfi_irq_rid, &irq_flags);
581 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
582 &sc->mfi_irq_rid, irq_flags)) == NULL) {
583 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
586 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
587 mfi_intr, sc, &sc->mfi_intr, NULL)) {
588 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
592 /* Register a config hook to probe the bus for arrays */
593 sc->mfi_ich.ich_func = mfi_startup;
594 sc->mfi_ich.ich_arg = sc;
595 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
596 device_printf(sc->mfi_dev, "Cannot establish configuration "
602 * Register a shutdown handler.
604 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
605 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
606 device_printf(sc->mfi_dev, "Warning: shutdown event "
607 "registration failed\n");
611 * Create the control device for doing management
613 unit = device_get_unit(sc->mfi_dev);
614 sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
615 0640, "mfi%d", unit);
617 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
618 if (sc->mfi_cdev != NULL)
619 sc->mfi_cdev->si_drv1 = sc;
620 sysctl_ctx_init(&sc->mfi_sysctl_ctx);
621 sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
622 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
623 device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
624 if (sc->mfi_sysctl_tree == NULL) {
625 device_printf(sc->mfi_dev, "can't add sysctl node\n");
628 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
629 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
630 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
631 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
632 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
633 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
634 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
635 &sc->mfi_keep_deleted_volumes, 0,
636 "Don't detach the mfid device for a busy volume that is deleted");
638 device_add_child(sc->mfi_dev, "mfip", -1);
639 bus_generic_attach(sc->mfi_dev);
641 /* Start the timeout watchdog */
642 callout_init_mp(&sc->mfi_watchdog_callout);
643 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
/*
 * Carve the preallocated frame/sense DMA areas into a pool of
 * mfi_command structures (at most mfi_max_cmds, bounded by the firmware
 * maximum) and create a per-command data dmamap.
 * NOTE(review): extract is missing lines (opening brace, kmalloc flags,
 * parts of the error path); the "== 0" test on bus_dmamap_create looks
 * inverted relative to the usual error convention, but the surrounding
 * failure-handling lines are absent -- verify against upstream mfi.c.
 */
650 mfi_alloc_commands(struct mfi_softc *sc)
652 struct mfi_command *cm;
656 * XXX Should we allocate all the commands up front, or allocate on
657 * demand later like 'aac' does?
659 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
661 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
662 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
664 sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
667 for (i = 0; i < ncmds; i++) {
668 cm = &sc->mfi_commands[i];
/* Each command gets a slice of the contiguous frame pool... */
669 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
670 sc->mfi_cmd_size * i);
671 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
672 sc->mfi_cmd_size * i;
/* ...its index as the completion context, and a sense-buffer slot. */
673 cm->cm_frame->header.context = i;
674 cm->cm_sense = &sc->mfi_sense[i];
675 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
678 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
679 &cm->cm_dmamap) == 0)
680 mfi_release_command(cm);
683 sc->mfi_total_cmds++;
/*
 * Return a command to the free pool, scrubbing the frame header fields
 * (as 32-bit words, for speed) while preserving the context word that
 * ties the frame back to its pool index.
 * NOTE(review): extract is missing lines (opening brace, several field
 * resets between the surviving ones); verify against upstream mfi.c.
 */
690 mfi_release_command(struct mfi_command *cm)
692 struct mfi_frame_header *hdr;
696 * Zero out the important fields of the frame, but make sure the
697 * context field is preserved. For efficiency, handle the fields
698 * as 32 bit words. Clear out the first S/G entry too for safety.
700 hdr = &cm->cm_frame->header;
701 if (cm->cm_data != NULL && hdr->sg_count) {
702 cm->cm_sg->sg32[0].len = 0;
703 cm->cm_sg->sg32[0].addr = 0;
/* Word 2/3 hold the context and are deliberately left untouched. */
706 hdr_data = (uint32_t *)cm->cm_frame;
707 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
708 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
709 hdr_data[4] = 0; /* flags, timeout */
710 hdr_data[5] = 0; /* data_len */
712 cm->cm_extra_frames = 0;
714 cm->cm_complete = NULL;
715 cm->cm_private = NULL;
718 cm->cm_total_frame_size = 0;
720 mfi_enqueue_free(cm);
/*
 * Allocate and pre-fill a DCMD frame for the given opcode, optionally
 * allocating (or reusing via *bufp) a data buffer of bufsize bytes.
 * Caller must hold mfi_io_lock.  On success *cmp points at the prepared
 * command; caller owns both the command and the buffer.
 * NOTE(review): extract is missing lines (opening brace, returns, parts
 * of the buffer-reuse branch); verify against upstream mfi.c.
 */
724 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
725 void **bufp, size_t bufsize)
727 struct mfi_command *cm;
728 struct mfi_dcmd_frame *dcmd;
730 uint32_t context = 0;
732 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
734 cm = mfi_dequeue_free(sc);
738 /* Zero out the MFI frame */
739 context = cm->cm_frame->header.context;
740 bzero(cm->cm_frame, sizeof(union mfi_frame));
741 cm->cm_frame->header.context = context;
743 if ((bufsize > 0) && (bufp != NULL)) {
745 buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
747 mfi_release_command(cm);
756 dcmd = &cm->cm_frame->dcmd;
757 bzero(dcmd->mbox, MFI_MBOX_SIZE);
758 dcmd->header.cmd = MFI_CMD_DCMD;
759 dcmd->header.timeout = 0;
760 dcmd->header.flags = 0;
761 dcmd->header.data_len = bufsize;
762 dcmd->header.scsi_status = 0;
763 dcmd->opcode = opcode;
764 cm->cm_sg = &dcmd->sgl;
765 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
768 cm->cm_private = buf;
769 cm->cm_len = bufsize;
/* Hand a freshly allocated buffer back to the caller if it passed NULL. */
772 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI INIT command that tells the firmware where the reply queue
 * and producer/consumer indices live.  The mfi_init_qinfo descriptor is
 * staged in the frame's S/G area (the frame after the init frame) and
 * referenced by bus address.  Issued polled, before interrupts are live.
 * NOTE(review): extract is missing lines (opening brace, returns, flag
 * setup between the surviving statements); verify against upstream mfi.c.
 */
778 mfi_comms_init(struct mfi_softc *sc)
780 struct mfi_command *cm;
781 struct mfi_init_frame *init;
782 struct mfi_init_qinfo *qinfo;
784 uint32_t context = 0;
786 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
787 if ((cm = mfi_dequeue_free(sc)) == NULL)
790 /* Zero out the MFI frame */
791 context = cm->cm_frame->header.context;
792 bzero(cm->cm_frame, sizeof(union mfi_frame));
793 cm->cm_frame->header.context = context;
796 * Abuse the SG list area of the frame to hold the init_qinfo
799 init = &cm->cm_frame->init;
800 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
802 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* Reply queue holds one more entry than the firmware command count. */
803 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
804 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
805 offsetof(struct mfi_hwcomms, hw_reply_q);
806 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
807 offsetof(struct mfi_hwcomms, hw_pi);
808 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
809 offsetof(struct mfi_hwcomms, hw_ci);
811 init->header.cmd = MFI_CMD_INIT;
812 init->header.data_len = sizeof(struct mfi_init_qinfo);
813 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
815 cm->cm_flags = MFI_CMD_POLLED;
817 if ((error = mfi_mapcmd(sc, cm)) != 0) {
818 device_printf(sc->mfi_dev, "failed to send init command\n");
819 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
822 mfi_release_command(cm);
823 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch the controller-info page via a polled MFI_DCMD_CTRL_GETINFO and
 * derive sc->mfi_max_io (sectors per I/O).  On DCMD failure a conservative
 * value is computed from the S/G limit instead.
 * NOTE(review): extract is missing lines (opening brace, error gotos,
 * the kfree of ci, returns); verify against upstream mfi.c.
 */
829 mfi_get_controller_info(struct mfi_softc *sc)
831 struct mfi_command *cm = NULL;
832 struct mfi_ctrl_info *ci = NULL;
833 uint32_t max_sectors_1, max_sectors_2;
836 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
837 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
838 (void **)&ci, sizeof(*ci));
841 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
843 if ((error = mfi_mapcmd(sc, cm)) != 0) {
844 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: size I/O from the S/G limit when the query fails. */
845 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
851 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
852 BUS_DMASYNC_POSTREAD);
853 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Max I/O is the tighter of the stripe-derived and absolute limits. */
855 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
856 max_sectors_2 = ci->max_request_size;
857 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
863 mfi_release_command(cm);
864 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Fetch the firmware event-log state (sequence numbers) via a polled
 * MFI_DCMD_CTRL_EVENT_GETINFO.  On success *log_state points at an
 * M_MFIBUF allocation the caller must kfree.
 * NOTE(review): extract is missing lines (opening brace, error goto,
 * return); caller appears to hold mfi_io_lock -- verify upstream.
 */
869 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
871 struct mfi_command *cm = NULL;
874 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
875 (void **)log_state, sizeof(**log_state));
878 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
880 if ((error = mfi_mapcmd(sc, cm)) != 0) {
881 device_printf(sc->mfi_dev, "Failed to get log state\n");
885 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
886 BUS_DMASYNC_POSTREAD);
887 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
891 mfi_release_command(cm);
/*
 * Arrange async-event notification: when starting from sequence 0, read
 * the firmware log state, replay events recorded since the last shutdown,
 * and continue from the newest sequence number; then register the AEN
 * with the configured locale/class filter.
 * NOTE(review): extract is missing lines (opening brace, the seq_start!=0
 * branch, returns); verify against upstream mfi.c.
 */
897 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
899 struct mfi_evt_log_state *log_state = NULL;
900 union mfi_evt class_locale;
904 class_locale.members.reserved = 0;
905 class_locale.members.locale = mfi_event_locale;
906 class_locale.members.evt_class = mfi_event_class;
908 if (seq_start == 0) {
909 error = mfi_get_log_state(sc, &log_state);
912 kfree(log_state, M_MFIBUF);
917 * Walk through any events that fired since the last
920 mfi_parse_entries(sc, log_state->shutdown_seq_num,
921 log_state->newest_seq_num);
922 seq = log_state->newest_seq_num;
925 mfi_aen_register(sc, seq, class_locale.word);
926 if (log_state != NULL)
927 kfree(log_state, M_MFIBUF);
/*
 * Queue a command and sleep (lksleep on the command, dropping mfi_io_lock)
 * until its completion handler wakes us.  Caller must hold mfi_io_lock.
 * Returns the command's error status.
 * NOTE(review): extract is missing lines (opening brace, cm_complete
 * wakeup wiring); verify against upstream mfi.c.
 */
933 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
936 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
937 cm->cm_complete = NULL;
941 * MegaCli can issue a DCMD of 0. In this case do nothing
942 * and return 0 to it as status
944 if (cm->cm_frame->dcmd.opcode == 0) {
945 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
947 return (cm->cm_error);
949 mfi_enqueue_ready(cm);
951 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
952 lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
953 return (cm->cm_error);
/*
 * Tear down everything mfi_attach built, in reverse order: watchdog,
 * device node, per-command dmamaps and the command array, interrupt,
 * the three DMA areas (sense, frames, comms) with their tags, the data
 * and parent tags, the sysctl tree, and finally the locks.  Each step is
 * guarded so a partially attached softc can be freed safely.
 * NOTE(review): extract is missing lines (opening brace, some release
 * arguments, the #if 0 block's interior); verify against upstream mfi.c.
 */
957 mfi_free(struct mfi_softc *sc)
959 struct mfi_command *cm;
962 callout_stop(&sc->mfi_watchdog_callout); /* XXX callout_drain() */
964 if (sc->mfi_cdev != NULL)
965 destroy_dev(sc->mfi_cdev);
966 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
968 if (sc->mfi_total_cmds != 0) {
969 for (i = 0; i < sc->mfi_total_cmds; i++) {
970 cm = &sc->mfi_commands[i];
971 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
973 kfree(sc->mfi_commands, M_MFIBUF);
977 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
978 if (sc->mfi_irq != NULL)
979 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
981 if (sc->mfi_irq_type == PCI_INTR_TYPE_MSI)
982 pci_release_msi(sc->mfi_dev);
/* Sense buffers: unload, free, then destroy the tag. */
983 if (sc->mfi_sense_busaddr != 0)
984 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
985 if (sc->mfi_sense != NULL)
986 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
987 sc->mfi_sense_dmamap);
988 if (sc->mfi_sense_dmat != NULL)
989 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Command frames. */
991 if (sc->mfi_frames_busaddr != 0)
992 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
993 if (sc->mfi_frames != NULL)
994 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
995 sc->mfi_frames_dmamap);
996 if (sc->mfi_frames_dmat != NULL)
997 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Comms queues. */
999 if (sc->mfi_comms_busaddr != 0)
1000 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1001 if (sc->mfi_comms != NULL)
1002 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1003 sc->mfi_comms_dmamap);
1004 if (sc->mfi_comms_dmat != NULL)
1005 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1007 if (sc->mfi_buffer_dmat != NULL)
1008 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1009 if (sc->mfi_parent_dmat != NULL)
1010 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1012 if (sc->mfi_sysctl_tree != NULL)
1013 sysctl_ctx_free(&sc->mfi_sysctl_ctx);
1015 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1016 if (mtx_initialized(&sc->mfi_io_lock))
1019 lockuninit(&sc->mfi_io_lock);
1020 lockuninit(&sc->mfi_config_lock);
/*
 * Config-intrhook callback: release the hook, enable controller
 * interrupts, and (under both config and I/O locks) probe for logical
 * disks / system PDs.
 * NOTE(review): extract is missing lines (opening brace, the probe calls
 * between the lock pairs); verify against upstream mfi.c.
 */
1027 mfi_startup(void *arg)
1029 struct mfi_softc *sc;
1031 sc = (struct mfi_softc *)arg;
1033 config_intrhook_disestablish(&sc->mfi_ich);
1035 sc->mfi_enable_intr(sc);
/* Lock order: config lock before I/O lock. */
1036 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1037 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1039 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1041 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1042 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
/*
 * Interrupt handler (mfi_intr): acknowledge the interrupt, then drain the
 * reply queue between the hardware producer (hw_pi) and consumer (hw_ci)
 * indices, completing each referenced command, and finally publish the
 * new consumer index and unfreeze deferred I/O.
 * NOTE(review): the function's declarator line itself is missing from
 * this extract (it begins at the local declarations), along with the
 * queue-walk loop header and wrap-around body; restore from upstream.
 */
1048 struct mfi_softc *sc;
1049 struct mfi_command *cm;
1050 uint32_t pi, ci, context;
1052 sc = (struct mfi_softc *)arg;
/* Not our interrupt (or nothing pending): bail out. */
1054 if (sc->mfi_check_clear_intr(sc))
1058 * Do a dummy read to flush the interrupt ACK that we just performed,
1059 * ensuring that everything is really, truly consistent.
1061 (void)sc->mfi_read_fw_status(sc);
1063 pi = sc->mfi_comms->hw_pi;
1064 ci = sc->mfi_comms->hw_ci;
1065 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1067 context = sc->mfi_comms->hw_reply_q[ci];
1068 if (context < sc->mfi_max_fw_cmds) {
1069 cm = &sc->mfi_commands[context];
1070 mfi_remove_busy(cm);
1072 mfi_complete(sc, cm);
/* Reply queue has mfi_max_fw_cmds + 1 slots; wrap the consumer index. */
1074 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1079 sc->mfi_comms->hw_ci = ci;
1081 /* Give defered I/O a chance to run */
1082 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1083 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1085 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * Shutdown-time flush: abort any outstanding AEN command, then issue a
 * polled MFI_DCMD_CTRL_SHUTDOWN so the firmware flushes its caches.
 * NOTE(review): extract is missing lines (opening brace, returns,
 * disabling of interrupts); verify against upstream mfi.c.
 */
1091 mfi_shutdown(struct mfi_softc *sc)
1093 struct mfi_dcmd_frame *dcmd;
1094 struct mfi_command *cm;
1097 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1098 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1100 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* The AEN command is long-lived; cancel it before shutting down. */
1104 if (sc->mfi_aen_cm != NULL)
1105 mfi_abort(sc, sc->mfi_aen_cm);
1107 dcmd = &cm->cm_frame->dcmd;
1108 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1109 cm->cm_flags = MFI_CMD_POLLED;
1112 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1113 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1116 mfi_release_command(cm);
1117 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_syspdprobe: query the firmware for physical drives exposed to the
 * host (MFI_DCMD_PD_LIST_QUERY), attach new system PDs, and detach
 * syspd children whose drive no longer appears in the list.
 * Caller must hold both the config lock and the io lock (asserted).
 */
1121 mfi_syspdprobe(struct mfi_softc *sc)
1123 struct mfi_frame_header *hdr;
1124 struct mfi_command *cm = NULL;
1125 struct mfi_pd_list *pdlist = NULL;
1126 struct mfi_system_pd *syspd;
1129 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1130 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1131 /* Add SYSTEM PD's */
1132 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1133 (void **)&pdlist, sizeof(*pdlist));
1135 device_printf(sc->mfi_dev,"Error while forming syspd list\n");
/* Polled DATAIN DCMD; mbox[0] selects only host-exposed drives. */
1139 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1140 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1141 cm->cm_frame->dcmd.mbox[1] = 0;
1142 if (mfi_mapcmd(sc, cm) != 0) {
1143 device_printf(sc->mfi_dev, "Failed to get syspd device list\n");
/* Sync/unload DMA before reading the returned list from memory. */
1146 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1147 BUS_DMASYNC_POSTREAD);
1148 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1149 hdr = &cm->cm_frame->header;
1150 if (hdr->cmd_status != MFI_STAT_OK) {
1151 device_printf(sc->mfi_dev, "MFI_DCMD_PD_LIST_QUERY failed %x\n",
1155 for (i = 0; i < pdlist->count; i++) {
/* A device_id equal to encl_device_id marks an enclosure, not a PD. */
1156 if (pdlist->addr[i].device_id == pdlist->addr[i].encl_device_id)
1157 goto skip_sys_pd_add;
1158 /* Get each PD and add it to the system */
1159 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
1160 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
/* Already attached; don't add twice. */
1161 if (syspd->pd_id == pdlist->addr[i].device_id)
1162 goto skip_sys_pd_add;
1165 mfi_add_sys_pd(sc,pdlist->addr[i].device_id);
1169 /* Delete SYSPD's whose state has been changed */
1170 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
1171 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
1172 for (i=0;i<pdlist->count;i++) {
1173 if (syspd->pd_id == pdlist->addr[i].device_id)
1174 goto skip_sys_pd_delete;
1177 device_delete_child(sc->mfi_dev,syspd->pd_dev);
/* Common exit: free the list buffer and the command. */
1185 kfree(pdlist, M_MFIBUF);
1187 mfi_release_command(cm);
/*
 * mfi_ldprobe: fetch the logical-disk list (MFI_DCMD_LD_GET_LIST) and
 * attach an mfid child for every LD not already represented on
 * mfi_ld_tqh.  Caller must hold the config and io locks (asserted).
 */
1191 mfi_ldprobe(struct mfi_softc *sc)
1193 struct mfi_frame_header *hdr;
1194 struct mfi_command *cm = NULL;
1195 struct mfi_ld_list *list = NULL;
1196 struct mfi_disk *ld;
1199 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1200 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1202 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1203 (void **)&list, sizeof(*list));
/* Sleeping wait here (not polled), unlike mfi_syspdprobe. */
1207 cm->cm_flags = MFI_CMD_DATAIN;
1208 if (mfi_wait_command(sc, cm) != 0) {
1209 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1213 hdr = &cm->cm_frame->header;
1214 if (hdr->cmd_status != MFI_STAT_OK) {
1215 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1220 for (i = 0; i < list->ld_count; i++) {
1221 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
/* Already known target id — the (unsampled) continue/goto skips it. */
1222 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1225 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1230 kfree(list, M_MFIBUF);
1232 mfi_release_command(cm);
1238 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1239 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * format_timestamp: render an MFI event timestamp into a static buffer
 * (not reentrant/thread-safe: shared static storage).
 */
1243 format_timestamp(uint32_t timestamp)
1245 static char buffer[32];
/* 0xffxxxxxx encodes "seconds since boot" rather than absolute time. */
1247 if ((timestamp & 0xff000000) == 0xff000000)
1248 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1251 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * format_class: map an MFI event class code to a short name; unknown
 * codes are printed numerically into a static buffer (not reentrant).
 */
1256 format_class(int8_t class)
1258 static char buffer[6];
1261 case MFI_EVT_CLASS_DEBUG:
1263 case MFI_EVT_CLASS_PROGRESS:
1264 return ("progress");
1265 case MFI_EVT_CLASS_INFO:
1267 case MFI_EVT_CLASS_WARNING:
1269 case MFI_EVT_CLASS_CRITICAL:
1271 case MFI_EVT_CLASS_FATAL:
1273 case MFI_EVT_CLASS_DEAD:
/* Fallback for class codes without a symbolic name. */
1276 ksnprintf(buffer, sizeof(buffer), "%d", class);
/*
 * mfi_decode_evt: log one firmware event (sequence, timestamp, locale,
 * class, description) via device_printf.
 */
1282 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1285 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1286 format_timestamp(detail->time), detail->evt_class.members.locale,
1287 format_class(detail->evt_class.members.evt_class), detail->description);
/*
 * mfi_aen_register: arm the asynchronous event notification (AEN)
 * machinery.  If an AEN command is already outstanding and its
 * class/locale already covers the request, do nothing; otherwise widen
 * the prior command's coverage, abort it, and issue a fresh
 * MFI_DCMD_CTRL_EVENT_WAIT that completes via mfi_aen_complete.
 */
1291 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1293 struct mfi_command *cm;
1294 struct mfi_dcmd_frame *dcmd;
1295 union mfi_evt current_aen, prior_aen;
1296 struct mfi_evt_detail *ed = NULL;
1299 current_aen.word = locale;
1300 if (sc->mfi_aen_cm != NULL) {
/* mbox[1] of the outstanding command holds its evt class/locale word. */
1302 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Covered already: prior class is at least as verbose and prior locale
 * is a superset of the requested locale bits. */
1303 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1304 !((prior_aen.members.locale & current_aen.members.locale)
1305 ^current_aen.members.locale)) {
/* Merge locales and keep the more verbose (smaller) class, then abort
 * the old command so it can be reissued with the wider filter. */
1308 prior_aen.members.locale |= current_aen.members.locale;
1309 if (prior_aen.members.evt_class
1310 < current_aen.members.evt_class)
1311 current_aen.members.evt_class =
1312 prior_aen.members.evt_class;
1313 mfi_abort(sc, sc->mfi_aen_cm);
1317 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1318 (void **)&ed, sizeof(*ed));
1323 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale word. */
1324 ((uint32_t *)&dcmd->mbox)[0] = seq;
1325 ((uint32_t *)&dcmd->mbox)[1] = locale;
1326 cm->cm_flags = MFI_CMD_DATAIN;
1327 cm->cm_complete = mfi_aen_complete;
1329 sc->mfi_aen_cm = cm;
1331 mfi_enqueue_ready(cm);
/*
 * mfi_aen_complete: completion handler for the AEN event-wait command.
 * Decodes the delivered event, notifies kqueue/SIGIO listeners, frees
 * the command, and (unless aborted) re-arms AEN via mfi_aen_setup with
 * the next sequence number.
 */
1339 mfi_aen_complete(struct mfi_command *cm)
1341 struct mfi_frame_header *hdr;
1342 struct mfi_softc *sc;
1343 struct mfi_evt_detail *detail;
1344 struct mfi_aen *mfi_aen_entry, *tmp;
1345 int seq = 0, aborted = 0;
1348 hdr = &cm->cm_frame->header;
1350 if (sc->mfi_aen_cm == NULL)
/* Either explicitly aborted or firmware never filled in a status. */
1353 if (sc->mfi_aen_cm->cm_aen_abort ||
1354 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1355 sc->mfi_aen_cm->cm_aen_abort = 0;
/* Wake pollers/kevent waiters that an event arrived. */
1358 sc->mfi_aen_triggered = 1;
1359 if (sc->mfi_poll_waiting) {
1360 sc->mfi_poll_waiting = 0;
1361 KNOTE(&sc->mfi_kq.ki_note, 0);
1363 detail = cm->cm_data;
1365 * XXX If this function is too expensive or is recursive, then
1366 * events should be put onto a queue and processed later.
1368 mfi_decode_evt(sc, detail);
1369 seq = detail->seq + 1;
/* Deliver SIGIO to every registered process, consuming the list. */
1370 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1371 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1373 lwkt_gettoken(&proc_token);
1374 ksignal(mfi_aen_entry->p, SIGIO);
1375 lwkt_reltoken(&proc_token);
1376 kfree(mfi_aen_entry, M_MFIBUF);
1380 kfree(cm->cm_data, M_MFIBUF);
/* Clear the slot and wake mfi_abort()'s lksleep on &sc->mfi_aen_cm. */
1381 sc->mfi_aen_cm = NULL;
1382 wakeup(&sc->mfi_aen_cm);
1383 mfi_release_command(cm);
1385 /* set it up again so the driver can catch more events */
1387 mfi_aen_setup(sc, seq);
1391 #define MAX_EVENTS 15
/*
 * mfi_parse_entries: replay the firmware event log from start_seq up to
 * (but not including) stop_seq, decoding each entry.  Fetches events in
 * batches of MAX_EVENTS with polled MFI_DCMD_CTRL_EVENT_GET commands.
 * The log is circular, so the stop condition handles wrap-around.
 */
1394 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1396 struct mfi_command *cm;
1397 struct mfi_dcmd_frame *dcmd;
1398 struct mfi_evt_list *el;
1399 union mfi_evt class_locale;
1400 int error, i, seq, size;
1401 uint32_t context = 0;
1403 class_locale.members.reserved = 0;
1404 class_locale.members.locale = mfi_event_locale;
1405 class_locale.members.evt_class = mfi_event_class;
/* Buffer sized for the list header plus MAX_EVENTS detail records
 * (the multiplier line is not visible in this chunk). */
1407 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1409 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1413 for (seq = start_seq;;) {
1414 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1415 kfree(el, M_MFIBUF);
1419 /* Zero out the MFI frame */
/* Preserve the firmware context across the bzero. */
1420 context = cm->cm_frame->header.context;
1421 bzero(cm->cm_frame, sizeof(union mfi_frame));
1422 cm->cm_frame->header.context = context;
1424 dcmd = &cm->cm_frame->dcmd;
1425 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1426 dcmd->header.cmd = MFI_CMD_DCMD;
1427 dcmd->header.timeout = 0;
1428 dcmd->header.data_len = size;
1429 dcmd->header.scsi_status = 0;
1430 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1431 ((uint32_t *)&dcmd->mbox)[0] = seq;
1432 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1433 cm->cm_sg = &dcmd->sgl;
1434 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1435 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1439 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1440 device_printf(sc->mfi_dev,
1441 "Failed to get controller entries\n");
1442 mfi_release_command(cm);
1446 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1447 BUS_DMASYNC_POSTREAD);
1448 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we've read past the newest log entry: done. */
1450 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1451 mfi_release_command(cm);
1454 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1455 device_printf(sc->mfi_dev,
1456 "Error %d fetching controller entries\n",
1457 dcmd->header.cmd_status);
1458 mfi_release_command(cm);
1461 mfi_release_command(cm);
1463 for (i = 0; i < el->count; i++) {
1465 * If this event is newer than 'stop_seq' then
1466 * break out of the loop. Note that the log
1467 * is a circular buffer so we have to handle
1468 * the case that our stop point is earlier in
1469 * the buffer than our start point.
1471 if (el->event[i].seq >= stop_seq) {
1472 if (start_seq <= stop_seq)
1474 else if (el->event[i].seq < start_seq)
1477 mfi_decode_evt(sc, &el->event[i]);
/* Continue the next batch after the last sequence we saw. */
1479 seq = el->event[el->count - 1].seq + 1;
1482 kfree(el, M_MFIBUF);
/*
 * mfi_add_ld: fetch MFI_DCMD_LD_GET_INFO for logical drive `id` and,
 * unless the LD is an SSCD (CacheCade) volume, hand the command to
 * mfi_add_ld_complete to attach the mfid child.  io lock must be held.
 */
1487 mfi_add_ld(struct mfi_softc *sc, int id)
1489 struct mfi_command *cm;
1490 struct mfi_dcmd_frame *dcmd = NULL;
1491 struct mfi_ld_info *ld_info = NULL;
1494 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1496 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1497 (void **)&ld_info, sizeof(*ld_info));
1499 device_printf(sc->mfi_dev,
1500 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1502 kfree(ld_info, M_MFIBUF);
1505 cm->cm_flags = MFI_CMD_DATAIN;
1506 dcmd = &cm->cm_frame->dcmd;
1508 if (mfi_wait_command(sc, cm) != 0) {
1509 device_printf(sc->mfi_dev,
1510 "Failed to get logical drive: %d\n", id);
1511 kfree(ld_info, M_MFIBUF);
/* SSCD volumes are not attached as disks; their info is freed below. */
1514 if (ld_info->ld_config.params.isSSCD != 1) {
/* mfi_add_ld_complete takes ownership of cm and ld_info. */
1515 mfi_add_ld_complete(cm);
1517 mfi_release_command(cm);
1518 if(ld_info) /* SSCD drives ld_info free here */
1519 kfree(ld_info, M_MFIBUF);
/*
 * mfi_add_ld_complete: given a finished LD_GET_INFO command, release it
 * and attach an "mfid" child device carrying ld_info as ivars.  Drops
 * the io lock around the newbus calls and re-acquires it before return.
 */
1525 mfi_add_ld_complete(struct mfi_command *cm)
1527 struct mfi_frame_header *hdr;
1528 struct mfi_ld_info *ld_info;
1529 struct mfi_softc *sc;
1533 hdr = &cm->cm_frame->header;
/* ld_info was stashed in cm_private by mfi_dcmd_command. */
1534 ld_info = cm->cm_private;
1536 if (hdr->cmd_status != MFI_STAT_OK) {
1537 kfree(ld_info, M_MFIBUF);
1538 mfi_release_command(cm);
1541 mfi_release_command(cm);
/* device_add_child/bus_generic_attach may sleep: drop the io lock. */
1543 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1545 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1546 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1547 kfree(ld_info, M_MFIBUF);
1549 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* The child frees ld_info via its ivars on detach (ownership passes). */
1553 device_set_ivars(child, ld_info);
1554 device_set_desc(child, "MFI Logical Disk");
1555 bus_generic_attach(sc->mfi_dev);
1557 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * mfi_add_sys_pd: polled MFI_DCMD_PD_GET_INFO for physical drive `id`,
 * then hand off to mfi_add_sys_pd_complete to attach the mfisyspd
 * child.  io lock must be held (asserted).
 */
1561 mfi_add_sys_pd(struct mfi_softc *sc,int id)
1563 struct mfi_command *cm;
1564 struct mfi_dcmd_frame *dcmd = NULL;
1565 struct mfi_pd_info *pd_info = NULL;
1568 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1570 error = mfi_dcmd_command(sc,&cm,MFI_DCMD_PD_GET_INFO,
1571 (void **)&pd_info, sizeof(*pd_info));
1573 device_printf(sc->mfi_dev,
1574 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n", error);
1576 kfree(pd_info,M_MFIBUF);
1579 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1580 dcmd = &cm->cm_frame->dcmd;
1582 dcmd->header.scsi_status = 0;
1583 dcmd->header.pad0 = 0;
1584 if (mfi_mapcmd(sc, cm) != 0) {
1585 device_printf(sc->mfi_dev,
1586 "Failed to get physical drive info %d\n", id);
1587 kfree(pd_info,M_MFIBUF);
1590 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1591 BUS_DMASYNC_POSTREAD);
1592 bus_dmamap_unload(sc->mfi_buffer_dmat,cm->cm_dmamap);
/* Completion path attaches the child and releases cm/pd_info. */
1593 mfi_add_sys_pd_complete(cm);
/*
 * mfi_add_sys_pd_complete: validate the PD_GET_INFO result (must be a
 * SYSTEM-state drive) and attach an "mfisyspd" child with pd_info as
 * ivars.  Mirrors mfi_add_ld_complete, including the io-lock dance
 * around the newbus calls.
 */
1598 mfi_add_sys_pd_complete(struct mfi_command *cm)
1600 struct mfi_frame_header *hdr;
1601 struct mfi_pd_info *pd_info;
1602 struct mfi_softc *sc;
1606 hdr = &cm->cm_frame->header;
1607 pd_info = cm->cm_private;
1609 if (hdr->cmd_status != MFI_STAT_OK) {
1610 kfree(pd_info, M_MFIBUF);
1611 mfi_release_command(cm);
/* Only drives in SYSTEM state are exposed as syspd devices. */
1614 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1615 device_printf(sc->mfi_dev,"PD=%x is not SYSTEM PD\n",
1616 pd_info->ref.v.device_id);
1617 kfree(pd_info, M_MFIBUF);
1618 mfi_release_command(cm);
1621 mfi_release_command(cm);
/* Drop the io lock for the potentially-sleeping newbus attach. */
1623 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1625 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1626 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1627 kfree(pd_info, M_MFIBUF);
1629 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Ownership of pd_info passes to the child via ivars. */
1633 device_set_ivars(child, pd_info);
1634 device_set_desc(child, "MFI System PD");
1635 bus_generic_attach(sc->mfi_dev);
1637 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/*
 * mfi_bio_command: pull the next bio off the driver's bio queue and
 * build an MFI command for it — a SCSI passthru frame for system PDs,
 * an LD I/O frame otherwise.  On build failure the bio is requeued.
 * Returns the command, or NULL when resources are short / queue empty.
 */
1640 static struct mfi_command *
1641 mfi_bio_command(struct mfi_softc *sc)
1644 struct mfi_command *cm = NULL;
1645 struct mfi_disk *mfid;
1647 /* reserving two commands to avoid starvation for IOCTL */
1648 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1650 if ((bio = mfi_dequeue_bio(sc)) == NULL)
1652 mfid = bio->bio_driver_info;
1653 if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1654 cm = mfi_build_syspdio(sc, bio);
1656 cm = mfi_build_ldio(sc, bio);
/* Build failed (no free command): put the bio back for later. */
1658 mfi_enqueue_bio(sc,bio);
1662 static struct mfi_command *
/*
 * mfi_build_syspdio: translate a bio into a 10-byte READ/WRITE SCSI
 * passthru frame (MFI_CMD_PD_SCSI_IO) addressed to a system physical
 * drive.  Returns NULL if no free command is available.
 * NOTE(review): `bp` is derived from `bio` on lines not sampled in this
 * chunk.
 */
1663 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1665 struct mfi_command *cm;
1667 struct mfi_system_pd *disk;
1668 struct mfi_pass_frame *pass;
1669 int flags = 0,blkcount = 0;
1670 uint32_t context = 0;
1672 if ((cm = mfi_dequeue_free(sc)) == NULL)
1675 /* Zero out the MFI frame */
1676 context = cm->cm_frame->header.context;
1677 bzero(cm->cm_frame, sizeof(union mfi_frame));
1678 cm->cm_frame->header.context = context;
1680 pass = &cm->cm_frame->pass;
1681 bzero(pass->cdb, 16);
1682 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
1683 switch (bp->b_cmd & 0x03) {
1685 pass->cdb[0] = READ_10;
1686 flags = MFI_CMD_DATAIN;
1689 pass->cdb[0] = WRITE_10;
1690 flags = MFI_CMD_DATAOUT;
1693 panic("Invalid bio command");
1696 /* Cheat with the sector length to avoid a non-constant division */
1697 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1698 disk = bio->bio_driver_info;
1699 /* Fill the LBA and Transfer length in CDB */
/* 32-bit LBA big-endian into CDB bytes 2-5, per READ(10)/WRITE(10). */
1700 pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
1701 pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
1702 pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
1703 pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
1704 pass->cdb[7] = (blkcount & 0xff00) >> 8;
1705 pass->cdb[8] = (blkcount & 0x00ff);
1706 pass->header.target_id = disk->pd_id;
1707 pass->header.timeout = 0;
1708 pass->header.flags = 0;
1709 pass->header.scsi_status = 0;
1710 pass->header.sense_len = MFI_SENSE_LEN;
1711 pass->header.data_len = bp->b_bcount;
1712 pass->header.cdb_len = 10;
/* Split the 64-bit sense buffer bus address on x86_64. */
1713 #if defined(__x86_64__)
1714 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
1715 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
1717 pass->sense_addr_lo = cm->cm_sense_busaddr;
1718 pass->sense_addr_hi = 0;
1720 cm->cm_complete = mfi_bio_complete;
1721 cm->cm_private = bio;
1722 cm->cm_data = bp->b_data;
1723 cm->cm_len = bp->b_bcount;
1724 cm->cm_sg = &pass->sgl;
1725 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
1726 cm->cm_flags = flags;
1730 static struct mfi_command *
/*
 * mfi_build_ldio: translate a bio into a native MFI LD read/write frame
 * (MFI_CMD_LD_READ/WRITE) with a 64-bit LBA split across lba_hi/lba_lo.
 * Returns NULL if no free command is available.
 * NOTE(review): `bp` is derived from `bio` on lines not sampled here.
 */
1731 mfi_build_ldio(struct mfi_softc *sc,struct bio *bio)
1733 struct mfi_io_frame *io;
1735 struct mfi_disk *disk;
1736 struct mfi_command *cm;
1737 int flags, blkcount;
1738 uint32_t context = 0;
1740 if ((cm = mfi_dequeue_free(sc)) == NULL)
1743 /* Zero out the MFI frame */
1744 context = cm->cm_frame->header.context;
1745 bzero(cm->cm_frame,sizeof(union mfi_frame));
1746 cm->cm_frame->header.context = context;
1748 io = &cm->cm_frame->io;
1749 switch (bp->b_cmd & 0x03) {
1751 io->header.cmd = MFI_CMD_LD_READ;
1752 flags = MFI_CMD_DATAIN;
1755 io->header.cmd = MFI_CMD_LD_WRITE;
1756 flags = MFI_CMD_DATAOUT;
1759 panic("Invalid bio command");
1762 /* Cheat with the sector length to avoid a non-constant division */
1763 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1764 disk = bio->bio_driver_info;
1765 io->header.target_id = disk->ld_id;
1766 io->header.timeout = 0;
1767 io->header.flags = 0;
1768 io->header.scsi_status = 0;
1769 io->header.sense_len = MFI_SENSE_LEN;
/* Unlike the passthru frame, data_len here is in sectors. */
1770 io->header.data_len = blkcount;
1771 #if defined(__x86_64__)
1772 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
1773 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
1775 io->sense_addr_lo = cm->cm_sense_busaddr;
1776 io->sense_addr_hi = 0;
1778 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
1779 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
1780 cm->cm_complete = mfi_bio_complete;
1781 cm->cm_private = bio;
1782 cm->cm_data = bp->b_data;
1783 cm->cm_len = bp->b_bcount;
1784 cm->cm_sg = &io->sgl;
1785 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1786 cm->cm_flags = flags;
/*
 * mfi_bio_complete: completion handler for LD/syspd bio commands.
 * Propagates firmware or mapping errors to the buf via B_ERROR, frees
 * the command, and finishes the bio through mfi_disk_complete.
 * NOTE(review): `bp` is derived from `bio` on lines not sampled here.
 */
1791 mfi_bio_complete(struct mfi_command *cm)
1795 struct mfi_frame_header *hdr;
1796 struct mfi_softc *sc;
1798 bio = cm->cm_private;
1800 hdr = &cm->cm_frame->header;
/* Firmware status or SCSI status failure: flag the buf and log sense. */
1803 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
1804 bp->b_flags |= B_ERROR;
1806 device_printf(sc->mfi_dev, "I/O error, status= %d "
1807 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1808 mfi_print_sense(cm->cm_sc, cm->cm_sense);
1809 } else if (cm->cm_error != 0) {
1810 bp->b_flags |= B_ERROR;
1813 mfi_release_command(cm);
1814 mfi_disk_complete(bio);
/*
 * mfi_startio: drain work to the controller.  Pulls commands in
 * priority order — already-prepared (ready queue), CAM passthru CCBs,
 * then the bio queue — and maps/sends each until the queues are empty
 * or the controller is frozen (MFI_FLAGS_QFRZN).
 */
1818 mfi_startio(struct mfi_softc *sc)
1820 struct mfi_command *cm;
1821 struct ccb_hdr *ccbh;
1824 /* Don't bother if we're short on resources */
1825 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1828 /* Try a command that has already been prepared */
1829 cm = mfi_dequeue_ready(sc);
/* Next preference: CAM-originated work, started via mfi_cam_start. */
1832 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1833 cm = sc->mfi_cam_start(ccbh);
1836 /* Nope, so look for work on the bioq */
1838 cm = mfi_bio_command(sc);
1840 /* No work available, so exit */
1844 /* Send the command to the controller */
/* Map failure: requeue at the head so ordering is preserved. */
1845 if (mfi_mapcmd(sc, cm) != 0) {
1846 mfi_requeue_ready(cm);
/*
 * mfi_mapcmd: DMA-map a command's data buffer (if any) and send it to
 * the firmware.  Mapping completes via mfi_data_cb, which performs the
 * actual send; EINPROGRESS (deferred mapping) freezes the queue until
 * resources free up.  Commands without data are sent directly.
 * io lock must be held (asserted).
 */
1853 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1857 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1859 if (cm->cm_data != NULL) {
/* Polled commands must not block waiting for map resources. */
1860 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1861 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1862 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1863 if (error == EINPROGRESS) {
1864 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1868 error = mfi_send_frame(sc, cm);
/*
 * mfi_data_cb: bus_dma load callback.  Fills the command's scatter/
 * gather list in the format the controller requires (IEEE "skinny"
 * SGEs for I/O on SKINNY controllers, else 32- or 64-bit SGEs), syncs
 * the DMA map for the transfer direction, fixes up the frame size /
 * extra-frame count, and sends the frame.
 */
1875 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1877 struct mfi_frame_header *hdr;
1878 struct mfi_command *cm;
1880 struct mfi_softc *sc;
1881 int i, j, first, dir;
1885 cm = (struct mfi_command *)arg;
1887 hdr = &cm->cm_frame->header;
/* Map failure: record it and complete the command with cm_error set. */
1891 kprintf("error %d in callback\n", error);
1892 cm->cm_error = error;
1893 mfi_complete(sc, cm);
1897 /* Use IEEE sgl only for IO's on a SKINNY controller
1898 * For other commands on a SKINNY controller use either
1899 * sg32 or sg64 based on the sizeof(bus_addr_t).
1900 * Also calculate the total frame size based on the type
1903 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
1904 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
1905 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
1906 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
1907 for (i = 0; i < nsegs; i++) {
1908 sgl->sg_skinny[i].addr = segs[i].ds_addr;
1909 sgl->sg_skinny[i].len = segs[i].ds_len;
1910 sgl->sg_skinny[i].flag = 0;
1912 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
1914 sge_size = sizeof(struct mfi_sg_skinny);
/* STP commands carry cm_stp_len bytes in-frame; the first SGE covers
 * that prefix and the remaining segments are offset by `first`. */
1918 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
1919 first = cm->cm_stp_len;
1920 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1921 sgl->sg32[j].addr = segs[0].ds_addr;
1922 sgl->sg32[j++].len = first;
1924 sgl->sg64[j].addr = segs[0].ds_addr;
1925 sgl->sg64[j++].len = first;
1929 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1930 for (i = 0; i < nsegs; i++) {
1931 sgl->sg32[j].addr = segs[i].ds_addr + first;
1932 sgl->sg32[j++].len = segs[i].ds_len - first;
1935 sge_size = sizeof(struct mfi_sg32);
1937 for (i = 0; i < nsegs; i++) {
1938 sgl->sg64[j].addr = segs[i].ds_addr + first;
1939 sgl->sg64[j++].len = segs[i].ds_len - first;
1942 hdr->flags |= MFI_FRAME_SGL64;
1943 sge_size = sizeof(struct mfi_sg64);
1947 hdr->sg_count = nsegs;
/* Sync direction and frame direction flags follow the data flags. */
1950 if (cm->cm_flags & MFI_CMD_DATAIN) {
1951 dir |= BUS_DMASYNC_PREREAD;
1952 hdr->flags |= MFI_FRAME_DIR_READ;
1954 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1955 dir |= BUS_DMASYNC_PREWRITE;
1956 hdr->flags |= MFI_FRAME_DIR_WRITE;
1958 if (cm->cm_frame->header.cmd == MFI_CMD_STP)
1959 dir |= BUS_DMASYNC_PREWRITE;
1960 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1961 cm->cm_flags |= MFI_CMD_MAPPED;
1964 * Instead of calculating the total number of frames in the
1965 * compound frame, it's already assumed that there will be at
1966 * least 1 frame, so don't compensate for the modulo of the
1967 * following division.
1969 cm->cm_total_frame_size += (sge_size * nsegs);
1970 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1972 mfi_send_frame(sc, cm);
/*
 * mfi_send_frame: hand a fully-built command to the firmware via the
 * controller-specific issue routine.  Non-polled commands go on the
 * busy queue and complete via interrupt; polled commands busy-wait here
 * (up to MFI_POLL_TIMEOUT_SECS) for cmd_status to change.
 */
1976 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1978 struct mfi_frame_header *hdr;
/* Poll budget in milliseconds (loop body with the delay not sampled). */
1979 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1981 hdr = &cm->cm_frame->header;
1983 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1984 cm->cm_timestamp = time_second;
1985 mfi_enqueue_busy(cm);
/* Polled: sentinel status lets us detect completion by polling, and
 * the frame is kept out of the reply queue. */
1987 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1988 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1992 * The bus address of the command is aligned on a 64 byte boundary,
1993 * leaving the least 6 bits as zero. For whatever reason, the
1994 * hardware wants the address shifted right by three, leaving just
1995 * 3 zero bits. These three bits are then used as a prefetching
1996 * hint for the hardware to predict how many frames need to be
1997 * fetched across the bus. If a command has more than 8 frames
1998 * then the 3 bits are set to 0x7 and the firmware uses other
1999 * information in the command to determine the total amount to fetch.
2000 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2001 * is enough for both 32bit and 64bit systems.
2003 if (cm->cm_extra_frames > 7)
2004 cm->cm_extra_frames = 7;
2006 sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames);
2008 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2011 /* This is a polled command, so busy-wait for it to complete. */
2012 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
/* Status never changed from the sentinel: report the timeout. */
2019 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2020 device_printf(sc->mfi_dev, "Frame %p timed out "
2021 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * mfi_complete: common command-completion path.  Unmaps DMA with the
 * appropriate post-sync direction, marks the command completed, and
 * invokes its cm_complete callback if one is set.
 */
2029 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2033 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP commands are synced for read as well, matching mfi_data_cb. */
2035 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2036 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2037 dir |= BUS_DMASYNC_POSTREAD;
2038 if (cm->cm_flags & MFI_CMD_DATAOUT)
2039 dir |= BUS_DMASYNC_POSTWRITE;
2041 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2042 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2043 cm->cm_flags &= ~MFI_CMD_MAPPED;
2046 cm->cm_flags |= MFI_CMD_COMPLETED;
2048 if (cm->cm_complete != NULL)
2049 cm->cm_complete(cm);
/*
 * mfi_abort: issue a polled MFI_CMD_ABORT frame targeting cm_abort's
 * context/bus address, then wait (bounded retries of lksleep on
 * &sc->mfi_aen_cm) for the aborted AEN command to complete.
 * io lock must be held (asserted).
 */
2055 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2057 struct mfi_command *cm;
2058 struct mfi_abort_frame *abort;
2060 uint32_t context = 0;
2062 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2064 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2068 /* Zero out the MFI frame */
2069 context = cm->cm_frame->header.context;
2070 bzero(cm->cm_frame, sizeof(union mfi_frame));
2071 cm->cm_frame->header.context = context;
2073 abort = &cm->cm_frame->abort;
2074 abort->header.cmd = MFI_CMD_ABORT;
2075 abort->header.flags = 0;
2076 abort->header.scsi_status = 0;
/* Identify the victim by its firmware context and frame bus address. */
2077 abort->abort_context = cm_abort->cm_frame->header.context;
2078 #if defined(__x86_64__)
2079 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr & 0xFFFFFFFF;
2080 abort->abort_mfi_addr_hi = (cm_abort->cm_frame_busaddr & 0xFFFFFFFF00000000 ) >> 32 ;
2082 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
2083 abort->abort_mfi_addr_hi = 0;
2086 cm->cm_flags = MFI_CMD_POLLED;
/* Tell the AEN completion path this was a deliberate abort. */
2088 sc->mfi_aen_cm->cm_aen_abort = 1;
2090 mfi_release_command(cm);
/* mfi_aen_complete clears mfi_aen_cm and wakes this sleep. */
2092 while (i < 5 && sc->mfi_aen_cm != NULL) {
2093 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
/*
 * mfi_dump_blocks: write `len` bytes at `lba` on logical drive `id`
 * using a polled MFI_CMD_LD_WRITE frame.  Used for crash dumps, where
 * interrupts are unavailable.
 */
2101 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
2103 struct mfi_command *cm;
2104 struct mfi_io_frame *io;
2106 uint32_t context = 0;
2108 if ((cm = mfi_dequeue_free(sc)) == NULL)
2111 /* Zero out the MFI frame */
2112 context = cm->cm_frame->header.context;
2113 bzero(cm->cm_frame, sizeof(union mfi_frame));
2114 cm->cm_frame->header.context = context;
2116 io = &cm->cm_frame->io;
2117 io->header.cmd = MFI_CMD_LD_WRITE;
2118 io->header.target_id = id;
2119 io->header.timeout = 0;
2120 io->header.flags = 0;
2121 io->header.scsi_status = 0;
2122 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is in sectors, rounded up. */
2123 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2124 #if defined(__x86_64__)
2125 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2126 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000 ) >> 32;
2128 io->sense_addr_lo = cm->cm_sense_busaddr;
2129 io->sense_addr_hi = 0;
2131 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2132 io->lba_lo = lba & 0xffffffff;
2135 cm->cm_sg = &io->sgl;
2136 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2137 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2139 error = mfi_mapcmd(sc, cm);
2140 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2141 BUS_DMASYNC_POSTWRITE);
2142 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2143 mfi_release_command(cm);
/*
 * mfi_dump_syspd_blocks: crash-dump write to a system physical drive
 * via a polled WRITE(10) SCSI passthru frame.  32-bit LBA only, per
 * the 10-byte CDB.
 */
2149 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2152 struct mfi_command *cm;
2153 struct mfi_pass_frame *pass;
2157 if ((cm = mfi_dequeue_free(sc)) == NULL)
2160 pass = &cm->cm_frame->pass;
2161 bzero(pass->cdb, 16);
2162 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2163 pass->cdb[0] = WRITE_10;
/* Big-endian 32-bit LBA into CDB bytes 2-5. */
2164 pass->cdb[2] = (lba & 0xff000000) >> 24;
2165 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2166 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2167 pass->cdb[5] = (lba & 0x000000ff);
2168 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2169 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2170 pass->cdb[8] = (blkcount & 0x00ff);
2171 pass->header.target_id = id;
2172 pass->header.timeout = 0;
2173 pass->header.flags = 0;
2174 pass->header.scsi_status = 0;
2175 pass->header.sense_len = MFI_SENSE_LEN;
2176 pass->header.data_len = len;
2177 pass->header.cdb_len = 10;
2178 #if defined(__x86_64__)
2179 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2180 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000 ) >> 32;
2182 pass->sense_addr_lo = cm->cm_sense_busaddr;
2183 pass->sense_addr_hi = 0;
2187 cm->cm_sg = &pass->sgl;
2188 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2189 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2191 error = mfi_mapcmd(sc, cm);
2192 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2193 BUS_DMASYNC_POSTWRITE);
2194 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2195 mfi_release_command(cm);
/*
 * mfi_open: character-device open.  Refuses opens while the driver is
 * detaching; otherwise marks the softc open.  Serialized by the io lock.
 */
2201 mfi_open(struct dev_open_args *ap)
2203 cdev_t dev = ap->a_head.a_dev;
2204 struct mfi_softc *sc;
2209 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2210 if (sc->mfi_detaching)
2213 sc->mfi_flags |= MFI_FLAGS_OPEN;
2216 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_close: character-device close.  Clears the open flag and removes
 * any AEN SIGIO registrations belonging to the closing process.
 */
2222 mfi_close(struct dev_close_args *ap)
2224 cdev_t dev = ap->a_head.a_dev;
2225 struct mfi_softc *sc;
2226 struct mfi_aen *mfi_aen_entry, *tmp;
2230 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2231 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2233 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
/* Only this process's registrations are removed. */
2234 if (mfi_aen_entry->p == curproc) {
2235 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2237 kfree(mfi_aen_entry, M_MFIBUF);
2240 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/*
 * mfi_config_lock: take the config lock for DCMDs that change the
 * array configuration (LD delete, config add/clear); other opcodes are
 * left unlocked.  Returns a token mfi_config_unlock uses to know
 * whether to release.
 */
2245 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2249 case MFI_DCMD_LD_DELETE:
2250 case MFI_DCMD_CFG_ADD:
2251 case MFI_DCMD_CFG_CLEAR:
2252 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
/*
 * mfi_config_unlock: release the config lock iff mfi_config_lock
 * reported it was taken.
 */
2260 mfi_config_unlock(struct mfi_softc *sc, int locked)
2264 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2267 /* Perform pre-issue checks on commands from userland and possibly veto them. */
/*
 * mfi_check_command_pre: before a userland DCMD runs, disable the disk
 * devices it would invalidate (LD delete / config clear / PD state
 * change to unconfigured-good).  A busy disk vetoes the command; on
 * partial failure previously-disabled disks are re-enabled.
 * io lock must be held (asserted).
 */
2269 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2271 struct mfi_disk *ld, *ld2;
2273 struct mfi_system_pd *syspd = NULL;
2277 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2279 switch (cm->cm_frame->dcmd.opcode) {
2280 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id. */
2281 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2282 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2288 error = mfi_disk_disable(ld);
2290 case MFI_DCMD_CFG_CLEAR:
/* Clear affects every LD: disable all; roll back on failure. */
2291 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2292 error = mfi_disk_disable(ld);
2297 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2300 mfi_disk_enable(ld2);
2304 case MFI_DCMD_PD_STATE_SET:
/* mbox layout: [0] device id (16-bit), [2] requested state. */
2305 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2307 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2308 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2309 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2310 if (syspd->pd_id == syspd_id)
2318 error = mfi_syspd_disable(syspd);
2326 /* Perform post-issue checks on commands from userland. */
/*
 * mfi_check_command_post: after a userland DCMD completes, detach the
 * child devices a successful command removed, or re-enable the disks
 * a failed command left disabled.  Complements mfi_check_command_pre.
 */
2328 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2330 struct mfi_disk *ld, *ldn;
2331 struct mfi_system_pd *syspd = NULL;
2335 switch (cm->cm_frame->dcmd.opcode) {
2336 case MFI_DCMD_LD_DELETE:
2337 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2338 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2341 KASSERT(ld != NULL, ("volume dissappeared"));
2342 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* Drop the io lock around the sleeping newbus delete. */
2343 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2345 device_delete_child(sc->mfi_dev, ld->ld_dev);
2347 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Command failed: undo the pre-issue disable. */
2349 mfi_disk_enable(ld);
2351 case MFI_DCMD_CFG_CLEAR:
2352 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2353 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2355 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2356 device_delete_child(sc->mfi_dev, ld->ld_dev);
2359 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2361 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2362 mfi_disk_enable(ld);
2365 case MFI_DCMD_CFG_ADD:
2368 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2371 case MFI_DCMD_PD_STATE_SET:
2372 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2374 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2375 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2376 TAILQ_FOREACH(syspd,&sc->mfi_syspd_tqh,pd_link) {
2377 if (syspd->pd_id == syspd_id)
2384 /* If the transition fails then enable the syspd again */
2385 if(syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2386 mfi_syspd_enable(syspd);
/*
 * mfi_user_command: execute a userland DCMD passthru (ioctl path).
 * Copies in the user buffer, builds the frame from ioc_frame, runs the
 * pre/post veto checks around a sleeping wait, then copies the frame
 * header and data back out.  Direction is unknown, so the buffer is
 * mapped both in and out.
 */
2392 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2394 struct mfi_command *cm;
2395 struct mfi_dcmd_frame *dcmd;
2396 void *ioc_buf = NULL;
2398 int error = 0, locked;
2401 if (ioc->buf_size > 0) {
2402 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2403 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2405 device_printf(sc->mfi_dev, "failed to copyin\n");
2406 kfree(ioc_buf, M_MFIBUF);
/* Config-changing opcodes take the config lock for the duration. */
2411 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2413 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
/* Block until a free command is available, waking on releases. */
2414 while ((cm = mfi_dequeue_free(sc)) == NULL)
2415 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2417 /* Save context for later */
2418 context = cm->cm_frame->header.context;
2420 dcmd = &cm->cm_frame->dcmd;
/* The user-supplied frame overwrites the whole DCMD, context included. */
2421 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2423 cm->cm_sg = &dcmd->sgl;
2424 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2425 cm->cm_data = ioc_buf;
2426 cm->cm_len = ioc->buf_size;
2428 /* restore context */
2429 cm->cm_frame->header.context = context;
2431 /* Cheat since we don't know if we're writing or reading */
2432 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2434 error = mfi_check_command_pre(sc, cm);
2438 error = mfi_wait_command(sc, cm);
2440 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the completed frame (status etc.) to the caller. */
2443 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2444 mfi_check_command_post(sc, cm);
2446 mfi_release_command(cm);
2447 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2448 mfi_config_unlock(sc, locked);
2449 if (ioc->buf_size > 0)
2450 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2452 kfree(ioc_buf, M_MFIBUF);
/*
 * PTRIN() turns a pointer value carried inside a 32-bit compat ioctl
 * structure back into a kernel void *.  The two definitions below are
 * alternative branches of a (elided) #ifdef: the uintptr_t round-trip
 * is used when user pointers are a different width than kernel
 * pointers, the identity form otherwise.
 */
2457 #define PTRIN(p) ((void *)(uintptr_t)(p))
2459 #define PTRIN(p) (p)
/*
 * Decide whether the passthrough command being issued targets an SSCD
 * (CacheCade / SSD-cached) logical drive.  For MFI_DCMD_CFG_ADD the
 * flag is read directly from the supplied config data; for
 * MFI_DCMD_LD_DELETE a synchronous MFI_DCMD_LD_GET_INFO query is made
 * for the target LD (id taken from dcmd.mbox[0]) to inspect
 * ld_config.params.isSSCD.
 *
 * NOTE(review): intermediate lines (returns, assignments of the result)
 * are elided in this listing; the value returned on each path cannot be
 * confirmed from what is visible.
 */
2463 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2465 struct mfi_config_data *conf_data = cm->cm_data;
2466 struct mfi_command *ld_cm = NULL;
2467 struct mfi_ld_info *ld_info = NULL;
/* CFG_ADD: the first LD entry in the user config carries the flag. */
2470 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2471 (conf_data->ld[0].params.isSSCD == 1)) {
2473 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2474 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2475 (void **)&ld_info, sizeof(*ld_info));
2477 device_printf(sc->mfi_dev,"Failed to allocate "
2478 "MFI_DCMD_LD_GET_INFO %d", error);
2480 kfree(ld_info, M_MFIBUF);
/* Target the query at the LD named in the delete command's mailbox. */
2483 ld_cm->cm_flags = MFI_CMD_DATAIN;
2484 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2485 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2486 if (mfi_wait_command(sc, ld_cm) != 0) {
2487 device_printf(sc->mfi_dev, "failed to get log drv\n");
2488 mfi_release_command(ld_cm);
2489 kfree(ld_info, M_MFIBUF);
/* Firmware rejected the query: clean up both allocations. */
2493 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2494 kfree(ld_info, M_MFIBUF);
2495 mfi_release_command(ld_cm);
/* Success: cm_private holds the DMA'd mfi_ld_info result. */
2498 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2501 if (ld_info->ld_config.params.isSSCD == 1)
2504 mfi_release_command(ld_cm);
2505 kfree(ld_info, M_MFIBUF);
/*
 * Character-device ioctl entry point for the mfi control node.
 * Dispatches on cmd: queue statistics, MFIIO_QUERY_DISK, raw firmware
 * command passthrough (MFI_CMD and its 32-bit variant), AEN
 * registration, the Linux MegaRAID ioctl shims (forwarded to
 * mfi_linux_ioctl_int()), and MFIIO_PASSTHRU/MFIIO_PASSTHRU32.
 *
 * NOTE(review): this listing elides many lines (case labels, braces,
 * returns, #ifdef/#else/#endif markers); comments below annotate only
 * the visible statements.
 */
2511 mfi_ioctl(struct dev_ioctl_args *ap)
2513 cdev_t dev = ap->a_head.a_dev;
2514 u_long cmd = ap->a_cmd;
2515 int flag = ap->a_fflag;
2516 caddr_t arg = ap->a_data;
2517 struct mfi_softc *sc;
2518 union mfi_statrequest *ms;
2519 struct mfi_ioc_packet *ioc;
2521 struct mfi_ioc_packet32 *ioc32;
2523 struct mfi_ioc_aen *aen;
2524 struct mfi_command *cm = NULL;
2526 union mfi_sense_ptr sense_ptr;
2527 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
/* arg is viewed through several shapes depending on cmd. */
2530 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2532 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2533 struct mfi_ioc_passthru iop_swab;
/* --- queue-statistics request: copy the selected qstat out. --- */
2542 ms = (union mfi_statrequest *)arg;
2543 switch (ms->ms_item) {
2548 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2549 sizeof(struct mfi_qstat));
/* --- map an array id to its mfid device name / open state. --- */
2556 case MFIIO_QUERY_DISK:
2558 struct mfi_query_disk *qd;
2559 struct mfi_disk *ld;
2561 qd = (struct mfi_query_disk *)arg;
2562 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2563 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2564 if (ld->ld_id == qd->array_id)
2569 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2573 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2575 bzero(qd->devname, SPECNAMELEN + 1);
2576 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2577 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* --- raw firmware command (MFI_CMD / 32-bit compat) path. --- */
2585 devclass_t devclass;
2586 ioc = (struct mfi_ioc_packet *)arg;
/* Requests may name another adapter; re-resolve sc through devclass. */
2589 adapter = ioc->mfi_adapter_no;
2590 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2591 devclass = devclass_find("mfi");
2592 sc = devclass_get_softc(devclass, adapter);
2594 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2595 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2596 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2599 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2603 * save off original context since copying from user
2604 * will clobber some data
2606 context = cm->cm_frame->header.context;
2608 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2609 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2610 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2611 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2612 cm->cm_frame->header.scsi_status = 0;
2613 cm->cm_frame->header.pad0 = 0;
2614 if (ioc->mfi_sge_count) {
2616 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Derive driver data-direction flags from the frame's flags. */
2619 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2620 cm->cm_flags |= MFI_CMD_DATAIN;
2621 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2622 cm->cm_flags |= MFI_CMD_DATAOUT;
2623 /* Legacy app shim */
2624 if (cm->cm_flags == 0)
2625 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2626 cm->cm_len = cm->cm_frame->header.data_len;
/* STP commands carry an extra first-SGE payload appended to cm_len. */
2627 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2629 if (cmd == MFI_CMD) {
2632 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
2635 /* 32bit on 64bit */
2636 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2637 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
2640 cm->cm_len += cm->cm_stp_len;
/* Allocate a contiguous kernel bounce buffer for the whole transfer. */
2643 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2644 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2650 /* restore header context */
2651 cm->cm_frame->header.context = context;
/* Gather: copy each user SGE into the bounce buffer for writes/STP. */
2654 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
2655 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
2656 for (i = 0; i < ioc->mfi_sge_count; i++) {
2658 if (cmd == MFI_CMD) {
2661 addr = ioc->mfi_sgl[i].iov_base;
2662 len = ioc->mfi_sgl[i].iov_len;
2665 /* 32bit on 64bit */
2666 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2667 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
2668 len = ioc32->mfi_sgl[i].iov_len;
2671 error = copyin(addr, temp, len);
2673 device_printf(sc->mfi_dev,
2674 "Copy in failed\n");
2681 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2682 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Point SCSI passthrough frames at the command's DMA sense buffer. */
2684 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2685 #if defined(__x86_64__)
2686 cm->cm_frame->pass.sense_addr_lo =
2687 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2688 cm->cm_frame->pass.sense_addr_hi =
2689 (cm->cm_sense_busaddr& 0xFFFFFFFF00000000) >> 32;
2691 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2692 cm->cm_frame->pass.sense_addr_hi = 0;
/* SSCD-targeted commands skip the generic pre/post hooks. */
2696 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2697 skip_pre_post = mfi_check_for_sscd(sc, cm);
2698 if (!skip_pre_post) {
2699 error = mfi_check_command_pre(sc, cm);
2701 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2706 if ((error = mfi_wait_command(sc, cm)) != 0) {
2707 device_printf(sc->mfi_dev,
2708 "Controller polled failed\n");
2709 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2714 mfi_check_command_post(sc, cm);
2715 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter: copy result data back to each user SGE for reads/STP. */
2718 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2719 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
2720 for (i = 0; i < ioc->mfi_sge_count; i++) {
2722 if (cmd == MFI_CMD) {
2725 addr = ioc->mfi_sgl[i].iov_base;
2726 len = ioc->mfi_sgl[i].iov_len;
2729 /* 32bit on 64bit */
2730 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2731 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
2732 len = ioc32->mfi_sgl[i].iov_len;
2735 error = copyout(temp, addr, len);
2737 device_printf(sc->mfi_dev,
2738 "Copy out failed\n");
2745 if (ioc->mfi_sense_len) {
2746 /* get user-space sense ptr then copy out sense */
2747 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
2748 &sense_ptr.sense_ptr_data[0],
2749 sizeof(sense_ptr.sense_ptr_data));
2751 if (cmd != MFI_CMD) {
2753 * not 64bit native so zero out any address
2755 sense_ptr.addr.high = 0;
2758 error = copyout(cm->cm_sense, sense_ptr.user_space,
2759 ioc->mfi_sense_len);
2761 device_printf(sc->mfi_dev,
2762 "Copy out failed\n");
/* Propagate the firmware completion status back to the caller. */
2767 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2769 mfi_config_unlock(sc, locked);
2771 kfree(data, M_MFIBUF);
2773 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2774 mfi_release_command(cm);
2775 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* --- AEN registration. --- */
2781 aen = (struct mfi_ioc_aen *)arg;
2782 error = mfi_aen_register(sc, aen->aen_seq_num,
2783 aen->aen_class_locale);
2786 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2788 devclass_t devclass;
2789 struct mfi_linux_ioc_packet l_ioc;
2792 devclass = devclass_find("mfi");
2793 if (devclass == NULL)
2796 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Forward to the Linux-compat handler on the addressed adapter. */
2799 adapter = l_ioc.lioc_adapter_no;
2800 sc = devclass_get_softc(devclass, adapter);
2803 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2807 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2809 devclass_t devclass;
2810 struct mfi_linux_ioc_aen l_aen;
2813 devclass = devclass_find("mfi");
2814 if (devclass == NULL)
2817 error = copyin(arg, &l_aen, sizeof(l_aen));
2820 adapter = l_aen.laen_adapter_no;
2821 sc = devclass_get_softc(devclass, adapter);
2824 return (mfi_linux_ioctl_int(sc->mfi_cdev,
/* --- 32-bit passthru: widen into iop_swab, then fall through. --- */
2829 case MFIIO_PASSTHRU32:
2830 iop_swab.ioc_frame = iop32->ioc_frame;
2831 iop_swab.buf_size = iop32->buf_size;
2832 iop_swab.buf = PTRIN(iop32->buf);
2836 case MFIIO_PASSTHRU:
2837 error = mfi_user_command(sc, iop);
/* Copy the completed frame back into the 32-bit structure. */
2839 if (cmd == MFIIO_PASSTHRU32)
2840 iop32->ioc_frame = iop_swab.ioc_frame;
2844 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Linux MegaRAID management-tool compatibility shim.  Handles
 * MFI_LINUX_CMD_2 (raw firmware command with a Linux-layout ioctl
 * packet, bounced through kernel memory) and MFI_LINUX_SET_AEN_2
 * (AEN registration recording the calling process).
 *
 * NOTE(review): intermediate source lines are elided in this listing;
 * comments annotate only the visible statements.
 */
2853 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
2855 struct mfi_softc *sc;
2856 struct mfi_linux_ioc_packet l_ioc;
2857 struct mfi_linux_ioc_aen l_aen;
2858 struct mfi_command *cm = NULL;
2859 struct mfi_aen *mfi_aen_entry;
2860 union mfi_sense_ptr sense_ptr;
2862 uint8_t *data = NULL, *temp;
2869 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2870 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Reject SGE counts the fixed Linux packet layout cannot hold. */
2874 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2878 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2879 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2880 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2883 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2887 * save off original context since copying from user
2888 * will clobber some data
2890 context = cm->cm_frame->header.context;
2892 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2893 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2894 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2895 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
2896 cm->cm_frame->header.scsi_status = 0;
2897 cm->cm_frame->header.pad0 = 0;
2898 if (l_ioc.lioc_sge_count)
2900 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
/* Translate frame direction flags into driver cm_flags. */
2902 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2903 cm->cm_flags |= MFI_CMD_DATAIN;
2904 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2905 cm->cm_flags |= MFI_CMD_DATAOUT;
2906 cm->cm_len = cm->cm_frame->header.data_len;
/* Single kernel bounce buffer covering the whole user SG list. */
2908 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2909 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2915 /* restore header context */
2916 cm->cm_frame->header.context = context;
/* Gather phase: copy each user SGE into the bounce buffer. */
2919 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2920 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2921 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
2923 l_ioc.lioc_sgl[i].iov_len);
2925 device_printf(sc->mfi_dev,
2926 "Copy in failed\n");
2929 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2933 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2934 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* SCSI passthrough: aim the frame at the command's sense DMA buffer. */
2936 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2937 #if defined(__x86_64__)
2938 cm->cm_frame->pass.sense_addr_lo =
2939 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2940 cm->cm_frame->pass.sense_addr_hi =
2941 (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2943 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2944 cm->cm_frame->pass.sense_addr_hi = 0;
2948 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2949 error = mfi_check_command_pre(sc, cm);
2951 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2955 if ((error = mfi_wait_command(sc, cm)) != 0) {
2956 device_printf(sc->mfi_dev,
2957 "Controller polled failed\n");
2958 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2962 mfi_check_command_post(sc, cm);
2963 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
/* Scatter phase: copy result data back to each user SGE. */
2966 if (cm->cm_flags & MFI_CMD_DATAIN) {
2967 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2968 error = copyout(temp,
2969 PTRIN(l_ioc.lioc_sgl[i].iov_base),
2970 l_ioc.lioc_sgl[i].iov_len);
2972 device_printf(sc->mfi_dev,
2973 "Copy out failed\n");
2976 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2980 if (l_ioc.lioc_sense_len) {
2981 /* get user-space sense ptr then copy out sense */
2982 bcopy(&((struct mfi_linux_ioc_packet*)arg)
2983 ->lioc_frame.raw[l_ioc.lioc_sense_off],
2984 &sense_ptr.sense_ptr_data[0],
2985 sizeof(sense_ptr.sense_ptr_data));
2988 * only 32bit Linux support so zero out any
2989 * address over 32bit
2991 sense_ptr.addr.high = 0;
2993 error = copyout(cm->cm_sense, sense_ptr.user_space,
2994 l_ioc.lioc_sense_len);
2996 device_printf(sc->mfi_dev,
2997 "Copy out failed\n");
/* Write the firmware completion status into the user packet. */
3002 error = copyout(&cm->cm_frame->header.cmd_status,
3003 &((struct mfi_linux_ioc_packet*)arg)
3004 ->lioc_frame.hdr.cmd_status,
3007 device_printf(sc->mfi_dev,
3008 "Copy out failed\n");
3013 mfi_config_unlock(sc, locked);
3015 kfree(data, M_MFIBUF);
3017 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3018 mfi_release_command(cm);
3019 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3023 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3024 error = copyin(arg, &l_aen, sizeof(l_aen));
3027 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3028 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
/* Track the registering process so AENs can be delivered to it. */
3030 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3031 if (mfi_aen_entry != NULL) {
3032 mfi_aen_entry->p = curproc;
3033 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3036 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3037 l_aen.laen_class_locale);
/* Registration failed: back out the pid-list entry. */
3040 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3042 kfree(mfi_aen_entry, M_MFIBUF);
3044 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3048 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * kqueue filter attach for the mfi control device: select the read or
 * write filterops based on kn_filter, stash the softc in kn_hook, and
 * hook the knote onto the device's klist.  Unsupported filters set
 * a_result to EOPNOTSUPP.
 */
3057 mfi_kqfilter(struct dev_kqfilter_args *ap)
3059 cdev_t dev = ap->a_head.a_dev;
3060 struct knote *kn = ap->a_kn;
3061 struct mfi_softc *sc;
3062 struct klist *klist;
3067 switch (kn->kn_filter) {
3069 kn->kn_fop = &mfi_read_filterops;
3070 kn->kn_hook = (caddr_t)sc;
3073 kn->kn_fop = &mfi_write_filterops;
3074 kn->kn_hook = (caddr_t)sc;
3077 ap->a_result = EOPNOTSUPP;
3081 klist = &sc->mfi_kq.ki_note;
3082 knote_insert(klist, kn);
/*
 * kqueue filter detach: remove the knote from the softc's klist.
 * The softc was stored in kn_hook by mfi_kqfilter().
 */
3088 mfi_filter_detach(struct knote *kn)
3090 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3091 struct klist *klist = &sc->mfi_kq.ki_note;
3093 knote_remove(klist, kn);
/*
 * EVFILT_READ handler: reports readable when an AEN has fired
 * (consuming mfi_aen_triggered), flags EV_ERROR when no AEN command is
 * outstanding at all, and otherwise marks mfi_poll_waiting so the next
 * AEN wakes the poller.
 */
3097 mfi_filter_read(struct knote *kn, long hint)
3099 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
/* An AEN fired since the last poll: consume the latch. */
3102 if (sc->mfi_aen_triggered != 0) {
3104 sc->mfi_aen_triggered = 0;
/* No event pending and no AEN command in flight: nothing to wait on. */
3106 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3107 kn->kn_flags |= EV_ERROR;
3110 sc->mfi_poll_waiting = 1;
/*
 * EVFILT_WRITE handler for the mfi control device.
 * NOTE(review): the body is elided in this listing; behavior cannot be
 * confirmed from what is visible here.
 */
3116 mfi_filter_write(struct knote *kn, long hint)
/*
 * Debug helper: walk every softc registered in the "mfi" devclass and
 * report busy commands that have been outstanding longer than
 * MFI_CMD_TIMEOUT seconds.
 * NOTE(review): the function's definition line and loop-exit handling
 * are elided in this listing — presumably this is the driver's
 * dump-all-controllers debug routine; confirm against the full source.
 */
3124 struct mfi_softc *sc;
3125 struct mfi_command *cm;
3131 dc = devclass_find("mfi");
3133 kprintf("No mfi dev class\n");
/* Iterate unit numbers until devclass_get_softc() runs dry. */
3137 for (i = 0; ; i++) {
3138 sc = devclass_get_softc(dc, i);
3141 device_printf(sc->mfi_dev, "Dumping\n\n");
/* Anything timestamped before this deadline has timed out. */
3143 deadline = time_second - MFI_CMD_TIMEOUT;
3144 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3145 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3146 if (cm->cm_timestamp < deadline) {
3147 device_printf(sc->mfi_dev,
3148 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3149 (int)(time_second - cm->cm_timestamp));
3160 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3167 mfi_timeout(void *data)
3169 struct mfi_softc *sc = (struct mfi_softc *)data;
3170 struct mfi_command *cm;
3174 deadline = time_second - MFI_CMD_TIMEOUT;
3175 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3176 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3177 if (sc->mfi_aen_cm == cm)
3179 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3180 device_printf(sc->mfi_dev,
3181 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3182 (int)(time_second - cm->cm_timestamp));
3184 MFI_VALIDATE_CMD(sc, cm);
3194 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3196 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,