mfi(4): Merge LSI's latest driver changes (updates us to version 3.981).
sys/dev/raid/mfi/mfi.c
1/*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26/*-
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52/*-
53 * Redistribution and use in source and binary forms, with or without
54 * modification, are permitted provided that the following conditions
55 * are met:
56 *
57 * Copyright 1994-2009 The FreeBSD Project.
58 * All rights reserved.
59 *
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
65 *
66 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
68 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
69 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
70 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
71 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
72 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
73 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
74 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
75 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
76 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
77 *
78 * The views and conclusions contained in the software and documentation
79 * are those of the authors and should not be interpreted as representing
80 * official policies, either expressed or implied, of the FreeBSD Project.
81 *
82 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.57 2011/07/14 20:20:33 jhb Exp $
83 */
84
85#include "opt_mfi.h"
86
87#include <sys/param.h>
88#include <sys/systm.h>
89#include <sys/sysctl.h>
90#include <sys/malloc.h>
91#include <sys/kernel.h>
92#include <sys/bus.h>
93#include <sys/eventhandler.h>
94#include <sys/rman.h>
95#include <sys/bus_dma.h>
96#include <sys/buf2.h>
97#include <sys/ioccom.h>
98#include <sys/uio.h>
99#include <sys/proc.h>
100#include <sys/signalvar.h>
101#include <sys/device.h>
102#include <sys/mplock2.h>
103
104#include <bus/cam/scsi/scsi_all.h>
105
106#include <dev/raid/mfi/mfireg.h>
107#include <dev/raid/mfi/mfi_ioctl.h>
108#include <dev/raid/mfi/mfivar.h>
109
110static int mfi_alloc_commands(struct mfi_softc *);
111static int mfi_comms_init(struct mfi_softc *);
112static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
113static int mfi_get_controller_info(struct mfi_softc *);
114static int mfi_get_log_state(struct mfi_softc *,
115 struct mfi_evt_log_state **);
116static int mfi_parse_entries(struct mfi_softc *, int, int);
117static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
118 uint32_t, void **, size_t);
119static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
120static void mfi_startup(void *arg);
121static void mfi_intr(void *arg);
122static void mfi_ldprobe(struct mfi_softc *sc);
123static void mfi_syspdprobe(struct mfi_softc *sc);
124static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
125static void mfi_aen_complete(struct mfi_command *);
126static int mfi_aen_setup(struct mfi_softc *, uint32_t);
127static int mfi_add_ld(struct mfi_softc *sc, int);
128static void mfi_add_ld_complete(struct mfi_command *);
129static int mfi_add_sys_pd(struct mfi_softc *sc, int);
130static void mfi_add_sys_pd_complete(struct mfi_command *);
131static struct mfi_command * mfi_bio_command(struct mfi_softc *);
132static void mfi_bio_complete(struct mfi_command *);
133static struct mfi_command * mfi_build_ldio(struct mfi_softc *, struct bio *);
134static struct mfi_command * mfi_build_syspdio(struct mfi_softc *, struct bio *);
135static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
136static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
137static void mfi_complete(struct mfi_softc *, struct mfi_command *);
138static int mfi_abort(struct mfi_softc *, struct mfi_command *);
139static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
140static void mfi_timeout(void *);
141static int mfi_user_command(struct mfi_softc *,
142 struct mfi_ioc_passthru *);
143static void mfi_enable_intr_xscale(struct mfi_softc *sc);
144static void mfi_enable_intr_ppc(struct mfi_softc *sc);
145static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
146static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
147static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
148static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
149static void mfi_issue_cmd_xscale(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt);
150static void mfi_issue_cmd_ppc(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt);
151static void mfi_filter_detach(struct knote *);
152static int mfi_filter_read(struct knote *, long);
153static int mfi_filter_write(struct knote *, long);
154
155SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
156static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
157TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
158SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
159 0, "event message locale");
160
161static int mfi_event_class = MFI_EVT_CLASS_INFO;
162TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
163SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
164 0, "event message class");
165
166static int mfi_max_cmds = 128;
167TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
168SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
169 0, "Max commands");
170
171/* Management interface */
172static d_open_t mfi_open;
173static d_close_t mfi_close;
174static d_ioctl_t mfi_ioctl;
175static d_kqfilter_t mfi_kqfilter;
176
177static struct dev_ops mfi_ops = {
178 { "mfi", 0, 0 },
179 .d_open = mfi_open,
180 .d_close = mfi_close,
181 .d_ioctl = mfi_ioctl,
182 .d_kqfilter = mfi_kqfilter,
183};
184
185static struct filterops mfi_read_filterops =
186 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
187static struct filterops mfi_write_filterops =
188 { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
189
190MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
191
192#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
193
194static void
195mfi_enable_intr_xscale(struct mfi_softc *sc)
196{
197 MFI_WRITE4(sc, MFI_OMSK, 0x01);
198}
199
200static void
201mfi_enable_intr_ppc(struct mfi_softc *sc)
202{
203 if (sc->mfi_flags & MFI_FLAGS_1078) {
204 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
205 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
206 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
207 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
208 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
209 } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
210 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
211 }
212}
213
214static int32_t
215mfi_read_fw_status_xscale(struct mfi_softc *sc)
216{
217 return MFI_READ4(sc, MFI_OMSG0);
218}
219
220static int32_t
221mfi_read_fw_status_ppc(struct mfi_softc *sc)
222{
223 return MFI_READ4(sc, MFI_OSP0);
224}
225
226static int
227mfi_check_clear_intr_xscale(struct mfi_softc *sc)
228{
229 int32_t status;
230
231 status = MFI_READ4(sc, MFI_OSTS);
232 if ((status & MFI_OSTS_INTR_VALID) == 0)
233 return 1;
234
235 MFI_WRITE4(sc, MFI_OSTS, status);
236 return 0;
237}
238
239static int
240mfi_check_clear_intr_ppc(struct mfi_softc *sc)
241{
242 int32_t status;
243
244 status = MFI_READ4(sc, MFI_OSTS);
245 if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
246 ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
247 ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
248 return 1;
249
250 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
251 MFI_WRITE4(sc, MFI_OSTS, status);
252 else
253 MFI_WRITE4(sc, MFI_ODCR0, status);
254 return 0;
255}
256
257static void
258mfi_issue_cmd_xscale(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt)
259{
260	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
261}
262
263static void
264mfi_issue_cmd_ppc(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt)
265{
266 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
267 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
268 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
269 } else {
270 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
271 }
272}
273
274static int
275mfi_transition_firmware(struct mfi_softc *sc)
276{
277 uint32_t fw_state, cur_state;
278 int max_wait, i;
279 uint32_t cur_abs_reg_val = 0;
280 uint32_t prev_abs_reg_val = 0;
281 bus_space_handle_t idb;
282
283 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
284 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
285 idb = sc->mfi_flags & MFI_FLAGS_SKINNY ? MFI_SKINNY_IDB : MFI_IDB;
286 while (fw_state != MFI_FWSTATE_READY) {
287 if (bootverbose)
288 device_printf(sc->mfi_dev, "Waiting for firmware to "
289 "become ready\n");
290 cur_state = fw_state;
291 switch (fw_state) {
292 case MFI_FWSTATE_FAULT:
293 device_printf(sc->mfi_dev, "Firmware fault\n");
294 return (ENXIO);
295 case MFI_FWSTATE_WAIT_HANDSHAKE:
296 MFI_WRITE4(sc, idb, MFI_FWINIT_CLEAR_HANDSHAKE);
297 max_wait = 2;
298 break;
299 case MFI_FWSTATE_OPERATIONAL:
300 MFI_WRITE4(sc, idb, MFI_FWINIT_READY);
301 max_wait = 10;
302 break;
303 case MFI_FWSTATE_UNDEFINED:
304 case MFI_FWSTATE_BB_INIT:
305 max_wait = 2;
306 break;
307 case MFI_FWSTATE_FW_INIT:
308 case MFI_FWSTATE_FLUSH_CACHE:
309 max_wait = 20;
310 break;
311 case MFI_FWSTATE_DEVICE_SCAN:
312 max_wait = 180; /* wait for 180 seconds */
313 prev_abs_reg_val = cur_abs_reg_val;
314 break;
315 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
316 MFI_WRITE4(sc, idb, MFI_FWINIT_HOTPLUG);
317 max_wait = 10;
318 break;
319 default:
320			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
321 fw_state);
322 return (ENXIO);
323 }
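		/*
		 * Poll in 100ms steps below: max_wait * 10 iterations of
		 * DELAY(100000) give a total timeout of roughly max_wait
		 * seconds for the state selected above.
		 */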
324 for (i = 0; i < (max_wait * 10); i++) {
325 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
326 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
327 if (fw_state == cur_state)
328 DELAY(100000);
329 else
330 break;
331 }
332 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
333 /* Check the device scanning progress */
334 if (prev_abs_reg_val != cur_abs_reg_val)
335 continue;
336 }
337 if (fw_state == cur_state) {
338 device_printf(sc->mfi_dev, "Firmware stuck in state "
339 "%#x\n", fw_state);
340 return (ENXIO);
341 }
342 }
343 return (0);
344}
345
346#if defined(__x86_64__)
347static void
348mfi_addr64_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
349{
350 uint64_t *addr;
351
352 addr = arg;
353 *addr = segs[0].ds_addr;
354}
355#else
356static void
357mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
358{
359 uint32_t *addr;
360
361 addr = arg;
362 *addr = segs[0].ds_addr;
363}
364#endif
365
366int
367mfi_attach(struct mfi_softc *sc)
368{
369 uint32_t status;
370 int error, commsz, framessz, sensesz;
371 int frames, unit, max_fw_sge;
372
373 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.981\n");
374
375 lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
376 lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
377 TAILQ_INIT(&sc->mfi_ld_tqh);
378 TAILQ_INIT(&sc->mfi_syspd_tqh);
379 TAILQ_INIT(&sc->mfi_aen_pids);
380 TAILQ_INIT(&sc->mfi_cam_ccbq);
381
382 mfi_initq_free(sc);
383 mfi_initq_ready(sc);
384 mfi_initq_busy(sc);
385 mfi_initq_bio(sc);
386
387 if (sc->mfi_flags & MFI_FLAGS_1064R) {
388 sc->mfi_enable_intr = mfi_enable_intr_xscale;
389 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
390 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
391 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
392	} else {
394 sc->mfi_enable_intr = mfi_enable_intr_ppc;
395 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
396 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
397 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
398 }
399
401 /* Before we get too far, see if the firmware is working */
402 if ((error = mfi_transition_firmware(sc)) != 0) {
403 device_printf(sc->mfi_dev, "Firmware not in READY state, "
404 "error %d\n", error);
405 return (ENXIO);
406 }
407
408 /*
409 * Get information needed for sizing the contiguous memory for the
410 * frame pool. Size down the sgl parameter since we know that
411 * we will never need more than what's required for MAXPHYS.
412 * It would be nice if these constants were available at runtime
413 * instead of compile time.
414 */
415 status = sc->mfi_read_fw_status(sc);
416 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
417 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
418 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
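	/*
	 * Illustrative arithmetic only (actual values depend on platform
	 * and firmware): with a 128KB MFI_MAXPHYS and 4KB pages, this
	 * clamps mfi_max_sge to 33 -- one more segment than the 32 pages
	 * a page-aligned 128KB transfer spans, covering the maximally
	 * misaligned case.
	 */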
419
420 /*
421 * Create the dma tag for data buffers. Used both for block I/O
422 * and for various internal data queries.
423 */
424 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
425 1, 0, /* algnmnt, boundary */
426 BUS_SPACE_MAXADDR, /* lowaddr */
427 BUS_SPACE_MAXADDR, /* highaddr */
428 NULL, NULL, /* filter, filterarg */
429 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
430 sc->mfi_max_sge, /* nsegments */
431 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
432 BUS_DMA_ALLOCNOW, /* flags */
433 &sc->mfi_buffer_dmat)) {
434 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
435 return (ENOMEM);
436 }
437
438 /*
439 * Allocate DMA memory for the comms queues. Keep it under 4GB for
440 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
441 * entry, so the calculated size here will be 1 more than
442 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
443 */
444 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
445 sizeof(struct mfi_hwcomms);
446 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
447 1, 0, /* algnmnt, boundary */
448 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
449 BUS_SPACE_MAXADDR, /* highaddr */
450 NULL, NULL, /* filter, filterarg */
451 commsz, /* maxsize */
452				1,			/* nsegments */
453 commsz, /* maxsegsize */
454 0, /* flags */
455 &sc->mfi_comms_dmat)) {
456 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
457 return (ENOMEM);
458 }
459 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
460 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
461 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
462 return (ENOMEM);
463 }
464 bzero(sc->mfi_comms, commsz);
465#if defined(__x86_64__)
466 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
467 sc->mfi_comms, commsz, mfi_addr64_cb, &sc->mfi_comms_busaddr, 0);
468#else
469 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
470 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
471#endif
472
473 /*
474 * Allocate DMA memory for the command frames. Keep them in the
475 * lower 4GB for efficiency. Calculate the size of the commands at
476 * the same time; each command is one 64 byte frame plus a set of
477 * additional frames for holding sg lists or other data.
478 * The assumption here is that the SG list will start at the second
479 * frame and not use the unused bytes in the first frame. While this
480 * isn't technically correct, it simplifies the calculation and allows
481 * for command frames that might be larger than an mfi_io_frame.
482 */
483 if (sizeof(bus_addr_t) == 8) {
484 sc->mfi_sge_size = sizeof(struct mfi_sg64);
485 sc->mfi_flags |= MFI_FLAGS_SG64;
486 } else {
487 sc->mfi_sge_size = sizeof(struct mfi_sg32);
488 }
489 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
490 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
491 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
492 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
493 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
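	/*
	 * Example sizing, for illustration only (actual numbers vary by
	 * configuration): with 8-byte 32-bit SG entries and mfi_max_sge
	 * of 33, frames = (8 * 33 - 1) / 64 + 2 = 6, so each command is
	 * allotted 384 bytes of frame space.
	 */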
494 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
495 64, 0, /* algnmnt, boundary */
496 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
497 BUS_SPACE_MAXADDR, /* highaddr */
498 NULL, NULL, /* filter, filterarg */
499 framessz, /* maxsize */
500 1, /* nsegments */
501 framessz, /* maxsegsize */
502 0, /* flags */
503 &sc->mfi_frames_dmat)) {
504 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
505 return (ENOMEM);
506 }
507 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
508 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
509 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
510 return (ENOMEM);
511 }
512 bzero(sc->mfi_frames, framessz);
513#if defined(__x86_64__)
514 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
515 sc->mfi_frames, framessz, mfi_addr64_cb, &sc->mfi_frames_busaddr,0);
516#else
517 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
518 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
519#endif
520
521 /*
522 * Allocate DMA memory for the frame sense data. Keep them in the
523 * lower 4GB for efficiency
524 */
525 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
526 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
527 4, 0, /* algnmnt, boundary */
528 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
529 BUS_SPACE_MAXADDR, /* highaddr */
530 NULL, NULL, /* filter, filterarg */
531 sensesz, /* maxsize */
532 1, /* nsegments */
533 sensesz, /* maxsegsize */
534 0, /* flags */
535 &sc->mfi_sense_dmat)) {
536 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
537 return (ENOMEM);
538 }
539 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
540 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
541 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
542 return (ENOMEM);
543 }
544#if defined(__x86_64__)
545 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
546 sc->mfi_sense, sensesz, mfi_addr64_cb, &sc->mfi_sense_busaddr, 0);
547#else
548 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
549 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
550#endif
551
552 if ((error = mfi_alloc_commands(sc)) != 0)
553 return (error);
554
555 if ((error = mfi_comms_init(sc)) != 0)
556 return (error);
557
558 if ((error = mfi_get_controller_info(sc)) != 0)
559 return (error);
560
561 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
562	if ((error = mfi_aen_setup(sc, 0)) != 0) {
563 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
564 return (error);
565 }
566 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
567
568 /*
569 * Set up the interrupt handler. XXX This should happen in
570 * mfi_pci.c
571 */
572 sc->mfi_irq_rid = 0;
573 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
574 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
575 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
576 return (EINVAL);
577 }
578 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
579 mfi_intr, sc, &sc->mfi_intr, NULL)) {
580 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
581 return (EINVAL);
582 }
583
584 /* Register a config hook to probe the bus for arrays */
585 sc->mfi_ich.ich_func = mfi_startup;
586 sc->mfi_ich.ich_arg = sc;
587 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
588 device_printf(sc->mfi_dev, "Cannot establish configuration "
589 "hook\n");
590 return (EINVAL);
591 }
592
593 /*
594 * Register a shutdown handler.
595 */
596 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
597 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
598 device_printf(sc->mfi_dev, "Warning: shutdown event "
599 "registration failed\n");
600 }
601
602 /*
603 * Create the control device for doing management
604 */
605 unit = device_get_unit(sc->mfi_dev);
606 sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
607 0640, "mfi%d", unit);
608 if (unit == 0)
609 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
610 if (sc->mfi_cdev != NULL)
611 sc->mfi_cdev->si_drv1 = sc;
612 sysctl_ctx_init(&sc->mfi_sysctl_ctx);
613 sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
614 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
615 device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
616 if (sc->mfi_sysctl_tree == NULL) {
617 device_printf(sc->mfi_dev, "can't add sysctl node\n");
618 return (EINVAL);
619 }
620 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
621 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
622 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
623 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
624 SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
625 SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
626 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
627 &sc->mfi_keep_deleted_volumes, 0,
628 "Don't detach the mfid device for a busy volume that is deleted");
629
630 device_add_child(sc->mfi_dev, "mfip", -1);
631 bus_generic_attach(sc->mfi_dev);
632
633 /* Start the timeout watchdog */
634 callout_init(&sc->mfi_watchdog_callout);
635 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
636 mfi_timeout, sc);
637
638 return (0);
639}
640
641static int
642mfi_alloc_commands(struct mfi_softc *sc)
643{
644 struct mfi_command *cm;
645 int i, ncmds;
646
647 /*
648 * XXX Should we allocate all the commands up front, or allocate on
649 * demand later like 'aac' does?
650 */
651 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
652 if (bootverbose)
653 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
654 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
655
656 sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
657 M_WAITOK | M_ZERO);
658
659 for (i = 0; i < ncmds; i++) {
660 cm = &sc->mfi_commands[i];
661 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
662 sc->mfi_cmd_size * i);
663 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
664 sc->mfi_cmd_size * i;
665 cm->cm_frame->header.context = i;
666 cm->cm_sense = &sc->mfi_sense[i];
667 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
668 cm->cm_sc = sc;
669 cm->cm_index = i;
670 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
671 &cm->cm_dmamap) == 0)
672 mfi_release_command(cm);
673 else
674 break;
675 sc->mfi_total_cmds++;
676 }
677
678 return (0);
679}
680
681void
682mfi_release_command(struct mfi_command *cm)
683{
684 struct mfi_frame_header *hdr;
685 uint32_t *hdr_data;
686
687 /*
688 * Zero out the important fields of the frame, but make sure the
689 * context field is preserved. For efficiency, handle the fields
690 * as 32 bit words. Clear out the first S/G entry too for safety.
691 */
692 hdr = &cm->cm_frame->header;
693 if (cm->cm_data != NULL && hdr->sg_count) {
694 cm->cm_sg->sg32[0].len = 0;
695 cm->cm_sg->sg32[0].addr = 0;
696 }
697
698 hdr_data = (uint32_t *)cm->cm_frame;
699 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
700 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
701 hdr_data[4] = 0; /* flags, timeout */
702 hdr_data[5] = 0; /* data_len */
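	/*
	 * Words 2 and 3 (the context field and its pad) are deliberately
	 * left untouched; see the comment above about preserving context.
	 */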
703
704 cm->cm_extra_frames = 0;
705 cm->cm_flags = 0;
706 cm->cm_complete = NULL;
707 cm->cm_private = NULL;
708 cm->cm_data = NULL;
709 cm->cm_sg = 0;
710 cm->cm_total_frame_size = 0;
711
712 mfi_enqueue_free(cm);
713}
714
715static int
716mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
717 void **bufp, size_t bufsize)
718{
719 struct mfi_command *cm;
720 struct mfi_dcmd_frame *dcmd;
721 void *buf = NULL;
722 uint32_t context = 0;
723
724 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
725
726 cm = mfi_dequeue_free(sc);
727 if (cm == NULL)
728 return (EBUSY);
729
730 /* Zero out the MFI frame */
731 context = cm->cm_frame->header.context;
732 bzero(cm->cm_frame, sizeof(union mfi_frame));
733 cm->cm_frame->header.context = context;
734
735 if ((bufsize > 0) && (bufp != NULL)) {
736 if (*bufp == NULL) {
737 buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
738 if (buf == NULL) {
739 mfi_release_command(cm);
740 return (ENOMEM);
741 }
742 *bufp = buf;
743 } else {
744 buf = *bufp;
745 }
746 }
747
748 dcmd = &cm->cm_frame->dcmd;
749 bzero(dcmd->mbox, MFI_MBOX_SIZE);
750 dcmd->header.cmd = MFI_CMD_DCMD;
751 dcmd->header.timeout = 0;
752 dcmd->header.flags = 0;
753 dcmd->header.data_len = bufsize;
754 dcmd->header.scsi_status = 0;
755 dcmd->opcode = opcode;
756 cm->cm_sg = &dcmd->sgl;
757 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
758 cm->cm_flags = 0;
759 cm->cm_data = buf;
760 cm->cm_private = buf;
761 cm->cm_len = bufsize;
762
763 *cmp = cm;
764 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
765 *bufp = buf;
766 return (0);
767}
768
769static int
770mfi_comms_init(struct mfi_softc *sc)
771{
772 struct mfi_command *cm;
773 struct mfi_init_frame *init;
774 struct mfi_init_qinfo *qinfo;
775 int error;
776 uint32_t context = 0;
777
778 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
779	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
780		return (EBUSY);
	}
781
782 /* Zero out the MFI frame */
783 context = cm->cm_frame->header.context;
784 bzero(cm->cm_frame, sizeof(union mfi_frame));
785 cm->cm_frame->header.context = context;
786
787 /*
788 * Abuse the SG list area of the frame to hold the init_qinfo
789	 * object.
790 */
791 init = &cm->cm_frame->init;
792 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
793
794 bzero(qinfo, sizeof(struct mfi_init_qinfo));
795 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
796 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
797 offsetof(struct mfi_hwcomms, hw_reply_q);
798 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
799 offsetof(struct mfi_hwcomms, hw_pi);
800 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
801 offsetof(struct mfi_hwcomms, hw_ci);
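	/*
	 * Only 32-bit addresses are programmed here; the comms area was
	 * created with a 4GB lowaddr restriction in mfi_attach(), so the
	 * upper halves of these bus addresses are guaranteed to be zero.
	 */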
802
803 init->header.cmd = MFI_CMD_INIT;
804 init->header.data_len = sizeof(struct mfi_init_qinfo);
805 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
806 cm->cm_data = NULL;
807 cm->cm_flags = MFI_CMD_POLLED;
808
809 if ((error = mfi_mapcmd(sc, cm)) != 0) {
810 device_printf(sc->mfi_dev, "failed to send init command\n");
811 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
812 return (error);
813 }
814 mfi_release_command(cm);
815 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
816
817 return (0);
818}
819
820static int
821mfi_get_controller_info(struct mfi_softc *sc)
822{
823 struct mfi_command *cm = NULL;
824 struct mfi_ctrl_info *ci = NULL;
825 uint32_t max_sectors_1, max_sectors_2;
826 int error;
827
828 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
829 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
830 (void **)&ci, sizeof(*ci));
831 if (error)
832 goto out;
833 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
834
835 if ((error = mfi_mapcmd(sc, cm)) != 0) {
836 device_printf(sc->mfi_dev, "Failed to get controller info\n");
837 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
838 MFI_SECTOR_LEN;
839 error = 0;
840 goto out;
841 }
842
843 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
844 BUS_DMASYNC_POSTREAD);
845 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
846
847 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
848 max_sectors_2 = ci->max_request_size;
849 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
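	/*
	 * Both limits are expressed in sectors, matching the
	 * MFI_SECTOR_LEN-based fallback used above.
	 */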
850
851out:
852 if (ci)
853 kfree(ci, M_MFIBUF);
854 if (cm)
855 mfi_release_command(cm);
856 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
857 return (error);
858}
859
860static int
861mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
862{
863 struct mfi_command *cm = NULL;
864 int error;
865
866 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
867 (void **)log_state, sizeof(**log_state));
868 if (error)
869 goto out;
870 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
871
872 if ((error = mfi_mapcmd(sc, cm)) != 0) {
873 device_printf(sc->mfi_dev, "Failed to get log state\n");
874 goto out;
875 }
876
877 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
878 BUS_DMASYNC_POSTREAD);
879 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
880
881out:
882 if (cm)
883 mfi_release_command(cm);
884
885 return (error);
886}
887
888static int
889mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
890{
891 struct mfi_evt_log_state *log_state = NULL;
892 union mfi_evt class_locale;
893 int error = 0;
894 uint32_t seq;
895
896 class_locale.members.reserved = 0;
897 class_locale.members.locale = mfi_event_locale;
898 class_locale.members.evt_class = mfi_event_class;
899
900 if (seq_start == 0) {
901 error = mfi_get_log_state(sc, &log_state);
902 if (error) {
903 if (log_state)
904 kfree(log_state, M_MFIBUF);
905 return (error);
906 }
907
908 /*
909 * Walk through any events that fired since the last
910 * shutdown.
911 */
912 mfi_parse_entries(sc, log_state->shutdown_seq_num,
913 log_state->newest_seq_num);
914 seq = log_state->newest_seq_num;
915 } else
916 seq = seq_start;
917 mfi_aen_register(sc, seq, class_locale.word);
918 if (log_state != NULL)
919 kfree(log_state, M_MFIBUF);
920
921 return 0;
922}
923
924static int
925mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
926{
927
928 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
929 cm->cm_complete = NULL;
930
932 /*
933 * MegaCli can issue a DCMD of 0. In this case do nothing
934 * and return 0 to it as status
935 */
936 if (cm->cm_frame->dcmd.opcode == 0) {
937 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
938 cm->cm_error = 0;
939 return (cm->cm_error);
940 }
941 mfi_enqueue_ready(cm);
942 mfi_startio(sc);
943 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
944 lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
945 return (cm->cm_error);
946}
947
948void
949mfi_free(struct mfi_softc *sc)
950{
951 struct mfi_command *cm;
952 int i;
953
954 callout_stop(&sc->mfi_watchdog_callout); /* XXX callout_drain() */
955
956 if (sc->mfi_cdev != NULL)
957 destroy_dev(sc->mfi_cdev);
958 dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
959
960 if (sc->mfi_total_cmds != 0) {
961 for (i = 0; i < sc->mfi_total_cmds; i++) {
962 cm = &sc->mfi_commands[i];
963 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
964 }
965 kfree(sc->mfi_commands, M_MFIBUF);
966 }
967
968 if (sc->mfi_intr)
969 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
970 if (sc->mfi_irq != NULL)
971 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
972 sc->mfi_irq);
973
974 if (sc->mfi_sense_busaddr != 0)
975 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
976 if (sc->mfi_sense != NULL)
977 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
978 sc->mfi_sense_dmamap);
979 if (sc->mfi_sense_dmat != NULL)
980 bus_dma_tag_destroy(sc->mfi_sense_dmat);
981
982 if (sc->mfi_frames_busaddr != 0)
983 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
984 if (sc->mfi_frames != NULL)
985 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
986 sc->mfi_frames_dmamap);
987 if (sc->mfi_frames_dmat != NULL)
988 bus_dma_tag_destroy(sc->mfi_frames_dmat);
989
990 if (sc->mfi_comms_busaddr != 0)
991 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
992 if (sc->mfi_comms != NULL)
993 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
994 sc->mfi_comms_dmamap);
995 if (sc->mfi_comms_dmat != NULL)
996 bus_dma_tag_destroy(sc->mfi_comms_dmat);
997
998 if (sc->mfi_buffer_dmat != NULL)
999 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1000 if (sc->mfi_parent_dmat != NULL)
1001 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1002
1003 if (sc->mfi_sysctl_tree != NULL)
1004 sysctl_ctx_free(&sc->mfi_sysctl_ctx);
1005
1006#if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1007
1008 if (mtx_initialized(&sc->mfi_io_lock)) {
1009 lockuninit(&sc->mfi_io_lock);
1010 sx_destroy(&sc->mfi_config_lock);
1011 }
1012#endif
1013
1014 lockuninit(&sc->mfi_io_lock);
1015 lockuninit(&sc->mfi_config_lock);
1016
1017 return;
1018}
1019
1020static void
1021mfi_startup(void *arg)
1022{
1023 struct mfi_softc *sc;
1024
1025 sc = (struct mfi_softc *)arg;
1026
1027 config_intrhook_disestablish(&sc->mfi_ich);
1028
1029 sc->mfi_enable_intr(sc);
1030 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1031 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1032 mfi_ldprobe(sc);
1033 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1034 mfi_syspdprobe(sc);
1035 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1036 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1037}
1038
1039static void
1040mfi_intr(void *arg)
1041{
1042 struct mfi_softc *sc;
1043 struct mfi_command *cm;
1044 uint32_t pi, ci, context;
1045
1046 sc = (struct mfi_softc *)arg;
1047
1048 if (sc->mfi_check_clear_intr(sc))
1049 return;
1050
1051 pi = sc->mfi_comms->hw_pi;
1052 ci = sc->mfi_comms->hw_ci;
1053 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1054 while (ci != pi) {
1055 context = sc->mfi_comms->hw_reply_q[ci];
1056 if (context < sc->mfi_max_fw_cmds) {
1057 cm = &sc->mfi_commands[context];
1058 mfi_remove_busy(cm);
1059 cm->cm_error = 0;
1060 mfi_complete(sc, cm);
1061 }
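		/*
		 * The reply queue holds mfi_max_fw_cmds + 1 entries (see
		 * mfi_comms_init()), so the consumer index wraps at that
		 * boundary rather than at a power of two.
		 */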
1062 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1063 ci = 0;
1064 }
1065 }
1066
1067 sc->mfi_comms->hw_ci = ci;
1068
1069	/* Give deferred I/O a chance to run */
1070 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1071 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1072 mfi_startio(sc);
1073 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1074
1075 return;
1076}
1077
1078int
1079mfi_shutdown(struct mfi_softc *sc)
1080{
1081 struct mfi_dcmd_frame *dcmd;
1082 struct mfi_command *cm;
1083 int error;
1084
1085 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1086 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1087 if (error) {
1088 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1089 return (error);
1090 }
1091
1092 if (sc->mfi_aen_cm != NULL)
1093 mfi_abort(sc, sc->mfi_aen_cm);
1094
1095 dcmd = &cm->cm_frame->dcmd;
1096 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1097 cm->cm_flags = MFI_CMD_POLLED;
1098 cm->cm_data = NULL;
1099
1100 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1101 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1102 }
1103
1104 mfi_release_command(cm);
1105 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1106 return (error);
1107}

1108static void
1109mfi_syspdprobe(struct mfi_softc *sc)
1110{
1111 struct mfi_frame_header *hdr;
1112 struct mfi_command *cm = NULL;
1113 struct mfi_pd_list *pdlist = NULL;
1114 struct mfi_system_pd *syspd;
1115 int error, i;
1116
1117 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1118 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1119 /* Add SYSTEM PD's */
1120 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1121 (void **)&pdlist, sizeof(*pdlist));
1122 if (error) {
1123		device_printf(sc->mfi_dev, "Error while forming syspd list\n");
1124 goto out;
1125 }
1126
1127 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1128 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1129 cm->cm_frame->dcmd.mbox[1] = 0;
1130 if (mfi_mapcmd(sc, cm) != 0) {
1131 device_printf(sc->mfi_dev, "Failed to get syspd device list\n");
1132 goto out;
1133 }
1134	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1135 BUS_DMASYNC_POSTREAD);
1136 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1137 hdr = &cm->cm_frame->header;
1138 if (hdr->cmd_status != MFI_STAT_OK) {
1139 device_printf(sc->mfi_dev, "MFI_DCMD_PD_LIST_QUERY failed %x\n",
1140 hdr->cmd_status);
1141 goto out;
1142 }
1143 for (i = 0; i < pdlist->count; i++) {
1144 if (pdlist->addr[i].device_id == pdlist->addr[i].encl_device_id)
1145 goto skip_sys_pd_add;
1146 /* Get each PD and add it to the system */
1147 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
1148		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1149 if (syspd->pd_id == pdlist->addr[i].device_id)
1150 goto skip_sys_pd_add;
1151 }
1152 }
1153		mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1154skip_sys_pd_add:
1155 ;
1156 }
1157 /* Delete SYSPD's whose state has been changed */
1158 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
1159		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1160			for (i = 0; i < pdlist->count; i++) {
1161 if (syspd->pd_id == pdlist->addr[i].device_id)
1162 goto skip_sys_pd_delete;
1163 }
1164 get_mplock();
1165			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1166 rel_mplock();
1167skip_sys_pd_delete:
1168 ;
1169 }
1170 }
1171out:
1172 if (pdlist)
1173 kfree(pdlist, M_MFIBUF);
1174 if (cm)
1175 mfi_release_command(cm);
1176}
1177
1178static void
1179mfi_ldprobe(struct mfi_softc *sc)
1180{
1181 struct mfi_frame_header *hdr;
1182 struct mfi_command *cm = NULL;
1183 struct mfi_ld_list *list = NULL;
1184 struct mfi_disk *ld;
1185 int error, i;
1186
1187 KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
1188 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1189
1190 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1191 (void **)&list, sizeof(*list));
1192 if (error)
1193 goto out;
1194
1195 cm->cm_flags = MFI_CMD_DATAIN;
1196 if (mfi_wait_command(sc, cm) != 0) {
1197 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1198 goto out;
1199 }
1200
1201 hdr = &cm->cm_frame->header;
1202 if (hdr->cmd_status != MFI_STAT_OK) {
1203 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1204 hdr->cmd_status);
1205 goto out;
1206 }
1207
1208 for (i = 0; i < list->ld_count; i++) {
1209 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1210 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1211 goto skip_add;
1212 }
1213 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1214 skip_add:;
1215 }
1216out:
1217 if (list)
1218 kfree(list, M_MFIBUF);
1219 if (cm)
1220 mfi_release_command(cm);
1221
1222 return;
1223}
1224
1225/*
1226 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1227 * bits 24-31 are all set, then it is instead the number of seconds
1228 * since boot.
1229 */
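/*
 * Example (illustrative): 0xff00012c renders as "boot + 300s", while
 * 0x0000012c renders as "300s", i.e. 300 seconds past the 2000 epoch.
 */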
1230static const char *
1231format_timestamp(uint32_t timestamp)
1232{
1233 static char buffer[32];
1234
1235 if ((timestamp & 0xff000000) == 0xff000000)
1236 ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1237 0x00ffffff);
1238 else
1239 ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
1240 return (buffer);
1241}
1242
1243static const char *
1244format_class(int8_t class)
1245{
1246 static char buffer[6];
1247
1248 switch (class) {
1249 case MFI_EVT_CLASS_DEBUG:
1250 return ("debug");
1251 case MFI_EVT_CLASS_PROGRESS:
1252 return ("progress");
1253 case MFI_EVT_CLASS_INFO:
1254 return ("info");
1255 case MFI_EVT_CLASS_WARNING:
1256 return ("WARN");
1257 case MFI_EVT_CLASS_CRITICAL:
1258 return ("CRIT");
1259 case MFI_EVT_CLASS_FATAL:
1260 return ("FATAL");
1261 case MFI_EVT_CLASS_DEAD:
1262 return ("DEAD");
1263 default:
1264 ksnprintf(buffer, sizeof(buffer), "%d", class);
1265 return (buffer);
1266 }
1267}
1268
1269static void
1270mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1271{
1272
1273 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1274 format_timestamp(detail->time), detail->evt_class.members.locale,
1275 format_class(detail->evt_class.members.evt_class), detail->description);
1276}
1277
1278static int
1279mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1280{
1281 struct mfi_command *cm;
1282 struct mfi_dcmd_frame *dcmd;
1283 union mfi_evt current_aen, prior_aen;
1284 struct mfi_evt_detail *ed = NULL;
1285 int error = 0;
1286
1287 current_aen.word = locale;
1288 if (sc->mfi_aen_cm != NULL) {
1289 prior_aen.word =
1290 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1291 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1292 !((prior_aen.members.locale & current_aen.members.locale)
1293 ^current_aen.members.locale)) {
1294 return (0);
1295 } else {
1296 prior_aen.members.locale |= current_aen.members.locale;
1297 if (prior_aen.members.evt_class
1298 < current_aen.members.evt_class)
1299 current_aen.members.evt_class =
1300 prior_aen.members.evt_class;
1301 mfi_abort(sc, sc->mfi_aen_cm);
1302 }
1303 }
1304
1305 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1306 (void **)&ed, sizeof(*ed));
1307 if (error) {
1308 goto out;
1309 }
1310
1311 dcmd = &cm->cm_frame->dcmd;
1312 ((uint32_t *)&dcmd->mbox)[0] = seq;
1313 ((uint32_t *)&dcmd->mbox)[1] = locale;
1314 cm->cm_flags = MFI_CMD_DATAIN;
1315 cm->cm_complete = mfi_aen_complete;
1316
1317 sc->mfi_aen_cm = cm;
1318
1319 mfi_enqueue_ready(cm);
1320 mfi_startio(sc);
1321
1322out:
1323 return (error);
1324}
1325
1326static void
1327mfi_aen_complete(struct mfi_command *cm)
1328{
1329 struct mfi_frame_header *hdr;
1330 struct mfi_softc *sc;
1331 struct mfi_evt_detail *detail;
1332 struct mfi_aen *mfi_aen_entry, *tmp;
1333 int seq = 0, aborted = 0;
1334
1335 sc = cm->cm_sc;
1336 hdr = &cm->cm_frame->header;
1337
1338 if (sc->mfi_aen_cm == NULL)
1339 return;
1340
1341 if (sc->mfi_aen_cm->cm_aen_abort ||
1342 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1343 sc->mfi_aen_cm->cm_aen_abort = 0;
1344 aborted = 1;
1345 } else {
1346 sc->mfi_aen_triggered = 1;
1347 if (sc->mfi_poll_waiting) {
1348 sc->mfi_poll_waiting = 0;
1349 KNOTE(&sc->mfi_kq.ki_note, 0);
1350 }
1351 detail = cm->cm_data;
1352 /*
1353 * XXX If this function is too expensive or is recursive, then
1354 * events should be put onto a queue and processed later.
1355 */
1356 mfi_decode_evt(sc, detail);
1357 seq = detail->seq + 1;
1358 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1359 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1360 aen_link);
1361 lwkt_gettoken(&proc_token);
1362 ksignal(mfi_aen_entry->p, SIGIO);
1363 lwkt_reltoken(&proc_token);
1364 kfree(mfi_aen_entry, M_MFIBUF);
1365 }
1366 }
1367
1368 kfree(cm->cm_data, M_MFIBUF);
1369 sc->mfi_aen_cm = NULL;
1370 wakeup(&sc->mfi_aen_cm);
1371 mfi_release_command(cm);
1372
1373 /* set it up again so the driver can catch more events */
1374 if (!aborted) {
1375 mfi_aen_setup(sc, seq);
1376 }
1377}
1378
1379#define MAX_EVENTS 15
1380
1381static int
1382mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1383{
1384 struct mfi_command *cm;
1385 struct mfi_dcmd_frame *dcmd;
1386 struct mfi_evt_list *el;
1387 union mfi_evt class_locale;
1388 int error, i, seq, size;
1389 uint32_t context = 0;
1390
1391 class_locale.members.reserved = 0;
1392 class_locale.members.locale = mfi_event_locale;
1393 class_locale.members.evt_class = mfi_event_class;
1394
1395 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1396 * (MAX_EVENTS - 1);
1397 el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1398 if (el == NULL)
1399 return (ENOMEM);
1400
1401 for (seq = start_seq;;) {
1402 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1403 kfree(el, M_MFIBUF);
1404 return (EBUSY);
1405 }
1406
1407 /* Zero out the MFI frame */
1408 context = cm->cm_frame->header.context;
1409 bzero(cm->cm_frame, sizeof(union mfi_frame));
1410 cm->cm_frame->header.context = context;
1411
1412 dcmd = &cm->cm_frame->dcmd;
1413 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1414 dcmd->header.cmd = MFI_CMD_DCMD;
1415 dcmd->header.timeout = 0;
1416 dcmd->header.data_len = size;
1417 dcmd->header.scsi_status = 0;
1418 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1419 ((uint32_t *)&dcmd->mbox)[0] = seq;
1420 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1421 cm->cm_sg = &dcmd->sgl;
1422 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1423 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1424 cm->cm_data = el;
1425 cm->cm_len = size;
1426
1427 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1428 device_printf(sc->mfi_dev,
1429 "Failed to get controller entries\n");
1430 mfi_release_command(cm);
1431 break;
1432 }
1433
1434 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1435 BUS_DMASYNC_POSTREAD);
1436 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1437
1438 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1439 mfi_release_command(cm);
1440 break;
1441 }
1442 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1443 device_printf(sc->mfi_dev,
1444 "Error %d fetching controller entries\n",
1445 dcmd->header.cmd_status);
1446 mfi_release_command(cm);
1447 break;
1448 }
1449 mfi_release_command(cm);
1450
1451 for (i = 0; i < el->count; i++) {
1452 /*
1453 * If this event is newer than 'stop_seq' then
1454 * break out of the loop. Note that the log
1455 * is a circular buffer so we have to handle
1456 * the case that our stop point is earlier in
1457 * the buffer than our start point.
1458 */
1459 if (el->event[i].seq >= stop_seq) {
1460 if (start_seq <= stop_seq)
1461 break;
1462 else if (el->event[i].seq < start_seq)
1463 break;
1464 }
1465 mfi_decode_evt(sc, &el->event[i]);
1466 }
1467 seq = el->event[el->count - 1].seq + 1;
1468 }
1469
1470 kfree(el, M_MFIBUF);
1471 return (0);
1472}
1473
1474static int
1475mfi_add_ld(struct mfi_softc *sc, int id)
1476{
1477 struct mfi_command *cm;
1478 struct mfi_dcmd_frame *dcmd = NULL;
1479 struct mfi_ld_info *ld_info = NULL;
1480 int error;
1481
1482 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1483
1484 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1485 (void **)&ld_info, sizeof(*ld_info));
1486 if (error) {
1487 device_printf(sc->mfi_dev,
1488 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1489 if (ld_info)
1490 kfree(ld_info, M_MFIBUF);
1491 return (error);
1492 }
1493 cm->cm_flags = MFI_CMD_DATAIN;
1494 dcmd = &cm->cm_frame->dcmd;
1495 dcmd->mbox[0] = id;
1496 if (mfi_wait_command(sc, cm) != 0) {
1497 device_printf(sc->mfi_dev,
1498 "Failed to get logical drive: %d\n", id);
1499 kfree(ld_info, M_MFIBUF);
1500 return (0);
1501 }
1502 if (ld_info->ld_config.params.isSSCD != 1) {
1503 mfi_add_ld_complete(cm);
1504 } else {
1505 mfi_release_command(cm);
1506		if (ld_info)	/* for SSCD drives, ld_info is freed here */
1507 kfree(ld_info, M_MFIBUF);
1508 }
1509 return (0);
1510}
1511
1512static void
1513mfi_add_ld_complete(struct mfi_command *cm)
1514{
1515 struct mfi_frame_header *hdr;
1516 struct mfi_ld_info *ld_info;
1517 struct mfi_softc *sc;
1518 device_t child;
1519
1520 sc = cm->cm_sc;
1521 hdr = &cm->cm_frame->header;
1522 ld_info = cm->cm_private;
1523
1524 if (hdr->cmd_status != MFI_STAT_OK) {
1525 kfree(ld_info, M_MFIBUF);
1526 mfi_release_command(cm);
1527 return;
1528 }
1529 mfi_release_command(cm);
1530
1531 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1532 get_mplock();
1533 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1534 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1535 kfree(ld_info, M_MFIBUF);
1536 rel_mplock();
1537 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1538 return;
1539 }
1540
1541 device_set_ivars(child, ld_info);
1542 device_set_desc(child, "MFI Logical Disk");
1543 bus_generic_attach(sc->mfi_dev);
1544 rel_mplock();
1545 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1546}
1547
1548static int
1549mfi_add_sys_pd(struct mfi_softc *sc, int id)
1550{
1551 struct mfi_command *cm;
1552 struct mfi_dcmd_frame *dcmd = NULL;
1553 struct mfi_pd_info *pd_info = NULL;
1554 int error;
1555
1556 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1557
1558	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1559 (void **)&pd_info, sizeof(*pd_info));
1560 if (error) {
1561 device_printf(sc->mfi_dev,
1562 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n", error);
1563 if (pd_info)
1564			kfree(pd_info, M_MFIBUF);
1565 return (error);
1566 }
1567 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1568 dcmd = &cm->cm_frame->dcmd;
1569 dcmd->mbox[0] = id;
1570 dcmd->header.scsi_status = 0;
1571 dcmd->header.pad0 = 0;
1572 if (mfi_mapcmd(sc, cm) != 0) {
1573 device_printf(sc->mfi_dev,
1574 "Failed to get physical drive info %d\n", id);
1575		kfree(pd_info, M_MFIBUF);
1576 return (0);
1577 }
1578 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1579 BUS_DMASYNC_POSTREAD);
1580	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1581 mfi_add_sys_pd_complete(cm);
1582 return (0);
1583}
1584
1585static void
1586mfi_add_sys_pd_complete(struct mfi_command *cm)
1587{
1588 struct mfi_frame_header *hdr;
1589 struct mfi_pd_info *pd_info;
1590 struct mfi_softc *sc;
1591 device_t child;
1592
1593 sc = cm->cm_sc;
1594 hdr = &cm->cm_frame->header;
1595 pd_info = cm->cm_private;
1596
1597 if (hdr->cmd_status != MFI_STAT_OK) {
1598 kfree(pd_info, M_MFIBUF);
1599 mfi_release_command(cm);
1600 return;
1601 }
1602 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1603		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1604 pd_info->ref.v.device_id);
1605 kfree(pd_info, M_MFIBUF);
1606 mfi_release_command(cm);
1607 return;
1608 }
1609 mfi_release_command(cm);
1610
1611 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1612 get_mplock();
1613 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1614 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1615 kfree(pd_info, M_MFIBUF);
1616 rel_mplock();
1617 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1618 return;
1619 }
1620
1621 device_set_ivars(child, pd_info);
1622 device_set_desc(child, "MFI System PD");
1623 bus_generic_attach(sc->mfi_dev);
1624 rel_mplock();
1625 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1626}
1627
1628static struct mfi_command *
1629mfi_bio_command(struct mfi_softc *sc)
1630{
1631 struct bio *bio;
1632 struct mfi_command *cm = NULL;
1633 struct mfi_disk *mfid;
1634
1635 /* reserving two commands to avoid starvation for IOCTL */
1636 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1637 return (NULL);
1638 if ((bio = mfi_dequeue_bio(sc)) == NULL)
1639 return (NULL);
1640 mfid = bio->bio_driver_info;
1641 if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1642 cm = mfi_build_syspdio(sc, bio);
1643 else
1644 cm = mfi_build_ldio(sc, bio);
1645 if (!cm)
1646		mfi_enqueue_bio(sc, bio);
1647 return cm;
1648}
1649
1650static struct mfi_command *
1651mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1652{
1653 struct mfi_command *cm;
1654 struct buf *bp;
1655 struct mfi_system_pd *disk;
1656 struct mfi_pass_frame *pass;
1657	int flags = 0, blkcount = 0;
1658 uint32_t context = 0;
1659
1660 if ((cm = mfi_dequeue_free(sc)) == NULL)
1661 return (NULL);
1662
1663 /* Zero out the MFI frame */
1664 context = cm->cm_frame->header.context;
1665 bzero(cm->cm_frame, sizeof(union mfi_frame));
1666 cm->cm_frame->header.context = context;
1667 bp = bio->bio_buf;
1668 pass = &cm->cm_frame->pass;
1669 bzero(pass->cdb, 16);
1670 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
1671 switch (bp->b_cmd & 0x03) {
1672 case BUF_CMD_READ:
1673 pass->cdb[0] = READ_10;
1674 flags = MFI_CMD_DATAIN;
1675 break;
1676 case BUF_CMD_WRITE:
1677 pass->cdb[0] = WRITE_10;
1678 flags = MFI_CMD_DATAOUT;
1679 break;
1680 default:
1681 panic("Invalid bio command");
1682 }
1683
1684 /* Cheat with the sector length to avoid a non-constant division */
1685 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1686 disk = bio->bio_driver_info;
1687 /* Fill the LBA and Transfer length in CDB */
1688 pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
1689 pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
1690 pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
1691 pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
1692 pass->cdb[7] = (blkcount & 0xff00) >> 8;
1693 pass->cdb[8] = (blkcount & 0x00ff);
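	/*
	 * The CDB fields above follow the big-endian SCSI READ(10)/
	 * WRITE(10) layout: a 32-bit LBA in bytes 2-5 and a 16-bit
	 * transfer length in bytes 7-8, so this path cannot address
	 * blocks beyond a 32-bit LBA.
	 */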
1694 pass->header.target_id = disk->pd_id;
1695 pass->header.timeout = 0;
1696 pass->header.flags = 0;
1697 pass->header.scsi_status = 0;
1698 pass->header.sense_len = MFI_SENSE_LEN;
1699 pass->header.data_len = bp->b_bcount;
1700 pass->header.cdb_len = 10;
1701#if defined(__x86_64__)
1702 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
1703 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
1704#else
1705 pass->sense_addr_lo = cm->cm_sense_busaddr;
1706 pass->sense_addr_hi = 0;
1707#endif
1708 cm->cm_complete = mfi_bio_complete;
1709 cm->cm_private = bio;
1710 cm->cm_data = bp->b_data;
1711 cm->cm_len = bp->b_bcount;
1712 cm->cm_sg = &pass->sgl;
1713 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
1714 cm->cm_flags = flags;
1715 return (cm);
1716}
1717
1718static struct mfi_command *
1719mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
1720{
1721 struct mfi_io_frame *io;
1722 struct buf *bp;
1723 struct mfi_disk *disk;
1724 struct mfi_command *cm;
1725 int flags, blkcount;
1726 uint32_t context = 0;
1727
1728 if ((cm = mfi_dequeue_free(sc)) == NULL)
1729 return (NULL);
1730
1731 /* Zero out the MFI frame */
1732 context = cm->cm_frame->header.context;
1733	bzero(cm->cm_frame, sizeof(union mfi_frame));
1734 cm->cm_frame->header.context = context;
1735 bp = bio->bio_buf;
1736 io = &cm->cm_frame->io;
1737 switch (bp->b_cmd & 0x03) {
1738 case BUF_CMD_READ:
1739 io->header.cmd = MFI_CMD_LD_READ;
1740 flags = MFI_CMD_DATAIN;
1741 break;
1742 case BUF_CMD_WRITE:
1743 io->header.cmd = MFI_CMD_LD_WRITE;
1744 flags = MFI_CMD_DATAOUT;
1745 break;
1746 default:
1747 panic("Invalid bio command");
1748 }
1749
1750 /* Cheat with the sector length to avoid a non-constant division */
1751 blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1752 disk = bio->bio_driver_info;
1753 io->header.target_id = disk->ld_id;
1754 io->header.timeout = 0;
1755 io->header.flags = 0;
1756 io->header.scsi_status = 0;
1757 io->header.sense_len = MFI_SENSE_LEN;
1758 io->header.data_len = blkcount;
1759#if defined(__x86_64__)
1760 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
1761 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
1762#else
1763 io->sense_addr_lo = cm->cm_sense_busaddr;
1764 io->sense_addr_hi = 0;
1765#endif
1766 io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
1767 io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
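	/*
	 * Unlike the READ(10)-style CDB used for system PDs above, the
	 * LD I/O frame carries a full 64-bit LBA split across
	 * lba_hi/lba_lo.
	 */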
1768 cm->cm_complete = mfi_bio_complete;
1769 cm->cm_private = bio;
1770 cm->cm_data = bp->b_data;
1771 cm->cm_len = bp->b_bcount;
1772 cm->cm_sg = &io->sgl;
1773 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1774 cm->cm_flags = flags;
1775 return (cm);
1776}
1777
1778static void
1779mfi_bio_complete(struct mfi_command *cm)
1780{
1781 struct bio *bio;
1782 struct buf *bp;
1783 struct mfi_frame_header *hdr;
1784 struct mfi_softc *sc;
1785
1786 bio = cm->cm_private;
1787 bp = bio->bio_buf;
1788 hdr = &cm->cm_frame->header;
1789 sc = cm->cm_sc;
1790
1791 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
1792 bp->b_flags |= B_ERROR;
1793 bp->b_error = EIO;
1794 device_printf(sc->mfi_dev, "I/O error, status= %d "
1795 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1796 mfi_print_sense(cm->cm_sc, cm->cm_sense);
1797 } else if (cm->cm_error != 0) {
1798 bp->b_flags |= B_ERROR;
1799 }
1800
1801 mfi_release_command(cm);
1802 mfi_disk_complete(bio);
1803}
1804
1805void
1806mfi_startio(struct mfi_softc *sc)
1807{
1808 struct mfi_command *cm;
1809 struct ccb_hdr *ccbh;
1810
1811 for (;;) {
1812 /* Don't bother if we're short on resources */
1813 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1814 break;
1815
1816 /* Try a command that has already been prepared */
1817 cm = mfi_dequeue_ready(sc);
1818
1819 if (cm == NULL) {
1820 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1821 cm = sc->mfi_cam_start(ccbh);
1822 }
1823
1824 /* Nope, so look for work on the bioq */
1825 if (cm == NULL)
1826 cm = mfi_bio_command(sc);
1827
1828 /* No work available, so exit */
1829 if (cm == NULL)
1830 break;
1831
1832 /* Send the command to the controller */
1833 if (mfi_mapcmd(sc, cm) != 0) {
1834 mfi_requeue_ready(cm);
1835 break;
1836 }
1837 }
1838}
1839
1840static int
1841mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1842{
1843 int error, polled;
1844
1845 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1846
1847 if (cm->cm_data != NULL) {
1848 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1849 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1850 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1851 if (error == EINPROGRESS) {
1852 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1853 return (0);
1854 }
1855 } else {
1856 error = mfi_send_frame(sc, cm);
1857 }
1858
1859 return (error);
1860}
1861
1862static void
1863mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1864{
1865 struct mfi_frame_header *hdr;
1866 struct mfi_command *cm;
1867 union mfi_sgl *sgl;
1868 struct mfi_softc *sc;
1869 int i, dir;
1870 int sgl_mapped = 0;
1871 int sge_size = 0;
1872
1873 cm = (struct mfi_command *)arg;
1874 sc = cm->cm_sc;
1875 hdr = &cm->cm_frame->header;
1876 sgl = cm->cm_sg;
1877
1878 if (error) {
1879 kprintf("error %d in callback\n", error);
1880 cm->cm_error = error;
1881 mfi_complete(sc, cm);
1882 return;
1883 }
1884
1885 /* Use the IEEE SGL only for I/Os on a SKINNY controller.
1886 * For other commands on a SKINNY controller use either
1887 * sg32 or sg64, based on sizeof(bus_addr_t).
1888 * Also calculate the total frame size based on the type
1889 * of SGL used.
1890 */
1891 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
1892 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
1893 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
1894 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
1895 for (i = 0; i < nsegs; i++) {
1896 sgl->sg_skinny[i].addr = segs[i].ds_addr;
1897 sgl->sg_skinny[i].len = segs[i].ds_len;
1898 sgl->sg_skinny[i].flag = 0;
1899 }
1900 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
1901 sgl_mapped = 1;
1902 sge_size = sizeof(struct mfi_sg_skinny);
1903 }
1904 if (!sgl_mapped) {
1905 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1906 for (i = 0; i < nsegs; i++) {
1907 sgl->sg32[i].addr = segs[i].ds_addr;
1908 sgl->sg32[i].len = segs[i].ds_len;
1909 }
1910 sge_size = sizeof(struct mfi_sg32);
1911 } else {
1912 for (i = 0; i < nsegs; i++) {
1913 sgl->sg64[i].addr = segs[i].ds_addr;
1914 sgl->sg64[i].len = segs[i].ds_len;
1915 }
1916 hdr->flags |= MFI_FRAME_SGL64;
1917 sge_size = sizeof(struct mfi_sg64);
1918 }
1919 }
1920 hdr->sg_count = nsegs;
1921
1922 dir = 0;
1923 if (cm->cm_flags & MFI_CMD_DATAIN) {
1924 dir |= BUS_DMASYNC_PREREAD;
1925 hdr->flags |= MFI_FRAME_DIR_READ;
1926 }
1927 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1928 dir |= BUS_DMASYNC_PREWRITE;
1929 hdr->flags |= MFI_FRAME_DIR_WRITE;
1930 }
1931 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1932 cm->cm_flags |= MFI_CMD_MAPPED;
1933
1934 /*
1935 * When calculating the total number of frames in the
1936 * compound frame, it's assumed that there is always at
1937 * least one frame, so there is no need to compensate for
1938 * the remainder of the following division.
1939 */
1940 cm->cm_total_frame_size += (sge_size * nsegs);
1941 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
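	/*
	 * Worked example (illustrative, assuming 64-byte MFI frames and
	 * 12-byte sg64 entries): 16 segments add 16 * 12 = 192 bytes of
	 * SGL; if the base frame was 48 bytes, the total is 240, and
	 * (240 - 1) / 64 = 3 extra frames follow the first one.
	 */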
1942
1943 mfi_send_frame(sc, cm);
1944}
1945
1946static int
1947mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1948{
1949 struct mfi_frame_header *hdr;
1950 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1951
1952 hdr = &cm->cm_frame->header;
1953
1954 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1955 cm->cm_timestamp = time_second;
1956 mfi_enqueue_busy(cm);
1957 } else {
1958 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1959 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1960 }
1961
1962 /*
1963 * The bus address of the command is aligned on a 64 byte boundary,
1964 * leaving the 6 least significant bits zero. For whatever reason, the
1965 * hardware wants the address shifted right by three, leaving just
1966 * 3 zero bits. These three bits are then used as a prefetching
1967 * hint for the hardware to predict how many frames need to be
1968 * fetched across the bus. If a command has more than 8 frames
1969 * then the 3 bits are set to 0x7 and the firmware uses other
1970 * information in the command to determine the total amount to fetch.
1971 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1972 * are enough for both 32-bit and 64-bit systems.
1973 */
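	/*
	 * Concrete example (illustrative): a frame at bus address
	 * 0x12340 is 64-byte aligned, so its low 6 bits are zero;
	 * shifted right by 3 it becomes 0x2468, whose low 3 bits are
	 * zero and are then filled in with min(cm_extra_frames, 7) as
	 * the fetch hint by the issue routine.
	 */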
1974 if (cm->cm_extra_frames > 7)
1975 cm->cm_extra_frames = 7;
1976
1977 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
1978
1979 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1980 return (0);
1981
1982 /* This is a polled command, so busy-wait for it to complete. */
1983 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1984 DELAY(1000);
1985 tm -= 1;
1986 if (tm <= 0)
1987 break;
1988 }
1989
1990 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1991 device_printf(sc->mfi_dev, "Frame %p timed out "
1992 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1993 return (ETIMEDOUT);
1994 }
1995
1996 return (0);
1997}
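/*
 * Note on the polled path above (illustrative arithmetic): tm starts
 * at MFI_POLL_TIMEOUT_SECS * 1000 and each loop iteration spends
 * DELAY(1000), roughly one millisecond, so a polled command gets
 * about MFI_POLL_TIMEOUT_SECS seconds to complete before ETIMEDOUT
 * is returned.
 */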
1998
1999static void
2000mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2001{
2002 int dir;
2003
2004 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2005 dir = 0;
2006 if (cm->cm_flags & MFI_CMD_DATAIN)
2007 dir |= BUS_DMASYNC_POSTREAD;
2008 if (cm->cm_flags & MFI_CMD_DATAOUT)
2009 dir |= BUS_DMASYNC_POSTWRITE;
2010
2011 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2012 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2013 cm->cm_flags &= ~MFI_CMD_MAPPED;
2014 }
2015
2016 cm->cm_flags |= MFI_CMD_COMPLETED;
2017
2018 if (cm->cm_complete != NULL)
2019 cm->cm_complete(cm);
2020 else
2021 wakeup(cm);
2022}
2023
2024static int
2025mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2026{
2027 struct mfi_command *cm;
2028 struct mfi_abort_frame *abort;
2029 int i = 0;
2030 uint32_t context = 0;
2031
2032 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2033
2034 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2035 return (EBUSY);
2036 }
2037
2038 /* Zero out the MFI frame */
2039 context = cm->cm_frame->header.context;
2040 bzero(cm->cm_frame, sizeof(union mfi_frame));
2041 cm->cm_frame->header.context = context;
2042
2043 abort = &cm->cm_frame->abort;
2044 abort->header.cmd = MFI_CMD_ABORT;
2045 abort->header.flags = 0;
2046 abort->header.scsi_status = 0;
2047 abort->abort_context = cm_abort->cm_frame->header.context;
2048#if defined(__x86_64__)
2049 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr & 0xFFFFFFFF;
2050 abort->abort_mfi_addr_hi = (cm_abort->cm_frame_busaddr & 0xFFFFFFFF00000000) >> 32;
2051#else
2052 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
2053 abort->abort_mfi_addr_hi = 0;
2054#endif
2055 cm->cm_data = NULL;
2056 cm->cm_flags = MFI_CMD_POLLED;
2057
2058 sc->mfi_aen_cm->cm_aen_abort = 1;
2059 mfi_mapcmd(sc, cm);
2060 mfi_release_command(cm);
2061
2062 while (i < 5 && sc->mfi_aen_cm != NULL) {
2063 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
2064 i++;
2065 }
2066
2067 return (0);
2068}
2069
2070int
2071mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
2072{
2073 struct mfi_command *cm;
2074 struct mfi_io_frame *io;
2075 int error;
2076 uint32_t context = 0;
2077
2078 if ((cm = mfi_dequeue_free(sc)) == NULL)
2079 return (EBUSY);
2080
2081 /* Zero out the MFI frame */
2082 context = cm->cm_frame->header.context;
2083 bzero(cm->cm_frame, sizeof(union mfi_frame));
2084 cm->cm_frame->header.context = context;
2085
2086 io = &cm->cm_frame->io;
2087 io->header.cmd = MFI_CMD_LD_WRITE;
2088 io->header.target_id = id;
2089 io->header.timeout = 0;
2090 io->header.flags = 0;
2091 io->header.scsi_status = 0;
2092 io->header.sense_len = MFI_SENSE_LEN;
2093 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2094#if defined(__x86_64__)
2095 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2096 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2097#else
2098 io->sense_addr_lo = cm->cm_sense_busaddr;
2099 io->sense_addr_hi = 0;
2100#endif
2101 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2102 io->lba_lo = lba & 0xffffffff;
2103 cm->cm_data = virt;
2104 cm->cm_len = len;
2105 cm->cm_sg = &io->sgl;
2106 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2107 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2108
2109 error = mfi_mapcmd(sc, cm);
2110 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2111 BUS_DMASYNC_POSTWRITE);
2112 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2113 mfi_release_command(cm);
2114
2115 return (error);
2116}
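/*
 * Usage note (hedged): the dump helpers here run with MFI_CMD_POLLED
 * because they are meant for contexts like a kernel crash dump, where
 * interrupts and sleeping cannot be relied on.  A hypothetical caller
 * would push one buffer at a time:
 *
 *	error = mfi_dump_blocks(sc, ld_id, lba, buf, MFI_SECTOR_LEN);
 *	if (error != 0)
 *		return (error);
 */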
2117
2118int
2119mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2120 int len)
2121{
2122 struct mfi_command *cm;
2123 struct mfi_pass_frame *pass;
2124 int error;
2125 int blkcount = 0;
2126
2127 if ((cm = mfi_dequeue_free(sc)) == NULL)
2128 return (EBUSY);
2129
2130 pass = &cm->cm_frame->pass;
2131 bzero(pass->cdb, 16);
2132 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2133 pass->cdb[0] = WRITE_10;
2134 pass->cdb[2] = (lba & 0xff000000) >> 24;
2135 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2136 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2137 pass->cdb[5] = (lba & 0x000000ff);
2138 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2139 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2140 pass->cdb[8] = (blkcount & 0x00ff);
2141 pass->header.target_id = id;
2142 pass->header.timeout = 0;
2143 pass->header.flags = 0;
2144 pass->header.scsi_status = 0;
2145 pass->header.sense_len = MFI_SENSE_LEN;
2146 pass->header.data_len = len;
2147 pass->header.cdb_len = 10;
2148#if defined(__x86_64__)
2149 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2150 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2151#else
2152 pass->sense_addr_lo = cm->cm_sense_busaddr;
2153 pass->sense_addr_hi = 0;
2154#endif
2155 cm->cm_data = virt;
2156 cm->cm_len = len;
2157 cm->cm_sg = &pass->sgl;
2158 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2159 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2160
2161 error = mfi_mapcmd(sc, cm);
2162 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2163 BUS_DMASYNC_POSTWRITE);
2164 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2165 mfi_release_command(cm);
2166
2167 return (error);
2168}
2169
2170static int
2171mfi_open(struct dev_open_args *ap)
2172{
2173 cdev_t dev = ap->a_head.a_dev;
2174 struct mfi_softc *sc;
2175 int error;
2176
2177 sc = dev->si_drv1;
2178
2179 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2180 if (sc->mfi_detaching)
2181 error = ENXIO;
2182 else {
2183 sc->mfi_flags |= MFI_FLAGS_OPEN;
2184 error = 0;
2185 }
2186 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2187
2188 return (error);
2189}
2190
2191static int
2192mfi_close(struct dev_close_args *ap)
2193{
2194 cdev_t dev = ap->a_head.a_dev;
2195 struct mfi_softc *sc;
2196 struct mfi_aen *mfi_aen_entry, *tmp;
2197
2198 sc = dev->si_drv1;
2199
2200 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2201 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2202
2203 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2204 if (mfi_aen_entry->p == curproc) {
2205 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2206 aen_link);
2207 kfree(mfi_aen_entry, M_MFIBUF);
2208 }
2209 }
2210 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2211 return (0);
2212}
2213
2214static int
2215mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2216{
2217
2218 switch (opcode) {
2219 case MFI_DCMD_LD_DELETE:
2220 case MFI_DCMD_CFG_ADD:
2221 case MFI_DCMD_CFG_CLEAR:
2222 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
2223 return (1);
2224 default:
2225 return (0);
2226 }
2227}
2228
2229static void
2230mfi_config_unlock(struct mfi_softc *sc, int locked)
2231{
2232
2233 if (locked)
2234 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2235}
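/*
 * Illustrative usage of the pair above, mirroring mfi_user_command()
 * below (sketch only, not additional driver code): the configuration
 * lock is taken only for config-mutating opcodes, and the flag that
 * mfi_config_lock() returns tells the caller whether to release it.
 */
#if 0	/* example only */
	int locked;

	locked = mfi_config_lock(sc, opcode);
	/* ... build and issue the DCMD ... */
	mfi_config_unlock(sc, locked);
#endif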
2236
2237/* Perform pre-issue checks on commands from userland and possibly veto them. */
2238static int
2239mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2240{
2241 struct mfi_disk *ld, *ld2;
2242 int error;
2243 struct mfi_system_pd *syspd = NULL;
2244 uint16_t syspd_id;
2245 uint16_t *mbox;
2246
2247 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2248 error = 0;
2249 switch (cm->cm_frame->dcmd.opcode) {
2250 case MFI_DCMD_LD_DELETE:
2251 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2252 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2253 break;
2254 }
2255 if (ld == NULL)
2256 error = ENOENT;
2257 else
2258 error = mfi_disk_disable(ld);
2259 break;
2260 case MFI_DCMD_CFG_CLEAR:
2261 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2262 error = mfi_disk_disable(ld);
2263 if (error)
2264 break;
2265 }
2266 if (error) {
2267 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2268 if (ld2 == ld)
2269 break;
2270 mfi_disk_enable(ld2);
2271 }
2272 }
2273 break;
2274 case MFI_DCMD_PD_STATE_SET:
2275 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2276 syspd_id = mbox[0];
2277 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2278 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2279 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2280 if (syspd->pd_id == syspd_id)
2281 break;
2282 }
2283 }
2284 } else {
2285 break;
2286 }
2287 if (syspd)
2288 error = mfi_syspd_disable(syspd);
2289 break;
2290 default:
2291 break;
2292 }
2293 return (error);
2294}
2295
2296/* Perform post-issue checks on commands from userland. */
2297static void
2298mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2299{
2300 struct mfi_disk *ld, *ldn;
2301 struct mfi_system_pd *syspd = NULL;
2302 uint16_t syspd_id;
2303 uint16_t *mbox;
2304
2305 switch (cm->cm_frame->dcmd.opcode) {
2306 case MFI_DCMD_LD_DELETE:
2307 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2308 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2309 break;
2310 }
2311 KASSERT(ld != NULL, ("volume disappeared"));
2312 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2313 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2314 get_mplock();
2315 device_delete_child(sc->mfi_dev, ld->ld_dev);
2316 rel_mplock();
2317 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2318 } else
2319 mfi_disk_enable(ld);
2320 break;
2321 case MFI_DCMD_CFG_CLEAR:
2322 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2323 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2324 get_mplock();
2325 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2326 device_delete_child(sc->mfi_dev, ld->ld_dev);
2327 }
2328 rel_mplock();
2329 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2330 } else {
2331 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2332 mfi_disk_enable(ld);
2333 }
2334 break;
2335 case MFI_DCMD_CFG_ADD:
2336 mfi_ldprobe(sc);
2337 break;
2338 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2339 mfi_ldprobe(sc);
2340 break;
2341 case MFI_DCMD_PD_STATE_SET:
2342 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2343 syspd_id = mbox[0];
2344 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2345 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2346 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2347 if (syspd->pd_id == syspd_id)
2348 break;
2349 }
2350 }
2351 } else {
2352 break;
2353 }
2354 /* If the transition fails then enable the syspd again */
2355 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2356 mfi_syspd_enable(syspd);
2357 break;
2358 }
2359}
2360
2361static int
2362mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2363{
2364 struct mfi_command *cm;
2365 struct mfi_dcmd_frame *dcmd;
2366 void *ioc_buf = NULL;
2367 uint32_t context;
2368 int error = 0, locked;
2369
2371 if (ioc->buf_size > 0) {
2372 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2373 if (ioc_buf == NULL) {
2374 return (ENOMEM);
2375 }
2376 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2377 if (error) {
2378 device_printf(sc->mfi_dev, "failed to copyin\n");
2379 kfree(ioc_buf, M_MFIBUF);
2380 return (error);
2381 }
2382 }
2383
2384 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2385
2386 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2387 while ((cm = mfi_dequeue_free(sc)) == NULL)
2388 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2389
2390 /* Save context for later */
2391 context = cm->cm_frame->header.context;
2392
2393 dcmd = &cm->cm_frame->dcmd;
2394 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2395
2396 cm->cm_sg = &dcmd->sgl;
2397 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2398 cm->cm_data = ioc_buf;
2399 cm->cm_len = ioc->buf_size;
2400
2401 /* restore context */
2402 cm->cm_frame->header.context = context;
2403
2404 /* Cheat since we don't know if we're writing or reading */
2405 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2406
2407 error = mfi_check_command_pre(sc, cm);
2408 if (error)
2409 goto out;
2410
2411 error = mfi_wait_command(sc, cm);
2412 if (error) {
2413 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2414 goto out;
2415 }
2416 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2417 mfi_check_command_post(sc, cm);
2418out:
2419 mfi_release_command(cm);
2420 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2421 mfi_config_unlock(sc, locked);
2422 if (ioc->buf_size > 0)
2423 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2424 if (ioc_buf)
2425 kfree(ioc_buf, M_MFIBUF);
2426 return (error);
2427}
2428
2429#ifdef __x86_64__
2430#define PTRIN(p) ((void *)(uintptr_t)(p))
2431#else
2432#define PTRIN(p) (p)
2433#endif
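/*
 * PTRIN() widens a 32-bit user pointer from a compat ioctl into a
 * native void *.  Illustrative use, as in the copyin paths below:
 *
 *	void *ubuf = PTRIN(ioc32->mfi_sgl[i].iov_base);
 *	error = copyin(ubuf, temp, ioc32->mfi_sgl[i].iov_len);
 */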
2434
2435static int
2436mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2437{
2438 struct mfi_config_data *conf_data = cm->cm_data;
2439 struct mfi_command *ld_cm = NULL;
2440 struct mfi_ld_info *ld_info = NULL;
2441 int error = 0;
2442
2443 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2444 (conf_data->ld[0].params.isSSCD == 1)) {
2445 error = 1;
2446 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2447 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2448 (void **)&ld_info, sizeof(*ld_info));
2449 if (error) {
2450 device_printf(sc->mfi_dev, "Failed to allocate "
2451 "MFI_DCMD_LD_GET_INFO %d\n", error);
2452 if (ld_info)
2453 kfree(ld_info, M_MFIBUF);
2454 return 0;
2455 }
2456 ld_cm->cm_flags = MFI_CMD_DATAIN;
2457 ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2458 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2459 if (mfi_wait_command(sc, ld_cm) != 0) {
2460 device_printf(sc->mfi_dev, "failed to get log drv\n");
2461 mfi_release_command(ld_cm);
2462 kfree(ld_info, M_MFIBUF);
2463 return 0;
2464 }
2465
2466 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2467 kfree(ld_info, M_MFIBUF);
2468 mfi_release_command(ld_cm);
2469 return 0;
2470 } else {
2471 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2472 }
2473
2474 if (ld_info->ld_config.params.isSSCD == 1)
2475 error = 1;
2476
2477 mfi_release_command(ld_cm);
2478 kfree(ld_info, M_MFIBUF);
2479 }
2480 return error;
2481}
2482
2483static int
2484mfi_ioctl(struct dev_ioctl_args *ap)
2485{
2486 cdev_t dev = ap->a_head.a_dev;
2487 u_long cmd = ap->a_cmd;
2488 int flag = ap->a_fflag;
2489 caddr_t arg = ap->a_data;
2490 struct mfi_softc *sc;
2491 union mfi_statrequest *ms;
2492 struct mfi_ioc_packet *ioc;
2493#ifdef __x86_64__
2494 struct mfi_ioc_packet32 *ioc32;
2495#endif
2496 struct mfi_ioc_aen *aen;
2497 struct mfi_command *cm = NULL;
2498 uint32_t context;
2499 union mfi_sense_ptr sense_ptr;
2500 uint8_t *data = NULL, *temp, skip_pre_post = 0;
2501 int i;
2502 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2503#ifdef __x86_64__
2504 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2505 struct mfi_ioc_passthru iop_swab;
2506#endif
2507 int error, locked;
2508
2509 sc = dev->si_drv1;
2510 error = 0;
2511
2512 switch (cmd) {
2513 case MFIIO_STATS:
2514 ms = (union mfi_statrequest *)arg;
2515 switch (ms->ms_item) {
2516 case MFIQ_FREE:
2517 case MFIQ_BIO:
2518 case MFIQ_READY:
2519 case MFIQ_BUSY:
2520 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2521 sizeof(struct mfi_qstat));
2522 break;
2523 default:
2524 error = ENOIOCTL;
2525 break;
2526 }
2527 break;
2528 case MFIIO_QUERY_DISK:
2529 {
2530 struct mfi_query_disk *qd;
2531 struct mfi_disk *ld;
2532
2533 qd = (struct mfi_query_disk *)arg;
2534 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2535 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2536 if (ld->ld_id == qd->array_id)
2537 break;
2538 }
2539 if (ld == NULL) {
2540 qd->present = 0;
2541 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2542 return (0);
2543 }
2544 qd->present = 1;
2545 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2546 qd->open = 1;
2547 bzero(qd->devname, SPECNAMELEN + 1);
2548 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2549 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2550 break;
2551 }
2552 case MFI_CMD:
2553#ifdef __x86_64__
2554 case MFI_CMD32:
2555#endif
2556 {
2557 devclass_t devclass;
2558 ioc = (struct mfi_ioc_packet *)arg;
2559 int adapter;
2560
2561 adapter = ioc->mfi_adapter_no;
2562 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2563 devclass = devclass_find("mfi");
2564 sc = devclass_get_softc(devclass, adapter);
2565 }
2566 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2567 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2568 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2569 return (EBUSY);
2570 }
2571 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2572 locked = 0;
2573
2574 /*
2575 * save off original context since copying from user
2576 * will clobber some data
2577 */
2578 context = cm->cm_frame->header.context;
2579
2580 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2581 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2582 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2583 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2584 cm->cm_frame->header.scsi_status = 0;
2585 cm->cm_frame->header.pad0 = 0;
2586 if (ioc->mfi_sge_count) {
2587 cm->cm_sg =
2588 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
2589 }
2590 cm->cm_flags = 0;
2591 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2592 cm->cm_flags |= MFI_CMD_DATAIN;
2593 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2594 cm->cm_flags |= MFI_CMD_DATAOUT;
2595 /* Legacy app shim */
2596 if (cm->cm_flags == 0)
2597 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2598 cm->cm_len = cm->cm_frame->header.data_len;
2599 if (cm->cm_len &&
2600 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2601 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2602 M_WAITOK | M_ZERO);
2603 if (cm->cm_data == NULL) {
2604 device_printf(sc->mfi_dev, "Malloc failed\n");
2605 goto out;
2606 }
2607 } else {
2608 cm->cm_data = NULL;
2609 }
2610
2611 /* restore header context */
2612 cm->cm_frame->header.context = context;
2613
2614 temp = data;
2615 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2616 for (i = 0; i < ioc->mfi_sge_count; i++) {
2617#ifdef __x86_64__
2618 if (cmd == MFI_CMD) {
2619 /* Native */
2620 error = copyin(ioc->mfi_sgl[i].iov_base,
2621 temp,
2622 ioc->mfi_sgl[i].iov_len);
2623 } else {
2624 void *temp_convert;
2625 /* 32bit */
2626 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2627 temp_convert =
2628 PTRIN(ioc32->mfi_sgl[i].iov_base);
2629 error = copyin(temp_convert,
2630 temp,
2631 ioc32->mfi_sgl[i].iov_len);
2632 }
2633#else
2634 error = copyin(ioc->mfi_sgl[i].iov_base,
2635 temp,
2636 ioc->mfi_sgl[i].iov_len);
2637#endif
2638 if (error != 0) {
2639 device_printf(sc->mfi_dev,
2640 "Copy in failed\n");
2641 goto out;
2642 }
2643 temp = &temp[ioc->mfi_sgl[i].iov_len];
2644 }
2645 }
2646
2647 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2648 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2649
2650 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2651#if defined(__x86_64__)
2652 cm->cm_frame->pass.sense_addr_lo =
2653 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2654 cm->cm_frame->pass.sense_addr_hi =
2655 (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2656#else
2657 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2658 cm->cm_frame->pass.sense_addr_hi = 0;
2659#endif
2660 }
2661
2662 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2663 skip_pre_post = mfi_check_for_sscd(sc, cm);
2664 if (!skip_pre_post) {
2665 error = mfi_check_command_pre(sc, cm);
2666 if (error) {
2667 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2668 goto out;
2669 }
2670 }
2671
2672 if ((error = mfi_wait_command(sc, cm)) != 0) {
2673 device_printf(sc->mfi_dev,
2674 "Controller polled command failed\n");
2675 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2676 goto out;
2677 }
2678
2679 if (!skip_pre_post)
2680 mfi_check_command_post(sc, cm);
2681 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2682
2683 temp = data;
2684 if (cm->cm_flags & MFI_CMD_DATAIN) {
2685 for (i = 0; i < ioc->mfi_sge_count; i++) {
2686#ifdef __x86_64__
2687 if (cmd == MFI_CMD) {
2688 /* Native */
2689 error = copyout(temp,
2690 ioc->mfi_sgl[i].iov_base,
2691 ioc->mfi_sgl[i].iov_len);
2692 } else {
2693 void *temp_convert;
2694 /* 32bit */
2695 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2696 temp_convert =
2697 PTRIN(ioc32->mfi_sgl[i].iov_base);
2698 error = copyout(temp,
2699 temp_convert,
2700 ioc32->mfi_sgl[i].iov_len);
2701 }
2702#else
2703 error = copyout(temp,
2704 ioc->mfi_sgl[i].iov_base,
2705 ioc->mfi_sgl[i].iov_len);
2706#endif
2707 if (error != 0) {
2708 device_printf(sc->mfi_dev,
2709 "Copy out failed\n");
2710 goto out;
2711 }
2712 temp = &temp[ioc->mfi_sgl[i].iov_len];
2713 }
2714 }
2715
2716 if (ioc->mfi_sense_len) {
2717 /* get user-space sense ptr then copy out sense */
2718 bcopy(&((struct mfi_ioc_packet*)arg)
2719 ->mfi_frame.raw[ioc->mfi_sense_off],
2720 &sense_ptr.sense_ptr_data[0],
2721 sizeof(sense_ptr.sense_ptr_data));
2722#ifdef __x86_64__
2723 if (cmd != MFI_CMD) {
2724 /*
2725 * not 64-bit native, so zero out any address
2726 * over 32 bits */
2727 sense_ptr.addr.high = 0;
2728 }
2729#endif
2730 error = copyout(cm->cm_sense, sense_ptr.user_space,
2731 ioc->mfi_sense_len);
2732 if (error != 0) {
2733 device_printf(sc->mfi_dev,
2734 "Copy out failed\n");
2735 goto out;
2736 }
2737 }
2738
2739 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2740out:
2741 mfi_config_unlock(sc, locked);
2742 if (data)
2743 kfree(data, M_MFIBUF);
2744 if (cm) {
2745 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2746 mfi_release_command(cm);
2747 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2748 }
2749
2750 break;
2751 }
2752 case MFI_SET_AEN:
2753 aen = (struct mfi_ioc_aen *)arg;
2754 error = mfi_aen_register(sc, aen->aen_seq_num,
2755 aen->aen_class_locale);
2756
2757 break;
2758 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2759 {
2760 devclass_t devclass;
2761 struct mfi_linux_ioc_packet l_ioc;
2762 int adapter;
2763
2764 devclass = devclass_find("mfi");
2765 if (devclass == NULL)
2766 return (ENOENT);
2767
2768 error = copyin(arg, &l_ioc, sizeof(l_ioc));
2769 if (error)
2770 return (error);
2771 adapter = l_ioc.lioc_adapter_no;
2772 sc = devclass_get_softc(devclass, adapter);
2773 if (sc == NULL)
2774 return (ENOENT);
2775 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2776 cmd, arg, flag));
2777 break;
2778 }
2779 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2780 {
2781 devclass_t devclass;
2782 struct mfi_linux_ioc_aen l_aen;
2783 int adapter;
2784
2785 devclass = devclass_find("mfi");
2786 if (devclass == NULL)
2787 return (ENOENT);
2788
2789 error = copyin(arg, &l_aen, sizeof(l_aen));
2790 if (error)
2791 return (error);
2792 adapter = l_aen.laen_adapter_no;
2793 sc = devclass_get_softc(devclass, adapter);
2794 if (sc == NULL)
2795 return (ENOENT);
2796 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2797 cmd, arg, flag));
2798 break;
2799 }
2800#ifdef __x86_64__
2801 case MFIIO_PASSTHRU32:
2802 iop_swab.ioc_frame = iop32->ioc_frame;
2803 iop_swab.buf_size = iop32->buf_size;
2804 iop_swab.buf = PTRIN(iop32->buf);
2805 iop = &iop_swab;
2806 /* FALLTHROUGH */
2807#endif
2808 case MFIIO_PASSTHRU:
2809 error = mfi_user_command(sc, iop);
2810#ifdef __x86_64__
2811 if (cmd == MFIIO_PASSTHRU32)
2812 iop32->ioc_frame = iop_swab.ioc_frame;
2813#endif
2814 break;
2815 default:
2816 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
2817 error = ENOENT;
2818 break;
2819 }
2820
2821 return (error);
2822}
2823
2824static int
2825mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
2826{
2827 struct mfi_softc *sc;
2828 struct mfi_linux_ioc_packet l_ioc;
2829 struct mfi_linux_ioc_aen l_aen;
2830 struct mfi_command *cm = NULL;
2831 struct mfi_aen *mfi_aen_entry;
2832 union mfi_sense_ptr sense_ptr;
2833 uint32_t context;
2834 uint8_t *data = NULL, *temp;
2835 int i;
2836 int error, locked;
2837
2838 sc = dev->si_drv1;
2839 error = 0;
2840 switch (cmd) {
2841 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2842 error = copyin(arg, &l_ioc, sizeof(l_ioc));
2843 if (error != 0)
2844 return (error);
2845
2846 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2847 return (EINVAL);
2848 }
2849
2850 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2851 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2852 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2853 return (EBUSY);
2854 }
2855 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2856 locked = 0;
2857
2858 /*
2859 * save off original context since copying from user
2860 * will clobber some data
2861 */
2862 context = cm->cm_frame->header.context;
2863
2864 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2865 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2866 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2867 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
2868 cm->cm_frame->header.scsi_status = 0;
2869 cm->cm_frame->header.pad0 = 0;
2870 if (l_ioc.lioc_sge_count)
2871 cm->cm_sg =
2872 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
2873 cm->cm_flags = 0;
2874 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2875 cm->cm_flags |= MFI_CMD_DATAIN;
2876 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2877 cm->cm_flags |= MFI_CMD_DATAOUT;
2878 cm->cm_len = cm->cm_frame->header.data_len;
2879 if (cm->cm_len &&
2880 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2881 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2882 M_WAITOK | M_ZERO);
2883 if (cm->cm_data == NULL) {
2884 device_printf(sc->mfi_dev, "Malloc failed\n");
2885 goto out;
2886 }
2887 } else {
2888 cm->cm_data = NULL;
2889 }
2890
2891 /* restore header context */
2892 cm->cm_frame->header.context = context;
2893
2894 temp = data;
2895 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2896 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2897 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
2898 temp,
2899 l_ioc.lioc_sgl[i].iov_len);
2900 if (error != 0) {
2901 device_printf(sc->mfi_dev,
2902 "Copy in failed\n");
2903 goto out;
2904 }
2905 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2906 }
2907 }
2908
2909 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2910 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2911
2912 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2913#if defined(__x86_64__)
2914 cm->cm_frame->pass.sense_addr_lo =
2915 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2916 cm->cm_frame->pass.sense_addr_hi =
2917 (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2918#else
2919 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2920 cm->cm_frame->pass.sense_addr_hi = 0;
2921#endif
2922 }
2923
2924 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2925 error = mfi_check_command_pre(sc, cm);
2926 if (error) {
2927 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2928 goto out;
2929 }
2930
2931 if ((error = mfi_wait_command(sc, cm)) != 0) {
2932 device_printf(sc->mfi_dev,
2933 "Controller polled command failed\n");
2934 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2935 goto out;
2936 }
2937
2938 mfi_check_command_post(sc, cm);
2939 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2940
2941 temp = data;
2942 if (cm->cm_flags & MFI_CMD_DATAIN) {
2943 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2944 error = copyout(temp,
2945 PTRIN(l_ioc.lioc_sgl[i].iov_base),
2946 l_ioc.lioc_sgl[i].iov_len);
2947 if (error != 0) {
2948 device_printf(sc->mfi_dev,
2949 "Copy out failed\n");
2950 goto out;
2951 }
2952 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2953 }
2954 }
2955
2956 if (l_ioc.lioc_sense_len) {
2957 /* get user-space sense ptr then copy out sense */
2958 bcopy(&((struct mfi_linux_ioc_packet*)arg)
2959 ->lioc_frame.raw[l_ioc.lioc_sense_off],
2960 &sense_ptr.sense_ptr_data[0],
2961 sizeof(sense_ptr.sense_ptr_data));
2962#ifdef __x86_64__
2963 /*
2964 * only 32-bit Linux support, so zero out any
2965 * address over 32 bits
2966 */
2967 sense_ptr.addr.high = 0;
2968#endif
2969 error = copyout(cm->cm_sense, sense_ptr.user_space,
2970 l_ioc.lioc_sense_len);
2971 if (error != 0) {
2972 device_printf(sc->mfi_dev,
2973 "Copy out failed\n");
2974 goto out;
2975 }
2976 }
2977
2978 error = copyout(&cm->cm_frame->header.cmd_status,
2979 &((struct mfi_linux_ioc_packet*)arg)
2980 ->lioc_frame.hdr.cmd_status,
2981 1);
2982 if (error != 0) {
2983 device_printf(sc->mfi_dev,
2984 "Copy out failed\n");
2985 goto out;
2986 }
2987
2988out:
2989 mfi_config_unlock(sc, locked);
2990 if (data)
2991 kfree(data, M_MFIBUF);
2992 if (cm) {
2993 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2994 mfi_release_command(cm);
2995 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2996 }
2997
2998 return (error);
2999 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3000 error = copyin(arg, &l_aen, sizeof(l_aen));
3001 if (error != 0)
3002 return (error);
3003 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3004 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3005 M_WAITOK);
3006 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3007 if (mfi_aen_entry != NULL) {
3008 mfi_aen_entry->p = curproc;
3009 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3010 aen_link);
3011 }
3012 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3013 l_aen.laen_class_locale);
3014
3015 if (error != 0) {
3016 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3017 aen_link);
3018 kfree(mfi_aen_entry, M_MFIBUF);
3019 }
3020 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3021
3022 return (error);
3023 default:
3024 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3025 error = ENOENT;
3026 break;
3027 }
3028
3029 return (error);
3030}
3031
3032static int
3033mfi_kqfilter(struct dev_kqfilter_args *ap)
3034{
3035 cdev_t dev = ap->a_head.a_dev;
3036 struct knote *kn = ap->a_kn;
3037 struct mfi_softc *sc;
3038 struct klist *klist;
3039
3040 ap->a_result = 0;
3041 sc = dev->si_drv1;
3042
3043 switch (kn->kn_filter) {
3044 case EVFILT_READ:
3045 kn->kn_fop = &mfi_read_filterops;
3046 kn->kn_hook = (caddr_t)sc;
3047 break;
3048 case EVFILT_WRITE:
3049 kn->kn_fop = &mfi_write_filterops;
3050 kn->kn_hook = (caddr_t)sc;
3051 break;
3052 default:
3053 ap->a_result = EOPNOTSUPP;
3054 return (0);
3055 }
3056
3057 klist = &sc->mfi_kq.ki_note;
3058 knote_insert(klist, kn);
3059
3060 return(0);
3061}
3062
3063static void
3064mfi_filter_detach(struct knote *kn)
3065{
3066 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3067 struct klist *klist = &sc->mfi_kq.ki_note;
3068
3069 knote_remove(klist, kn);
3070}
3071
3072static int
3073mfi_filter_read(struct knote *kn, long hint)
3074{
3075 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3076 int ready = 0;
3077
3078 if (sc->mfi_aen_triggered != 0) {
3079 ready = 1;
3080 sc->mfi_aen_triggered = 0;
3081 }
3082 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3083 kn->kn_flags |= EV_ERROR;
3084
3085 if (ready == 0)
3086 sc->mfi_poll_waiting = 1;
3087
3088 return (ready);
3089}
3090
3091static int
3092mfi_filter_write(struct knote *kn, long hint)
3093{
3094 return (0);
3095}
3096
3097static void
3098mfi_dump_all(void)
3099{
3100 struct mfi_softc *sc;
3101 struct mfi_command *cm;
3102 devclass_t dc;
3103 time_t deadline;
3104 int timedout;
3105 int i;
3106
3107 dc = devclass_find("mfi");
3108 if (dc == NULL) {
3109 kprintf("No mfi dev class\n");
3110 return;
3111 }
3112
3113 for (i = 0; ; i++) {
3114 sc = devclass_get_softc(dc, i);
3115 if (sc == NULL)
3116 break;
3117 device_printf(sc->mfi_dev, "Dumping\n\n");
3118 timedout = 0;
3119 deadline = time_second - MFI_CMD_TIMEOUT;
3120 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3121 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3122 if (cm->cm_timestamp < deadline) {
3123 device_printf(sc->mfi_dev,
3124 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3125 (int)(time_second - cm->cm_timestamp));
3126 MFI_PRINT_CMD(cm);
3127 timedout++;
3128 }
3129 }
3130
3131#if 0
3132 if (timedout)
3133 MFI_DUMP_CMDS(SC);
3134#endif
3135
3136 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3137 }
3138
3139 return;
3140}
3141
3142static void
3143mfi_timeout(void *data)
3144{
3145 struct mfi_softc *sc = (struct mfi_softc *)data;
3146 struct mfi_command *cm;
3147 time_t deadline;
3148 int timedout = 0;
3149
3150 deadline = time_second - MFI_CMD_TIMEOUT;
3151 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3152 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3153 if (sc->mfi_aen_cm == cm)
3154 continue;
3155 if (cm->cm_timestamp < deadline) {
3156 device_printf(sc->mfi_dev,
3157 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3158 (int)(time_second - cm->cm_timestamp));
3159 MFI_PRINT_CMD(cm);
3160 MFI_VALIDATE_CMD(sc, cm);
3161 timedout++;
3162 }
3163 }
3164
3165#if 0
3166 if (timedout)
3167 MFI_DUMP_CMDS(SC);
3168#endif
3169
3170 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3171
3172 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3173 mfi_timeout, sc);
3174
3175 if (0)
3176 mfi_dump_all();
3177 return;
3178}