kernel: Remove some unneeded NULL checks after kmalloc() with M_WAITOK.
[dragonfly.git] sys/dev/raid/mfi/mfi.c
/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Copyright 1994-2009 The FreeBSD Project.
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.57 2011/07/14 20:20:33 jhb Exp $
 */

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/buf2.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/device.h>
#include <sys/mplock2.h>

#include <bus/cam/scsi/scsi_all.h>

#include <dev/raid/mfi/mfireg.h>
#include <dev/raid/mfi/mfi_ioctl.h>
#include <dev/raid/mfi/mfivar.h>

static int mfi_alloc_commands(struct mfi_softc *);
static int mfi_comms_init(struct mfi_softc *);
static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
static int mfi_get_controller_info(struct mfi_softc *);
static int mfi_get_log_state(struct mfi_softc *,
           struct mfi_evt_log_state **);
static int mfi_parse_entries(struct mfi_softc *, int, int);
static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
           uint32_t, void **, size_t);
static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void mfi_startup(void *arg);
static void mfi_intr(void *arg);
static void mfi_ldprobe(struct mfi_softc *sc);
static void mfi_syspdprobe(struct mfi_softc *sc);
static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void mfi_aen_complete(struct mfi_command *);
static int mfi_aen_setup(struct mfi_softc *, uint32_t);
static int mfi_add_ld(struct mfi_softc *sc, int);
static void mfi_add_ld_complete(struct mfi_command *);
static int mfi_add_sys_pd(struct mfi_softc *sc, int);
static void mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static void mfi_complete(struct mfi_softc *, struct mfi_command *);
static int mfi_abort(struct mfi_softc *, struct mfi_command *);
static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
static void mfi_timeout(void *);
static int mfi_user_command(struct mfi_softc *,
           struct mfi_ioc_passthru *);
static void mfi_enable_intr_xscale(struct mfi_softc *sc);
static void mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void mfi_issue_cmd_xscale(struct mfi_softc *sc, uint32_t bus_add,
           uint32_t frame_cnt);
static void mfi_issue_cmd_ppc(struct mfi_softc *sc, uint32_t bus_add,
           uint32_t frame_cnt);
static void mfi_filter_detach(struct knote *);
static int mfi_filter_read(struct knote *, long);
static int mfi_filter_write(struct knote *, long);
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
    0, "event message locale");

static int mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
    0, "event message class");

static int mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
    0, "Max commands");

/* Management interface */
static d_open_t mfi_open;
static d_close_t mfi_close;
static d_ioctl_t mfi_ioctl;
static d_kqfilter_t mfi_kqfilter;

static struct dev_ops mfi_ops = {
        { "mfi", 0, 0 },
        .d_open = mfi_open,
        .d_close = mfi_close,
        .d_ioctl = mfi_ioctl,
        .d_kqfilter = mfi_kqfilter,
};

static struct filterops mfi_read_filterops =
        { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
static struct filterops mfi_write_filterops =
        { FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
        MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
        if (sc->mfi_flags & MFI_FLAGS_1078) {
                MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
                MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
        } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
                MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
                MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
        } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
                MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
        } else {
                panic("unknown adapter type");
        }
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
        return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
        return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
        int32_t status;

        status = MFI_READ4(sc, MFI_OSTS);
        if ((status & MFI_OSTS_INTR_VALID) == 0)
                return 1;

        MFI_WRITE4(sc, MFI_OSTS, status);
        return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
        int32_t status;

        status = MFI_READ4(sc, MFI_OSTS);
        if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
            ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
            ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
                return 1;

        if (sc->mfi_flags & MFI_FLAGS_SKINNY)
                MFI_WRITE4(sc, MFI_OSTS, status);
        else
                MFI_WRITE4(sc, MFI_ODCR0, status);
        return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt)
{
        MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt)
{
        if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
                MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
                MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
        } else {
                MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
        }
}
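
/*
 * Example of the inner-queue encodings above, assuming a command frame
 * at bus address 0x1000 with frame_cnt == 2 (frames are 64-byte aligned,
 * so the low address bits are free for flags):
 *
 *      xscale: (0x1000 >> 3) | 2         == 0x0202
 *      ppc:    (0x1000 | (2 << 1)) | 1   == 0x1005
 */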

static int
mfi_transition_firmware(struct mfi_softc *sc)
{
        uint32_t fw_state, cur_state;
        int max_wait, i;
        uint32_t cur_abs_reg_val = 0;
        uint32_t prev_abs_reg_val = 0;
        bus_space_handle_t idb;

        cur_abs_reg_val = sc->mfi_read_fw_status(sc);
        fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
        idb = sc->mfi_flags & MFI_FLAGS_SKINNY ? MFI_SKINNY_IDB : MFI_IDB;
        while (fw_state != MFI_FWSTATE_READY) {
                if (bootverbose)
                        device_printf(sc->mfi_dev, "Waiting for firmware to "
                            "become ready\n");
                cur_state = fw_state;
                switch (fw_state) {
                case MFI_FWSTATE_FAULT:
                        device_printf(sc->mfi_dev, "Firmware fault\n");
                        return (ENXIO);
                case MFI_FWSTATE_WAIT_HANDSHAKE:
                        MFI_WRITE4(sc, idb, MFI_FWINIT_CLEAR_HANDSHAKE);
                        max_wait = 2;
                        break;
                case MFI_FWSTATE_OPERATIONAL:
                        MFI_WRITE4(sc, idb, MFI_FWINIT_READY);
                        max_wait = 10;
                        break;
                case MFI_FWSTATE_UNDEFINED:
                case MFI_FWSTATE_BB_INIT:
                        max_wait = 2;
                        break;
                case MFI_FWSTATE_FW_INIT:
                case MFI_FWSTATE_FLUSH_CACHE:
                        max_wait = 20;
                        break;
                case MFI_FWSTATE_DEVICE_SCAN:
                        max_wait = 180; /* wait for 180 seconds */
                        prev_abs_reg_val = cur_abs_reg_val;
                        break;
                case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
                        MFI_WRITE4(sc, idb, MFI_FWINIT_HOTPLUG);
                        max_wait = 10;
                        break;
                default:
                        device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
                            fw_state);
                        return (ENXIO);
                }
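                /*
                 * Poll in 100ms steps: max_wait * 10 iterations of
                 * DELAY(100000) gives the firmware roughly max_wait
                 * seconds to leave the current state.
                 */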
                for (i = 0; i < (max_wait * 10); i++) {
                        cur_abs_reg_val = sc->mfi_read_fw_status(sc);
                        fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
                        if (fw_state == cur_state)
                                DELAY(100000);
                        else
                                break;
                }
                if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
                        /* Check the device scanning progress */
                        if (prev_abs_reg_val != cur_abs_reg_val)
                                continue;
                }
                if (fw_state == cur_state) {
                        device_printf(sc->mfi_dev, "Firmware stuck in state "
                            "%#x\n", fw_state);
                        return (ENXIO);
                }
        }
        return (0);
}

#if defined(__x86_64__)
static void
mfi_addr64_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        uint64_t *addr;

        addr = arg;
        *addr = segs[0].ds_addr;
}
#else
static void
mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        uint32_t *addr;

        addr = arg;
        *addr = segs[0].ds_addr;
}
#endif

int
mfi_attach(struct mfi_softc *sc)
{
        uint32_t status;
        int error, commsz, framessz, sensesz;
        int frames, unit, max_fw_sge;

        device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.981\n");

        lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
        lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
        TAILQ_INIT(&sc->mfi_ld_tqh);
        TAILQ_INIT(&sc->mfi_syspd_tqh);
        TAILQ_INIT(&sc->mfi_aen_pids);
        TAILQ_INIT(&sc->mfi_cam_ccbq);

        mfi_initq_free(sc);
        mfi_initq_ready(sc);
        mfi_initq_busy(sc);
        mfi_initq_bio(sc);

        if (sc->mfi_flags & MFI_FLAGS_1064R) {
                sc->mfi_enable_intr = mfi_enable_intr_xscale;
                sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
                sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
                sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
        } else {
                sc->mfi_enable_intr = mfi_enable_intr_ppc;
                sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
                sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
                sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
        }

        /* Before we get too far, see if the firmware is working */
        if ((error = mfi_transition_firmware(sc)) != 0) {
                device_printf(sc->mfi_dev, "Firmware not in READY state, "
                    "error %d\n", error);
                return (ENXIO);
        }

        /*
         * Get information needed for sizing the contiguous memory for the
         * frame pool.  Size down the sgl parameter since we know that
         * we will never need more than what's required for MAXPHYS.
         * It would be nice if these constants were available at runtime
         * instead of compile time.
         */
        status = sc->mfi_read_fw_status(sc);
        sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
        max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
        sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
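        /*
         * Worked example (numbers illustrative): with MFI_MAXPHYS of 128KB
         * and PAGE_SIZE of 4KB, a transfer needs at most 128KB/4KB + 1 = 33
         * S/G entries, so mfi_max_sge is capped at 33 even if the firmware
         * advertises more.
         */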

        /*
         * Create the dma tag for data buffers.  Used both for block I/O
         * and for various internal data queries.
         */
        if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
                                1, 0,                   /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR,      /* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                                sc->mfi_max_sge,        /* nsegments */
                                BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                                BUS_DMA_ALLOCNOW,       /* flags */
                                &sc->mfi_buffer_dmat)) {
                device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
                return (ENOMEM);
        }

        /*
         * Allocate DMA memory for the comms queues.  Keep it under 4GB for
         * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
         * entry, so the calculated size here will be 1 more than
         * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
         */
        commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
            sizeof(struct mfi_hwcomms);
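        /*
         * For example, with mfi_max_fw_cmds == 128 this reserves 128
         * 32-bit reply slots plus struct mfi_hwcomms, which carries the
         * producer/consumer indices and the one extra reply queue entry,
         * for 129 entries in total.
         */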
        if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
                                1, 0,                   /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                commsz,                 /* maxsize */
                                1,                      /* nsegments */
                                commsz,                 /* maxsegsize */
                                0,                      /* flags */
                                &sc->mfi_comms_dmat)) {
                device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
            BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
                device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
                return (ENOMEM);
        }
        bzero(sc->mfi_comms, commsz);
#if defined(__x86_64__)
        bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
            sc->mfi_comms, commsz, mfi_addr64_cb, &sc->mfi_comms_busaddr, 0);
#else
        bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
            sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
#endif

        /*
         * Allocate DMA memory for the command frames.  Keep them in the
         * lower 4GB for efficiency.  Calculate the size of the commands at
         * the same time; each command is one 64 byte frame plus a set of
         * additional frames for holding sg lists or other data.
         * The assumption here is that the SG list will start at the second
         * frame and not use the unused bytes in the first frame.  While this
         * isn't technically correct, it simplifies the calculation and allows
         * for command frames that might be larger than an mfi_io_frame.
         */
        if (sizeof(bus_addr_t) == 8) {
                sc->mfi_sge_size = sizeof(struct mfi_sg64);
                sc->mfi_flags |= MFI_FLAGS_SG64;
        } else {
                sc->mfi_sge_size = sizeof(struct mfi_sg32);
        }
        if (sc->mfi_flags & MFI_FLAGS_SKINNY)
                sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
        frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
        sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
        framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
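        /*
         * Worked example (sizes illustrative): with 8-byte mfi_sg32
         * entries and mfi_max_sge == 33 the SG list needs 264 bytes,
         * i.e. (264 - 1) / 64 + 2 = 6 frames, so each command occupies
         * 384 bytes of the frame pool.
         */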
        if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
                                64, 0,                  /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                framessz,               /* maxsize */
                                1,                      /* nsegments */
                                framessz,               /* maxsegsize */
                                0,                      /* flags */
                                &sc->mfi_frames_dmat)) {
                device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
            BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
                device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
                return (ENOMEM);
        }
        bzero(sc->mfi_frames, framessz);
#if defined(__x86_64__)
        bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
            sc->mfi_frames, framessz, mfi_addr64_cb, &sc->mfi_frames_busaddr, 0);
#else
        bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
            sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr, 0);
#endif

        /*
         * Allocate DMA memory for the frame sense data.  Keep them in the
         * lower 4GB for efficiency
         */
        sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
        if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
                                4, 0,                   /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                sensesz,                /* maxsize */
                                1,                      /* nsegments */
                                sensesz,                /* maxsegsize */
                                0,                      /* flags */
                                &sc->mfi_sense_dmat)) {
                device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
            BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
                device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
                return (ENOMEM);
        }
#if defined(__x86_64__)
        bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
            sc->mfi_sense, sensesz, mfi_addr64_cb, &sc->mfi_sense_busaddr, 0);
#else
        bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
            sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
#endif

        if ((error = mfi_alloc_commands(sc)) != 0)
                return (error);

        if ((error = mfi_comms_init(sc)) != 0)
                return (error);

        if ((error = mfi_get_controller_info(sc)) != 0)
                return (error);

        lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
        if ((error = mfi_aen_setup(sc, 0)) != 0) {
                lockmgr(&sc->mfi_io_lock, LK_RELEASE);
                return (error);
        }
        lockmgr(&sc->mfi_io_lock, LK_RELEASE);

        /*
         * Set up the interrupt handler.  XXX This should happen in
         * mfi_pci.c
         */
        sc->mfi_irq_rid = 0;
        if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
            &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
                device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
                return (EINVAL);
        }
        if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
            mfi_intr, sc, &sc->mfi_intr, NULL)) {
                device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
                return (EINVAL);
        }

        /* Register a config hook to probe the bus for arrays */
        sc->mfi_ich.ich_func = mfi_startup;
        sc->mfi_ich.ich_arg = sc;
        if (config_intrhook_establish(&sc->mfi_ich) != 0) {
                device_printf(sc->mfi_dev, "Cannot establish configuration "
                    "hook\n");
                return (EINVAL);
        }

        /*
         * Register a shutdown handler.
         */
        if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
            sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
                device_printf(sc->mfi_dev, "Warning: shutdown event "
                    "registration failed\n");
        }

        /*
         * Create the control device for doing management
         */
        unit = device_get_unit(sc->mfi_dev);
        sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
            0640, "mfi%d", unit);
        if (unit == 0)
                make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
        if (sc->mfi_cdev != NULL)
                sc->mfi_cdev->si_drv1 = sc;
        sysctl_ctx_init(&sc->mfi_sysctl_ctx);
        sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
            device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
        if (sc->mfi_sysctl_tree == NULL) {
                device_printf(sc->mfi_dev, "can't add sysctl node\n");
                return (EINVAL);
        }
        SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
            SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
            OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
            &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
        SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
            SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
            OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
            &sc->mfi_keep_deleted_volumes, 0,
            "Don't detach the mfid device for a busy volume that is deleted");

        device_add_child(sc->mfi_dev, "mfip", -1);
        bus_generic_attach(sc->mfi_dev);

        /* Start the timeout watchdog */
        callout_init(&sc->mfi_watchdog_callout);
        callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
            mfi_timeout, sc);

        return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
        struct mfi_command *cm;
        int i, ncmds;

        /*
         * XXX Should we allocate all the commands up front, or allocate on
         * demand later like 'aac' does?
         */
        ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
        if (bootverbose)
                device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
                    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

        sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
            M_WAITOK | M_ZERO);

        for (i = 0; i < ncmds; i++) {
                cm = &sc->mfi_commands[i];
                cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
                    sc->mfi_cmd_size * i);
                cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
                    sc->mfi_cmd_size * i;
                cm->cm_frame->header.context = i;
                cm->cm_sense = &sc->mfi_sense[i];
                cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
                cm->cm_sc = sc;
                cm->cm_index = i;
                if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
                    &cm->cm_dmamap) == 0)
                        mfi_release_command(cm);
                else
                        break;
                sc->mfi_total_cmds++;
        }

        return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        uint32_t *hdr_data;

        /*
         * Zero out the important fields of the frame, but make sure the
         * context field is preserved.  For efficiency, handle the fields
         * as 32 bit words.  Clear out the first S/G entry too for safety.
         */
        hdr = &cm->cm_frame->header;
        if (cm->cm_data != NULL && hdr->sg_count) {
                cm->cm_sg->sg32[0].len = 0;
                cm->cm_sg->sg32[0].addr = 0;
        }

        hdr_data = (uint32_t *)cm->cm_frame;
        hdr_data[0] = 0;        /* cmd, sense_len, cmd_status, scsi_status */
        hdr_data[1] = 0;        /* target_id, lun_id, cdb_len, sg_count */
        hdr_data[4] = 0;        /* flags, timeout */
        hdr_data[5] = 0;        /* data_len */

        cm->cm_extra_frames = 0;
        cm->cm_flags = 0;
        cm->cm_complete = NULL;
        cm->cm_private = NULL;
        cm->cm_data = NULL;
        cm->cm_sg = 0;
        cm->cm_total_frame_size = 0;

        mfi_enqueue_free(cm);
}

static int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
    void **bufp, size_t bufsize)
{
        struct mfi_command *cm;
        struct mfi_dcmd_frame *dcmd;
        void *buf = NULL;
        uint32_t context = 0;

        KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

        cm = mfi_dequeue_free(sc);
        if (cm == NULL)
                return (EBUSY);

        /* Zero out the MFI frame */
        context = cm->cm_frame->header.context;
        bzero(cm->cm_frame, sizeof(union mfi_frame));
        cm->cm_frame->header.context = context;

        if ((bufsize > 0) && (bufp != NULL)) {
                if (*bufp == NULL) {
                        buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
                        if (buf == NULL) {
                                mfi_release_command(cm);
                                return (ENOMEM);
                        }
                        *bufp = buf;
                } else {
                        buf = *bufp;
                }
        }

        dcmd = &cm->cm_frame->dcmd;
        bzero(dcmd->mbox, MFI_MBOX_SIZE);
        dcmd->header.cmd = MFI_CMD_DCMD;
        dcmd->header.timeout = 0;
        dcmd->header.flags = 0;
        dcmd->header.data_len = bufsize;
        dcmd->header.scsi_status = 0;
        dcmd->opcode = opcode;
        cm->cm_sg = &dcmd->sgl;
        cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
        cm->cm_flags = 0;
        cm->cm_data = buf;
        cm->cm_private = buf;
        cm->cm_len = bufsize;

        *cmp = cm;
        if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
                *bufp = buf;
        return (0);
}
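
/*
 * Typical usage sketch (mirrors mfi_get_controller_info() below; the
 * variable names are illustrative only): with mfi_io_lock held,
 *
 *      struct mfi_command *cm = NULL;
 *      struct mfi_ctrl_info *ci = NULL;
 *
 *      error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
 *          (void **)&ci, sizeof(*ci));
 *
 * On success the caller sets cm->cm_flags, issues the command with
 * mfi_mapcmd() or mfi_wait_command(), and afterwards kfree()s the
 * buffer and calls mfi_release_command().
 */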

static int
mfi_comms_init(struct mfi_softc *sc)
{
        struct mfi_command *cm;
        struct mfi_init_frame *init;
        struct mfi_init_qinfo *qinfo;
        int error;
        uint32_t context = 0;

        lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
        if ((cm = mfi_dequeue_free(sc)) == NULL) {
                lockmgr(&sc->mfi_io_lock, LK_RELEASE);
                return (EBUSY);
        }

        /* Zero out the MFI frame */
        context = cm->cm_frame->header.context;
        bzero(cm->cm_frame, sizeof(union mfi_frame));
        cm->cm_frame->header.context = context;

        /*
         * Abuse the SG list area of the frame to hold the init_qinfo
         * object.
         */
        init = &cm->cm_frame->init;
        qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

        bzero(qinfo, sizeof(struct mfi_init_qinfo));
        qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
        qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
            offsetof(struct mfi_hwcomms, hw_reply_q);
        qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
            offsetof(struct mfi_hwcomms, hw_pi);
        qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
            offsetof(struct mfi_hwcomms, hw_ci);

        init->header.cmd = MFI_CMD_INIT;
        init->header.data_len = sizeof(struct mfi_init_qinfo);
        init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
        cm->cm_data = NULL;
        cm->cm_flags = MFI_CMD_POLLED;

        if ((error = mfi_mapcmd(sc, cm)) != 0) {
                device_printf(sc->mfi_dev, "failed to send init command\n");
                lockmgr(&sc->mfi_io_lock, LK_RELEASE);
                return (error);
        }
        mfi_release_command(cm);
        lockmgr(&sc->mfi_io_lock, LK_RELEASE);

        return (0);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
        struct mfi_command *cm = NULL;
        struct mfi_ctrl_info *ci = NULL;
        uint32_t max_sectors_1, max_sectors_2;
        int error;

        lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
            (void **)&ci, sizeof(*ci));
        if (error)
                goto out;
        cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

        if ((error = mfi_mapcmd(sc, cm)) != 0) {
                device_printf(sc->mfi_dev, "Failed to get controller info\n");
                sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
                    MFI_SECTOR_LEN;
                error = 0;
                goto out;
        }

        bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

        max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
        max_sectors_2 = ci->max_request_size;
        sc->mfi_max_io = min(max_sectors_1, max_sectors_2);

out:
        if (ci)
                kfree(ci, M_MFIBUF);
        if (cm)
                mfi_release_command(cm);
        lockmgr(&sc->mfi_io_lock, LK_RELEASE);
        return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
        struct mfi_command *cm = NULL;
        int error;

        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
            (void **)log_state, sizeof(**log_state));
        if (error)
                goto out;
        cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

        if ((error = mfi_mapcmd(sc, cm)) != 0) {
                device_printf(sc->mfi_dev, "Failed to get log state\n");
                goto out;
        }

        bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
        if (cm)
                mfi_release_command(cm);

        return (error);
}

static int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
        struct mfi_evt_log_state *log_state = NULL;
        union mfi_evt class_locale;
        int error = 0;
        uint32_t seq;

        class_locale.members.reserved = 0;
        class_locale.members.locale = mfi_event_locale;
        class_locale.members.evt_class = mfi_event_class;

        if (seq_start == 0) {
                error = mfi_get_log_state(sc, &log_state);
                if (error) {
                        if (log_state)
                                kfree(log_state, M_MFIBUF);
                        return (error);
                }

                /*
                 * Walk through any events that fired since the last
                 * shutdown.
                 */
                mfi_parse_entries(sc, log_state->shutdown_seq_num,
                    log_state->newest_seq_num);
                seq = log_state->newest_seq_num;
        } else
                seq = seq_start;
        mfi_aen_register(sc, seq, class_locale.word);
        if (log_state != NULL)
                kfree(log_state, M_MFIBUF);

        return 0;
}

static int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

        KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
        cm->cm_complete = NULL;

        /*
         * MegaCli can issue a DCMD of 0.  In this case do nothing
         * and return 0 to it as status.
         */
        if (cm->cm_frame->dcmd.opcode == 0) {
                cm->cm_frame->header.cmd_status = MFI_STAT_OK;
                cm->cm_error = 0;
                return (cm->cm_error);
        }
        mfi_enqueue_ready(cm);
        mfi_startio(sc);
        if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
                lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
        return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
        struct mfi_command *cm;
        int i;

        callout_stop(&sc->mfi_watchdog_callout);        /* XXX callout_drain() */

        if (sc->mfi_cdev != NULL)
                destroy_dev(sc->mfi_cdev);
        dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));

        if (sc->mfi_total_cmds != 0) {
                for (i = 0; i < sc->mfi_total_cmds; i++) {
                        cm = &sc->mfi_commands[i];
                        bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
                }
                kfree(sc->mfi_commands, M_MFIBUF);
        }

        if (sc->mfi_intr)
                bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
        if (sc->mfi_irq != NULL)
                bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
                    sc->mfi_irq);

        if (sc->mfi_sense_busaddr != 0)
                bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
        if (sc->mfi_sense != NULL)
                bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
                    sc->mfi_sense_dmamap);
        if (sc->mfi_sense_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_sense_dmat);

        if (sc->mfi_frames_busaddr != 0)
                bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
        if (sc->mfi_frames != NULL)
                bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
                    sc->mfi_frames_dmamap);
        if (sc->mfi_frames_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_frames_dmat);

        if (sc->mfi_comms_busaddr != 0)
                bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
        if (sc->mfi_comms != NULL)
                bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
                    sc->mfi_comms_dmamap);
        if (sc->mfi_comms_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_comms_dmat);

        if (sc->mfi_buffer_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_buffer_dmat);
        if (sc->mfi_parent_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_parent_dmat);

        if (sc->mfi_sysctl_tree != NULL)
                sysctl_ctx_free(&sc->mfi_sysctl_ctx);

#if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
        if (mtx_initialized(&sc->mfi_io_lock)) {
                lockuninit(&sc->mfi_io_lock);
                sx_destroy(&sc->mfi_config_lock);
        }
#endif

        lockuninit(&sc->mfi_io_lock);
        lockuninit(&sc->mfi_config_lock);

        return;
}

static void
mfi_startup(void *arg)
{
        struct mfi_softc *sc;

        sc = (struct mfi_softc *)arg;

        config_intrhook_disestablish(&sc->mfi_ich);

        sc->mfi_enable_intr(sc);
        lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
        lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
        mfi_ldprobe(sc);
        if (sc->mfi_flags & MFI_FLAGS_SKINNY)
                mfi_syspdprobe(sc);
        lockmgr(&sc->mfi_io_lock, LK_RELEASE);
        lockmgr(&sc->mfi_config_lock, LK_RELEASE);
}

static void
mfi_intr(void *arg)
{
        struct mfi_softc *sc;
        struct mfi_command *cm;
        uint32_t pi, ci, context;

        sc = (struct mfi_softc *)arg;

        if (sc->mfi_check_clear_intr(sc))
                return;

        pi = sc->mfi_comms->hw_pi;
        ci = sc->mfi_comms->hw_ci;
        lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
        while (ci != pi) {
                context = sc->mfi_comms->hw_reply_q[ci];
                if (context < sc->mfi_max_fw_cmds) {
                        cm = &sc->mfi_commands[context];
                        mfi_remove_busy(cm);
                        cm->cm_error = 0;
                        mfi_complete(sc, cm);
                }
                if (++ci == (sc->mfi_max_fw_cmds + 1)) {
                        ci = 0;
                }
        }

        sc->mfi_comms->hw_ci = ci;

        /* Give deferred I/O a chance to run */
        if (sc->mfi_flags & MFI_FLAGS_QFRZN)
                sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
        mfi_startio(sc);
        lockmgr(&sc->mfi_io_lock, LK_RELEASE);

        return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
        struct mfi_dcmd_frame *dcmd;
        struct mfi_command *cm;
        int error;

        lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
        if (error) {
                lockmgr(&sc->mfi_io_lock, LK_RELEASE);
                return (error);
        }

        if (sc->mfi_aen_cm != NULL)
                mfi_abort(sc, sc->mfi_aen_cm);

        dcmd = &cm->cm_frame->dcmd;
        dcmd->header.flags = MFI_FRAME_DIR_NONE;
        cm->cm_flags = MFI_CMD_POLLED;
        cm->cm_data = NULL;

        if ((error = mfi_mapcmd(sc, cm)) != 0) {
                device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
        }

        mfi_release_command(cm);
        lockmgr(&sc->mfi_io_lock, LK_RELEASE);
        return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
        struct mfi_frame_header *hdr;
        struct mfi_command *cm = NULL;
        struct mfi_pd_list *pdlist = NULL;
        struct mfi_system_pd *syspd;
        int error, i;

        KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
        KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
        /* Add SYSTEM PD's */
        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
            (void **)&pdlist, sizeof(*pdlist));
        if (error) {
                device_printf(sc->mfi_dev, "Error while forming syspd list\n");
                goto out;
        }

        cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
        cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
        cm->cm_frame->dcmd.mbox[1] = 0;
        if (mfi_mapcmd(sc, cm) != 0) {
                device_printf(sc->mfi_dev, "Failed to get syspd device list\n");
                goto out;
        }
        bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
        hdr = &cm->cm_frame->header;
        if (hdr->cmd_status != MFI_STAT_OK) {
                device_printf(sc->mfi_dev, "MFI_DCMD_PD_LIST_QUERY failed %x\n",
                    hdr->cmd_status);
                goto out;
        }
        for (i = 0; i < pdlist->count; i++) {
                if (pdlist->addr[i].device_id == pdlist->addr[i].encl_device_id)
                        goto skip_sys_pd_add;
                /* Get each PD and add it to the system */
                if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
                        TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
                                if (syspd->pd_id == pdlist->addr[i].device_id)
                                        goto skip_sys_pd_add;
                        }
                }
                mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
skip_sys_pd_add:
                ;
        }
        /* Delete SYSPD's whose state has been changed */
        if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
                TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
                        for (i = 0; i < pdlist->count; i++) {
                                if (syspd->pd_id == pdlist->addr[i].device_id)
                                        goto skip_sys_pd_delete;
                        }
                        get_mplock();
                        device_delete_child(sc->mfi_dev, syspd->pd_dev);
                        rel_mplock();
skip_sys_pd_delete:
                        ;
                }
        }
out:
        if (pdlist)
                kfree(pdlist, M_MFIBUF);
        if (cm)
                mfi_release_command(cm);
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
        struct mfi_frame_header *hdr;
        struct mfi_command *cm = NULL;
        struct mfi_ld_list *list = NULL;
        struct mfi_disk *ld;
        int error, i;

        KKASSERT(lockstatus(&sc->mfi_config_lock, curthread) != 0);
        KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
            (void **)&list, sizeof(*list));
        if (error)
                goto out;

        cm->cm_flags = MFI_CMD_DATAIN;
        if (mfi_wait_command(sc, cm) != 0) {
                device_printf(sc->mfi_dev, "Failed to get device listing\n");
                goto out;
        }

        hdr = &cm->cm_frame->header;
        if (hdr->cmd_status != MFI_STAT_OK) {
                device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
                    hdr->cmd_status);
                goto out;
        }

        for (i = 0; i < list->ld_count; i++) {
                TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
                        if (ld->ld_id == list->ld_list[i].ld.v.target_id)
                                goto skip_add;
                }
                mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
        skip_add:;
        }
out:
        if (list)
                kfree(list, M_MFIBUF);
        if (cm)
                mfi_release_command(cm);

        return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
        static char buffer[32];

        if ((timestamp & 0xff000000) == 0xff000000)
                ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
                    0x00ffffff);
        else
                ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
        return (buffer);
}
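
/*
 * For example, a timestamp of 0xff00003c formats as "boot + 60s" (the
 * top byte is all ones, so only the low 24 bits count), while
 * 0x0000003c formats as plain "60s" past the 2000 epoch.
 */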

static const char *
format_class(int8_t class)
{
        static char buffer[6];

        switch (class) {
        case MFI_EVT_CLASS_DEBUG:
                return ("debug");
        case MFI_EVT_CLASS_PROGRESS:
                return ("progress");
        case MFI_EVT_CLASS_INFO:
                return ("info");
        case MFI_EVT_CLASS_WARNING:
                return ("WARN");
        case MFI_EVT_CLASS_CRITICAL:
                return ("CRIT");
        case MFI_EVT_CLASS_FATAL:
                return ("FATAL");
        case MFI_EVT_CLASS_DEAD:
                return ("DEAD");
        default:
                ksnprintf(buffer, sizeof(buffer), "%d", class);
                return (buffer);
        }
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{

        device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
            format_timestamp(detail->time), detail->evt_class.members.locale,
            format_class(detail->evt_class.members.evt_class),
            detail->description);
}

static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
        struct mfi_command *cm;
        struct mfi_dcmd_frame *dcmd;
        union mfi_evt current_aen, prior_aen;
        struct mfi_evt_detail *ed = NULL;
        int error = 0;

        current_aen.word = locale;
        if (sc->mfi_aen_cm != NULL) {
                prior_aen.word =
                    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
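                /*
                 * The pending AEN covers this request if it already
                 * listens at a class at least as verbose and its locale
                 * mask is a superset of the requested locales; in that
                 * case leave it alone.  Otherwise widen the class/locale
                 * below and re-register.
                 */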
                if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
                    !((prior_aen.members.locale & current_aen.members.locale)
                    ^ current_aen.members.locale)) {
                        return (0);
                } else {
                        prior_aen.members.locale |= current_aen.members.locale;
                        if (prior_aen.members.evt_class
                            < current_aen.members.evt_class)
                                current_aen.members.evt_class =
                                    prior_aen.members.evt_class;
                        mfi_abort(sc, sc->mfi_aen_cm);
                }
        }

        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
            (void **)&ed, sizeof(*ed));
        if (error) {
                goto out;
        }

        dcmd = &cm->cm_frame->dcmd;
        ((uint32_t *)&dcmd->mbox)[0] = seq;
        ((uint32_t *)&dcmd->mbox)[1] = locale;
        cm->cm_flags = MFI_CMD_DATAIN;
        cm->cm_complete = mfi_aen_complete;

        sc->mfi_aen_cm = cm;

        mfi_enqueue_ready(cm);
        mfi_startio(sc);

out:
        return (error);
}

static void
mfi_aen_complete(struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        struct mfi_softc *sc;
        struct mfi_evt_detail *detail;
        struct mfi_aen *mfi_aen_entry, *tmp;
        int seq = 0, aborted = 0;

        sc = cm->cm_sc;
        hdr = &cm->cm_frame->header;

        if (sc->mfi_aen_cm == NULL)
                return;

        if (sc->mfi_aen_cm->cm_aen_abort ||
            hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
                sc->mfi_aen_cm->cm_aen_abort = 0;
                aborted = 1;
        } else {
                sc->mfi_aen_triggered = 1;
                if (sc->mfi_poll_waiting) {
                        sc->mfi_poll_waiting = 0;
                        KNOTE(&sc->mfi_kq.ki_note, 0);
                }
                detail = cm->cm_data;
                /*
                 * XXX If this function is too expensive or is recursive, then
                 * events should be put onto a queue and processed later.
                 */
                mfi_decode_evt(sc, detail);
                seq = detail->seq + 1;
                TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
                    aen_link, tmp) {
                        TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
                            aen_link);
                        lwkt_gettoken(&proc_token);
                        ksignal(mfi_aen_entry->p, SIGIO);
                        lwkt_reltoken(&proc_token);
                        kfree(mfi_aen_entry, M_MFIBUF);
                }
        }

        kfree(cm->cm_data, M_MFIBUF);
        sc->mfi_aen_cm = NULL;
        wakeup(&sc->mfi_aen_cm);
        mfi_release_command(cm);

        /* set it up again so the driver can catch more events */
        if (!aborted) {
                mfi_aen_setup(sc, seq);
        }
}

#define MAX_EVENTS 15

static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
        struct mfi_command *cm;
        struct mfi_dcmd_frame *dcmd;
        struct mfi_evt_list *el;
        union mfi_evt class_locale;
        int error, i, seq, size;
        uint32_t context = 0;

        class_locale.members.reserved = 0;
        class_locale.members.locale = mfi_event_locale;
        class_locale.members.evt_class = mfi_event_class;

        size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
            * (MAX_EVENTS - 1);
        el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
        if (el == NULL)
                return (ENOMEM);

        for (seq = start_seq;;) {
                if ((cm = mfi_dequeue_free(sc)) == NULL) {
                        kfree(el, M_MFIBUF);
                        return (EBUSY);
                }

                /* Zero out the MFI frame */
                context = cm->cm_frame->header.context;
                bzero(cm->cm_frame, sizeof(union mfi_frame));
                cm->cm_frame->header.context = context;

                dcmd = &cm->cm_frame->dcmd;
                bzero(dcmd->mbox, MFI_MBOX_SIZE);
                dcmd->header.cmd = MFI_CMD_DCMD;
                dcmd->header.timeout = 0;
                dcmd->header.data_len = size;
                dcmd->header.scsi_status = 0;
                dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
                ((uint32_t *)&dcmd->mbox)[0] = seq;
                ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
                cm->cm_sg = &dcmd->sgl;
                cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
                cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
                cm->cm_data = el;
                cm->cm_len = size;

                if ((error = mfi_mapcmd(sc, cm)) != 0) {
                        device_printf(sc->mfi_dev,
                            "Failed to get controller entries\n");
                        mfi_release_command(cm);
                        break;
                }

                bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

                if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
                        mfi_release_command(cm);
                        break;
                }
                if (dcmd->header.cmd_status != MFI_STAT_OK) {
                        device_printf(sc->mfi_dev,
                            "Error %d fetching controller entries\n",
                            dcmd->header.cmd_status);
                        mfi_release_command(cm);
                        break;
                }
                mfi_release_command(cm);

                for (i = 0; i < el->count; i++) {
                        /*
                         * If this event is newer than 'stop_seq' then
                         * break out of the loop.  Note that the log
                         * is a circular buffer so we have to handle
                         * the case that our stop point is earlier in
                         * the buffer than our start point.
                         */
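                        /*
                         * Concretely: for start_seq 100 and stop_seq 110
                         * any seq >= 110 stops the scan; if the log
                         * wrapped (say start_seq 200, stop_seq 10) the
                         * scan only stops once seq lands in [10, 200).
                         */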
                        if (el->event[i].seq >= stop_seq) {
                                if (start_seq <= stop_seq)
                                        break;
                                else if (el->event[i].seq < start_seq)
                                        break;
                        }
                        mfi_decode_evt(sc, &el->event[i]);
                }
                seq = el->event[el->count - 1].seq + 1;
        }

        kfree(el, M_MFIBUF);
        return (0);
}

static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
        struct mfi_command *cm;
        struct mfi_dcmd_frame *dcmd = NULL;
        struct mfi_ld_info *ld_info = NULL;
        int error;

        KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
            (void **)&ld_info, sizeof(*ld_info));
        if (error) {
                device_printf(sc->mfi_dev,
                    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
                if (ld_info)
                        kfree(ld_info, M_MFIBUF);
                return (error);
        }
        cm->cm_flags = MFI_CMD_DATAIN;
        dcmd = &cm->cm_frame->dcmd;
        dcmd->mbox[0] = id;
        if (mfi_wait_command(sc, cm) != 0) {
                device_printf(sc->mfi_dev,
                    "Failed to get logical drive: %d\n", id);
                kfree(ld_info, M_MFIBUF);
                return (0);
        }
        if (ld_info->ld_config.params.isSSCD != 1) {
                mfi_add_ld_complete(cm);
        } else {
                mfi_release_command(cm);
                if (ld_info)    /* For SSCD drives, free ld_info here */
                        kfree(ld_info, M_MFIBUF);
        }
        return (0);
}

static void
mfi_add_ld_complete(struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        struct mfi_ld_info *ld_info;
        struct mfi_softc *sc;
        device_t child;

        sc = cm->cm_sc;
        hdr = &cm->cm_frame->header;
        ld_info = cm->cm_private;

        if (hdr->cmd_status != MFI_STAT_OK) {
                kfree(ld_info, M_MFIBUF);
                mfi_release_command(cm);
                return;
        }
        mfi_release_command(cm);

        lockmgr(&sc->mfi_io_lock, LK_RELEASE);
        get_mplock();
        if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
                device_printf(sc->mfi_dev, "Failed to add logical disk\n");
                kfree(ld_info, M_MFIBUF);
                rel_mplock();
                lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
                return;
        }

        device_set_ivars(child, ld_info);
        device_set_desc(child, "MFI Logical Disk");
        bus_generic_attach(sc->mfi_dev);
        rel_mplock();
        lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
}

static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
{
        struct mfi_command *cm;
        struct mfi_dcmd_frame *dcmd = NULL;
        struct mfi_pd_info *pd_info = NULL;
        int error;

        KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);

        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
            (void **)&pd_info, sizeof(*pd_info));
        if (error) {
                device_printf(sc->mfi_dev,
                    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n", error);
                if (pd_info)
                        kfree(pd_info, M_MFIBUF);
                return (error);
        }
        cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
        dcmd = &cm->cm_frame->dcmd;
        dcmd->mbox[0] = id;
        dcmd->header.scsi_status = 0;
        dcmd->header.pad0 = 0;
        if (mfi_mapcmd(sc, cm) != 0) {
                device_printf(sc->mfi_dev,
                    "Failed to get physical drive info %d\n", id);
                kfree(pd_info, M_MFIBUF);
                return (0);
        }
        bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
        mfi_add_sys_pd_complete(cm);
        return (0);
}

static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        struct mfi_pd_info *pd_info;
        struct mfi_softc *sc;
        device_t child;

        sc = cm->cm_sc;
        hdr = &cm->cm_frame->header;
        pd_info = cm->cm_private;

        if (hdr->cmd_status != MFI_STAT_OK) {
                kfree(pd_info, M_MFIBUF);
                mfi_release_command(cm);
                return;
        }
        if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
                device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
                    pd_info->ref.v.device_id);
                kfree(pd_info, M_MFIBUF);
                mfi_release_command(cm);
                return;
        }
        mfi_release_command(cm);

        lockmgr(&sc->mfi_io_lock, LK_RELEASE);
        get_mplock();
        if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
                device_printf(sc->mfi_dev, "Failed to add system pd\n");
                kfree(pd_info, M_MFIBUF);
                rel_mplock();
                lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
                return;
        }

        device_set_ivars(child, pd_info);
        device_set_desc(child, "MFI System PD");
        bus_generic_attach(sc->mfi_dev);
        rel_mplock();
        lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
}

static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
        struct bio *bio;
        struct mfi_command *cm = NULL;
        struct mfi_disk *mfid;

        /* reserving two commands to avoid starvation for IOCTL */
        if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
                return (NULL);
        if ((bio = mfi_dequeue_bio(sc)) == NULL)
                return (NULL);
        mfid = bio->bio_driver_info;
        if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
                cm = mfi_build_syspdio(sc, bio);
        else
                cm = mfi_build_ldio(sc, bio);
        if (!cm)
                mfi_enqueue_bio(sc, bio);
        return cm;
}

static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
        struct mfi_command *cm;
        struct buf *bp;
        struct mfi_system_pd *disk;
        struct mfi_pass_frame *pass;
        int flags = 0, blkcount = 0;
        uint32_t context = 0;

        if ((cm = mfi_dequeue_free(sc)) == NULL)
                return (NULL);

        /* Zero out the MFI frame */
        context = cm->cm_frame->header.context;
        bzero(cm->cm_frame, sizeof(union mfi_frame));
        cm->cm_frame->header.context = context;
        bp = bio->bio_buf;
        pass = &cm->cm_frame->pass;
        bzero(pass->cdb, 16);
        pass->header.cmd = MFI_CMD_PD_SCSI_IO;
        switch (bp->b_cmd & 0x03) {
        case BUF_CMD_READ:
                pass->cdb[0] = READ_10;
                flags = MFI_CMD_DATAIN;
                break;
        case BUF_CMD_WRITE:
                pass->cdb[0] = WRITE_10;
                flags = MFI_CMD_DATAOUT;
                break;
        default:
                panic("Invalid bio command");
        }

        /* Cheat with the sector length to avoid a non-constant division */
        blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
        disk = bio->bio_driver_info;
        /* Fill the LBA and Transfer length in CDB */
        pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
        pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
        pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
        pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
        pass->cdb[7] = (blkcount & 0xff00) >> 8;
        pass->cdb[8] = (blkcount & 0x00ff);
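        /*
         * Example of the CDB packing above (illustrative): a 4KB
         * transfer at byte offset 1MB is LBA 2048 (0x800) over 8
         * 512-byte sectors, so cdb[2..5] = 00 00 08 00 (big-endian LBA)
         * and cdb[7..8] = 00 08 (big-endian block count).
         */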
        pass->header.target_id = disk->pd_id;
        pass->header.timeout = 0;
        pass->header.flags = 0;
        pass->header.scsi_status = 0;
        pass->header.sense_len = MFI_SENSE_LEN;
        pass->header.data_len = bp->b_bcount;
        pass->header.cdb_len = 10;
#if defined(__x86_64__)
        pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
        pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
#else
        pass->sense_addr_lo = cm->cm_sense_busaddr;
        pass->sense_addr_hi = 0;
#endif
        cm->cm_complete = mfi_bio_complete;
        cm->cm_private = bio;
        cm->cm_data = bp->b_data;
        cm->cm_len = bp->b_bcount;
        cm->cm_sg = &pass->sgl;
        cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
        cm->cm_flags = flags;
        return (cm);
}

static struct mfi_command *
mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
{
        struct mfi_io_frame *io;
        struct buf *bp;
        struct mfi_disk *disk;
        struct mfi_command *cm;
        int flags, blkcount;
        uint32_t context = 0;

        if ((cm = mfi_dequeue_free(sc)) == NULL)
                return (NULL);

        /* Zero out the MFI frame */
        context = cm->cm_frame->header.context;
        bzero(cm->cm_frame, sizeof(union mfi_frame));
        cm->cm_frame->header.context = context;
        bp = bio->bio_buf;
        io = &cm->cm_frame->io;
        switch (bp->b_cmd & 0x03) {
        case BUF_CMD_READ:
                io->header.cmd = MFI_CMD_LD_READ;
                flags = MFI_CMD_DATAIN;
                break;
        case BUF_CMD_WRITE:
                io->header.cmd = MFI_CMD_LD_WRITE;
                flags = MFI_CMD_DATAOUT;
                break;
        default:
                panic("Invalid bio command");
        }

        /* Cheat with the sector length to avoid a non-constant division */
        blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
        disk = bio->bio_driver_info;
        io->header.target_id = disk->ld_id;
        io->header.timeout = 0;
        io->header.flags = 0;
        io->header.scsi_status = 0;
        io->header.sense_len = MFI_SENSE_LEN;
        io->header.data_len = blkcount;
#if defined(__x86_64__)
        io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
        io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
#else
        io->sense_addr_lo = cm->cm_sense_busaddr;
        io->sense_addr_hi = 0;
#endif
        io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) &
            0xffffffff00000000) >> 32;
        io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
        cm->cm_complete = mfi_bio_complete;
        cm->cm_private = bio;
        cm->cm_data = bp->b_data;
        cm->cm_len = bp->b_bcount;
        cm->cm_sg = &io->sgl;
        cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
        cm->cm_flags = flags;
        return (cm);
}

static void
mfi_bio_complete(struct mfi_command *cm)
{
        struct bio *bio;
        struct buf *bp;
        struct mfi_frame_header *hdr;
        struct mfi_softc *sc;

        bio = cm->cm_private;
        bp = bio->bio_buf;
        hdr = &cm->cm_frame->header;
        sc = cm->cm_sc;

        if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
                bp->b_flags |= B_ERROR;
                bp->b_error = EIO;
                device_printf(sc->mfi_dev, "I/O error, status= %d "
                    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
                mfi_print_sense(cm->cm_sc, cm->cm_sense);
        } else if (cm->cm_error != 0) {
                bp->b_flags |= B_ERROR;
        }

        mfi_release_command(cm);
        mfi_disk_complete(bio);
}
1805
1806void
1807mfi_startio(struct mfi_softc *sc)
1808{
1809 struct mfi_command *cm;
1810 struct ccb_hdr *ccbh;
1811
1812 for (;;) {
1813 /* Don't bother if we're short on resources */
1814 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1815 break;
1816
1817 /* Try a command that has already been prepared */
1818 cm = mfi_dequeue_ready(sc);
1819
1820 if (cm == NULL) {
1821 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1822 cm = sc->mfi_cam_start(ccbh);
1823 }
1824
1825 /* Nope, so look for work on the bioq */
1826 if (cm == NULL)
1827 cm = mfi_bio_command(sc);
1828
1829 /* No work available, so exit */
1830 if (cm == NULL)
1831 break;
1832
1833 /* Send the command to the controller */
1834 if (mfi_mapcmd(sc, cm) != 0) {
1835 mfi_requeue_ready(cm);
1836 break;
1837 }
1838 }
1839}
1840
1841static int
1842mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1843{
1844 int error, polled;
1845
1846 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
1847
1848 if (cm->cm_data != NULL) {
1849 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1850 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1851 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
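/*
 * EINPROGRESS means the mapping was deferred; mfi_data_cb will issue
 * the frame once it runs, so freeze the queue and report success for
 * now.
 */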
1852 if (error == EINPROGRESS) {
1853 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1854 return (0);
1855 }
1856 } else {
1857 error = mfi_send_frame(sc, cm);
1858 }
1859
1860 return (error);
1861}
1862
1863static void
1864mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1865{
1866 struct mfi_frame_header *hdr;
1867 struct mfi_command *cm;
1868 union mfi_sgl *sgl;
1869 struct mfi_softc *sc;
1870 int i, dir;
1871 int sgl_mapped = 0;
1872 int sge_size = 0;
1873
1874 cm = (struct mfi_command *)arg;
1875 sc = cm->cm_sc;
1876 hdr = &cm->cm_frame->header;
1877 sgl = cm->cm_sg;
1878
1879 if (error) {
1880 kprintf("error %d in callback\n", error);
1881 cm->cm_error = error;
1882 mfi_complete(sc, cm);
1883 return;
1884 }
1885
1886 /*
1887  * Use an IEEE SGL only for I/Os on a SKINNY controller.  For other
1888  * commands on a SKINNY controller use either sg32 or sg64, based on
1889  * sizeof(bus_addr_t).  Also calculate the total frame size based on
1890  * the type of SGL used.
1891  */
1892 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
1893 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
1894 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
1895 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
1896 for (i = 0; i < nsegs; i++) {
1897 sgl->sg_skinny[i].addr = segs[i].ds_addr;
1898 sgl->sg_skinny[i].len = segs[i].ds_len;
1899 sgl->sg_skinny[i].flag = 0;
1900 }
1901 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
1902 sgl_mapped = 1;
1903 sge_size = sizeof(struct mfi_sg_skinny);
1904 }
1905 if (!sgl_mapped) {
1906 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1907 for (i = 0; i < nsegs; i++) {
1908 sgl->sg32[i].addr = segs[i].ds_addr;
1909 sgl->sg32[i].len = segs[i].ds_len;
1910 }
1911 sge_size = sizeof(struct mfi_sg32);
1912 } else {
1913 for (i = 0; i < nsegs; i++) {
1914 sgl->sg64[i].addr = segs[i].ds_addr;
1915 sgl->sg64[i].len = segs[i].ds_len;
1916 }
1917 hdr->flags |= MFI_FRAME_SGL64;
1918 sge_size = sizeof(struct mfi_sg64);
1919 }
1920 }
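/*
 * sge_size now matches the SGL flavor chosen above (an sg32 entry is
 * typically 8 bytes, an sg64 entry 12, and a skinny IEEE entry 16)
 * and feeds the frame size computation below.
 */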
1921 hdr->sg_count = nsegs;
1922
1923 dir = 0;
1924 if (cm->cm_flags & MFI_CMD_DATAIN) {
1925 dir |= BUS_DMASYNC_PREREAD;
1926 hdr->flags |= MFI_FRAME_DIR_READ;
1927 }
1928 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1929 dir |= BUS_DMASYNC_PREWRITE;
1930 hdr->flags |= MFI_FRAME_DIR_WRITE;
1931 }
1932 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1933 cm->cm_flags |= MFI_CMD_MAPPED;
1934
1935 /*
1936 * Instead of calculating the total number of frames in the
1937 * compound frame, it's already assumed that there will be at
1938 * least 1 frame, so don't compensate for the modulo of the
1939 * following division.
1940 */
1941 cm->cm_total_frame_size += (sge_size * nsegs);
1942 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
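/*
 * Worked example, assuming MFI_FRAME_SIZE is 64: if the header plus
 * SGL total 80 bytes, cm_extra_frames = (80 - 1) / 64 = 1; if they
 * fit within 64 bytes, (64 - 1) / 64 = 0 extra frames.
 */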
1943
1944 mfi_send_frame(sc, cm);
1945}
1946
1947static int
1948mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1949{
1950 struct mfi_frame_header *hdr;
1951 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1952
1953 hdr = &cm->cm_frame->header;
1954
1955 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1956 cm->cm_timestamp = time_second;
1957 mfi_enqueue_busy(cm);
1958 } else {
1959 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1960 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1961 }
1962
1963 /*
1964 * The bus address of the command is aligned on a 64-byte boundary,
1965 * leaving the low 6 bits zero.  For whatever reason, the hardware
1966 * wants the address shifted right by three, leaving just 3 zero
1967 * bits.  These three bits are then used as a prefetch hint telling
1968 * the hardware how many frames to fetch across the bus.  If a
1969 * command has more than 8 frames, the 3 bits are set to 0x7 and the
1970 * firmware uses other information in the command to determine the
1971 * total amount to fetch.  However, the driver doesn't support I/O
1972 * larger than 128K, so 8 frames is enough for both 32-bit and
1973 * 64-bit systems.
1974 */
1975 if (cm->cm_extra_frames > 7)
1976 cm->cm_extra_frames = 7;
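/*
 * Illustration of the scheme above: a frame at bus address 0x1000
 * (64-byte aligned) becomes 0x200 once shifted right by three, and
 * the freed low 3 bits carry cm_extra_frames (clamped to 7 here) as
 * the prefetch hint.
 */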
1977
1978 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
1979
1980 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1981 return (0);
1982
1983 /* This is a polled command, so busy-wait for it to complete. */
1984 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1985 DELAY(1000);
1986 tm -= 1;
1987 if (tm <= 0)
1988 break;
1989 }
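/*
 * Each pass through the loop above burns ~1ms (DELAY takes
 * microseconds), so tm counts down in milliseconds from
 * MFI_POLL_TIMEOUT_SECS * 1000.
 */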
1990
1991 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1992 device_printf(sc->mfi_dev, "Frame %p timed out "
1993 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1994 return (ETIMEDOUT);
1995 }
1996
1997 return (0);
1998}
1999
2000static void
2001mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2002{
2003 int dir;
2004
2005 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2006 dir = 0;
2007 if (cm->cm_flags & MFI_CMD_DATAIN)
2008 dir |= BUS_DMASYNC_POSTREAD;
2009 if (cm->cm_flags & MFI_CMD_DATAOUT)
2010 dir |= BUS_DMASYNC_POSTWRITE;
2011
2012 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2013 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2014 cm->cm_flags &= ~MFI_CMD_MAPPED;
2015 }
2016
2017 cm->cm_flags |= MFI_CMD_COMPLETED;
2018
2019 if (cm->cm_complete != NULL)
2020 cm->cm_complete(cm);
2021 else
2022 wakeup(cm);
2023}
2024
2025static int
2026mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2027{
2028 struct mfi_command *cm;
2029 struct mfi_abort_frame *abort;
2030 int i = 0;
2031 uint32_t context = 0;
2032
2033 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2034
2035 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2036 return (EBUSY);
2037 }
2038
2039 /* Zero out the MFI frame */
2040 context = cm->cm_frame->header.context;
2041 bzero(cm->cm_frame, sizeof(union mfi_frame));
2042 cm->cm_frame->header.context = context;
2043
2044 abort = &cm->cm_frame->abort;
2045 abort->header.cmd = MFI_CMD_ABORT;
2046 abort->header.flags = 0;
2047 abort->header.scsi_status = 0;
2048 abort->abort_context = cm_abort->cm_frame->header.context;
2049#if defined(__x86_64__)
2050 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr & 0xFFFFFFFF;
2051 abort->abort_mfi_addr_hi = (cm_abort->cm_frame_busaddr & 0xFFFFFFFF00000000) >> 32;
2052#else
2053 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
2054 abort->abort_mfi_addr_hi = 0;
2055#endif
2056 cm->cm_data = NULL;
2057 cm->cm_flags = MFI_CMD_POLLED;
2058
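/*
 * Flag the outstanding AEN command so its completion path knows it
 * was aborted, then wait below (up to roughly 25 seconds) for it to
 * drain.
 */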
2059 sc->mfi_aen_cm->cm_aen_abort = 1;
2060 mfi_mapcmd(sc, cm);
2061 mfi_release_command(cm);
2062
2063 while (i < 5 && sc->mfi_aen_cm != NULL) {
2064 lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
2065 i++;
2066 }
2067
2068 return (0);
2069}
2070
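/*
 * Write a run of blocks to a logical volume using a polled command,
 * suitable for contexts such as a crash dump where sleeping and
 * interrupts are unavailable.
 */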
2071int
2072mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
2073{
2074 struct mfi_command *cm;
2075 struct mfi_io_frame *io;
2076 int error;
2077 uint32_t context = 0;
2078
2079 if ((cm = mfi_dequeue_free(sc)) == NULL)
2080 return (EBUSY);
2081
2082 /* Zero out the MFI frame */
2083 context = cm->cm_frame->header.context;
2084 bzero(cm->cm_frame, sizeof(union mfi_frame));
2085 cm->cm_frame->header.context = context;
2086
2087 io = &cm->cm_frame->io;
2088 io->header.cmd = MFI_CMD_LD_WRITE;
2089 io->header.target_id = id;
2090 io->header.timeout = 0;
2091 io->header.flags = 0;
2092 io->header.scsi_status = 0;
2093 io->header.sense_len = MFI_SENSE_LEN;
2094 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2095#if defined(__x86_64__)
2096 io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2097 io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2098#else
2099 io->sense_addr_lo = cm->cm_sense_busaddr;
2100 io->sense_addr_hi = 0;
2101#endif
2102 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2103 io->lba_lo = lba & 0xffffffff;
2104 cm->cm_data = virt;
2105 cm->cm_len = len;
2106 cm->cm_sg = &io->sgl;
2107 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2108 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2109
2110 error = mfi_mapcmd(sc, cm);
2111 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2112 BUS_DMASYNC_POSTWRITE);
2113 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2114 mfi_release_command(cm);
2115
2116 return (error);
2117}
2118
2119int
2120mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2121 int len)
2122{
2123 struct mfi_command *cm;
2124 struct mfi_pass_frame *pass;
2125 int error;
2126 int blkcount = 0;
2127
2128 if ((cm = mfi_dequeue_free(sc)) == NULL)
2129 return (EBUSY);
2130
2131 pass = &cm->cm_frame->pass;
2132 bzero(pass->cdb, 16);
2133 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2134 pass->cdb[0] = WRITE_10;
2135 pass->cdb[2] = (lba & 0xff000000) >> 24;
2136 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2137 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2138 pass->cdb[5] = (lba & 0x000000ff);
2139 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2140 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2141 pass->cdb[8] = (blkcount & 0x00ff);
2142 pass->header.target_id = id;
2143 pass->header.timeout = 0;
2144 pass->header.flags = 0;
2145 pass->header.scsi_status = 0;
2146 pass->header.sense_len = MFI_SENSE_LEN;
2147 pass->header.data_len = len;
2148 pass->header.cdb_len = 10;
2149#if defined(__x86_64__)
2150 pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
2151 pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2152#else
2153 pass->sense_addr_lo = cm->cm_sense_busaddr;
2154 pass->sense_addr_hi = 0;
2155#endif
2156 cm->cm_data = virt;
2157 cm->cm_len = len;
2158 cm->cm_sg = &pass->sgl;
2159 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2160 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2161
2162 error = mfi_mapcmd(sc, cm);
2163 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2164 BUS_DMASYNC_POSTWRITE);
2165 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2166 mfi_release_command(cm);
2167
2168 return (error);
2169}
2170
2171static int
2172mfi_open(struct dev_open_args *ap)
2173{
2174 cdev_t dev = ap->a_head.a_dev;
2175 struct mfi_softc *sc;
2176 int error;
2177
2178 sc = dev->si_drv1;
2179
2180 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2181 if (sc->mfi_detaching)
2182 error = ENXIO;
2183 else {
2184 sc->mfi_flags |= MFI_FLAGS_OPEN;
2185 error = 0;
2186 }
2187 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2188
2189 return (error);
2190}
2191
2192static int
2193mfi_close(struct dev_close_args *ap)
2194{
2195 cdev_t dev = ap->a_head.a_dev;
2196 struct mfi_softc *sc;
2197 struct mfi_aen *mfi_aen_entry, *tmp;
2198
2199 sc = dev->si_drv1;
2200
2201 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2202 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2203
2204 TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2205 if (mfi_aen_entry->p == curproc) {
2206 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2207 aen_link);
2208 kfree(mfi_aen_entry, M_MFIBUF);
2209 }
2210 }
2211 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2212 return (0);
2213}
2214
2215static int
2216mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2217{
2218
2219 switch (opcode) {
2220 case MFI_DCMD_LD_DELETE:
2221 case MFI_DCMD_CFG_ADD:
2222 case MFI_DCMD_CFG_CLEAR:
2223 lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
2224 return (1);
2225 default:
2226 return (0);
2227 }
2228}
2229
2230static void
2231mfi_config_unlock(struct mfi_softc *sc, int locked)
2232{
2233
2234 if (locked)
2235 lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2236}
2237
2238/* Perform pre-issue checks on commands from userland and possibly veto them. */
2239static int
2240mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2241{
2242 struct mfi_disk *ld, *ld2;
2243 int error;
2244 struct mfi_system_pd *syspd = NULL;
2245 uint16_t syspd_id;
2246 uint16_t *mbox;
2247
2248 KKASSERT(lockstatus(&sc->mfi_io_lock, curthread) != 0);
2249 error = 0;
2250 switch (cm->cm_frame->dcmd.opcode) {
2251 case MFI_DCMD_LD_DELETE:
2252 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2253 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2254 break;
2255 }
2256 if (ld == NULL)
2257 error = ENOENT;
2258 else
2259 error = mfi_disk_disable(ld);
2260 break;
2261 case MFI_DCMD_CFG_CLEAR:
2262 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2263 error = mfi_disk_disable(ld);
2264 if (error)
2265 break;
2266 }
2267 if (error) {
2268 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2269 if (ld2 == ld)
2270 break;
2271 mfi_disk_enable(ld2);
2272 }
2273 }
2274 break;
2275 case MFI_DCMD_PD_STATE_SET:
2276 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2277 syspd_id = mbox[0];
2278 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2279 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2280 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2281 if (syspd->pd_id == syspd_id)
2282 break;
2283 }
2284 }
2285 } else {
2286 break;
2287 }
2288 if (syspd)
2289 error = mfi_syspd_disable(syspd);
2290 break;
2291 default:
2292 break;
2293 }
2294 return (error);
2295}
2296
2297/* Perform post-issue checks on commands from userland. */
2298static void
2299mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2300{
2301 struct mfi_disk *ld, *ldn;
2302 struct mfi_system_pd *syspd = NULL;
2303 uint16_t syspd_id;
2304 uint16_t *mbox;
2305
2306 switch (cm->cm_frame->dcmd.opcode) {
2307 case MFI_DCMD_LD_DELETE:
2308 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2309 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2310 break;
2311 }
2312 KASSERT(ld != NULL, ("volume disappeared"));
2313 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2314 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2315 get_mplock();
2316 device_delete_child(sc->mfi_dev, ld->ld_dev);
2317 rel_mplock();
2318 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2319 } else
2320 mfi_disk_enable(ld);
2321 break;
2322 case MFI_DCMD_CFG_CLEAR:
2323 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2324 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2325 get_mplock();
2326 TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2327 device_delete_child(sc->mfi_dev, ld->ld_dev);
2328 }
2329 rel_mplock();
2330 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2331 } else {
2332 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2333 mfi_disk_enable(ld);
2334 }
2335 break;
2336 case MFI_DCMD_CFG_ADD:
2337 mfi_ldprobe(sc);
2338 break;
2339 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2340 mfi_ldprobe(sc);
2341 break;
2342 case MFI_DCMD_PD_STATE_SET:
2343 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2344 syspd_id = mbox[0];
2345 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2346 if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
2347 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2348 if (syspd->pd_id == syspd_id)
2349 break;
2350 }
2351 }
2352 } else {
2353 break;
2354 }
2355 /* If the transition fails then enable the syspd again */
2356 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2357 mfi_syspd_enable(syspd);
2358 break;
2359 }
2360}
2361
2362static int
2363mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2364{
2365 struct mfi_command *cm;
2366 struct mfi_dcmd_frame *dcmd;
2367 void *ioc_buf = NULL;
2368 uint32_t context;
2369 int error = 0, locked;
2370
2372 if (ioc->buf_size > 0) {
2373 ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2374 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2375 if (error) {
2376 device_printf(sc->mfi_dev, "failed to copyin\n");
2377 kfree(ioc_buf, M_MFIBUF);
2378 return (error);
2379 }
2380 }
2381
2382 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2383
2384 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2385 while ((cm = mfi_dequeue_free(sc)) == NULL)
2386 lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2387
2388 /* Save context for later */
2389 context = cm->cm_frame->header.context;
2390
2391 dcmd = &cm->cm_frame->dcmd;
2392 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2393
2394 cm->cm_sg = &dcmd->sgl;
2395 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2396 cm->cm_data = ioc_buf;
2397 cm->cm_len = ioc->buf_size;
2398
2399 /* restore context */
2400 cm->cm_frame->header.context = context;
2401
2402 /* Cheat since we don't know if we're writing or reading */
2403 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2404
2405 error = mfi_check_command_pre(sc, cm);
2406 if (error)
2407 goto out;
2408
2409 error = mfi_wait_command(sc, cm);
2410 if (error) {
2411 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2412 goto out;
2413 }
2414 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2415 mfi_check_command_post(sc, cm);
2416out:
2417 mfi_release_command(cm);
2418 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2419 mfi_config_unlock(sc, locked);
2420 if (ioc->buf_size > 0)
2421 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2422 if (ioc_buf)
2423 kfree(ioc_buf, M_MFIBUF);
2424 return (error);
2425}
2426
2427#ifdef __x86_64__
2428#define PTRIN(p) ((void *)(uintptr_t)(p))
2429#else
2430#define PTRIN(p) (p)
2431#endif
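/*
 * PTRIN widens a 32-bit user pointer, stored as an integer in the
 * compat ioctl structures, back into a kernel void *; on 32-bit
 * builds it is a no-op.
 */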
2432
2433static int
2434mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2435{
2436 struct mfi_config_data *conf_data = cm->cm_data;
2437 struct mfi_command *ld_cm = NULL;
2438 struct mfi_ld_info *ld_info = NULL;
2439 int error = 0;
2440
2441 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2442 (conf_data->ld[0].params.isSSCD == 1)) {
2443 error = 1;
2444 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2445 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2446 (void **)&ld_info, sizeof(*ld_info));
2447 if (error) {
2448 device_printf(sc->mfi_dev,"Failed to allocate "
2449 "MFI_DCMD_LD_GET_INFO %d", error);
2450 if (ld_info)
2451 kfree(ld_info, M_MFIBUF);
2452 return 0;
2453 }
2454 ld_cm->cm_flags = MFI_CMD_DATAIN;
2455 ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2456 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2457 if (mfi_wait_command(sc, ld_cm) != 0) {
2458 device_printf(sc->mfi_dev, "failed to get log drv\n");
2459 mfi_release_command(ld_cm);
2460 kfree(ld_info, M_MFIBUF);
2461 return 0;
2462 }
2463
2464 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2465 kfree(ld_info, M_MFIBUF);
2466 mfi_release_command(ld_cm);
2467 return 0;
2468 } else {
2469 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2470 }
2471
2472 if (ld_info->ld_config.params.isSSCD == 1)
2473 error = 1;
2474
2475 mfi_release_command(ld_cm);
2476 kfree(ld_info, M_MFIBUF);
2477 }
2478 return error;
2479}
2480
2481static int
2482mfi_ioctl(struct dev_ioctl_args *ap)
2483{
2484 cdev_t dev = ap->a_head.a_dev;
2485 u_long cmd = ap->a_cmd;
2486 int flag = ap->a_fflag;
2487 caddr_t arg = ap->a_data;
2488 struct mfi_softc *sc;
2489 union mfi_statrequest *ms;
2490 struct mfi_ioc_packet *ioc;
2491#ifdef __x86_64__
2492 struct mfi_ioc_packet32 *ioc32;
2493#endif
2494 struct mfi_ioc_aen *aen;
2495 struct mfi_command *cm = NULL;
2496 uint32_t context;
2497 union mfi_sense_ptr sense_ptr;
2498 uint8_t *data = NULL, *temp, skip_pre_post = 0;
2499 int i;
2500 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2501#ifdef __x86_64__
2502 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2503 struct mfi_ioc_passthru iop_swab;
2504#endif
2505 int error, locked;
2506
2507 sc = dev->si_drv1;
2508 error = 0;
2509
2510 switch (cmd) {
2511 case MFIIO_STATS:
2512 ms = (union mfi_statrequest *)arg;
2513 switch (ms->ms_item) {
2514 case MFIQ_FREE:
2515 case MFIQ_BIO:
2516 case MFIQ_READY:
2517 case MFIQ_BUSY:
2518 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2519 sizeof(struct mfi_qstat));
2520 break;
2521 default:
2522 error = ENOIOCTL;
2523 break;
2524 }
2525 break;
2526 case MFIIO_QUERY_DISK:
2527 {
2528 struct mfi_query_disk *qd;
2529 struct mfi_disk *ld;
2530
2531 qd = (struct mfi_query_disk *)arg;
2532 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2533 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2534 if (ld->ld_id == qd->array_id)
2535 break;
2536 }
2537 if (ld == NULL) {
2538 qd->present = 0;
2539 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2540 return (0);
2541 }
2542 qd->present = 1;
2543 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2544 qd->open = 1;
2545 bzero(qd->devname, SPECNAMELEN + 1);
2546 ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2547 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2548 break;
2549 }
2550 case MFI_CMD:
2551#ifdef __x86_64__
2552 case MFI_CMD32:
2553#endif
2554 {
2555 devclass_t devclass;
2556 ioc = (struct mfi_ioc_packet *)arg;
2557 int adapter;
2558
2559 adapter = ioc->mfi_adapter_no;
2560 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2561 devclass = devclass_find("mfi");
2562 sc = devclass_get_softc(devclass, adapter);
if (sc == NULL)
return (ENOENT);
2563 }
2564 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2565 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2566 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2567 return (EBUSY);
2568 }
2569 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2570 locked = 0;
2571
2572 /*
2573 * save off original context since copying from user
2574 * will clobber some data
2575 */
2576 context = cm->cm_frame->header.context;
2577
2578 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2579 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2580 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2581 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2582 cm->cm_frame->header.scsi_status = 0;
2583 cm->cm_frame->header.pad0 = 0;
2584 if (ioc->mfi_sge_count) {
2585 cm->cm_sg =
2586 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
2587 }
2588 cm->cm_flags = 0;
2589 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2590 cm->cm_flags |= MFI_CMD_DATAIN;
2591 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2592 cm->cm_flags |= MFI_CMD_DATAOUT;
2593 /* Legacy app shim */
2594 if (cm->cm_flags == 0)
2595 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2596 cm->cm_len = cm->cm_frame->header.data_len;
2597 if (cm->cm_len &&
2598 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2599 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2600 M_WAITOK | M_ZERO);
2605 } else {
2606 cm->cm_data = 0;
2607 }
2608
2609 /* restore header context */
2610 cm->cm_frame->header.context = context;
2611
2612 temp = data;
2613 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2614 for (i = 0; i < ioc->mfi_sge_count; i++) {
2615#ifdef __x86_64__
2616 if (cmd == MFI_CMD) {
2617 /* Native */
2618 error = copyin(ioc->mfi_sgl[i].iov_base,
2619 temp,
2620 ioc->mfi_sgl[i].iov_len);
2621 } else {
2622 void *temp_convert;
2623 /* 32bit */
2624 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2625 temp_convert =
2626 PTRIN(ioc32->mfi_sgl[i].iov_base);
2627 error = copyin(temp_convert,
2628 temp,
2629 ioc32->mfi_sgl[i].iov_len);
2630 }
2631#else
2632 error = copyin(ioc->mfi_sgl[i].iov_base,
2633 temp,
2634 ioc->mfi_sgl[i].iov_len);
2635#endif
2636 if (error != 0) {
2637 device_printf(sc->mfi_dev,
2638 "Copy in failed\n");
2639 goto out;
2640 }
2641 temp = &temp[ioc->mfi_sgl[i].iov_len];
2642 }
2643 }
2644
2645 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2646 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2647
2648 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2649#if defined(__x86_64__)
2650 cm->cm_frame->pass.sense_addr_lo =
2651 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2652 cm->cm_frame->pass.sense_addr_hi =
2653 (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2654#else
2655 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2656 cm->cm_frame->pass.sense_addr_hi = 0;
2657#endif
2658 }
2659
2660 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2661 skip_pre_post = mfi_check_for_sscd(sc, cm);
2662 if (!skip_pre_post) {
2663 error = mfi_check_command_pre(sc, cm);
2664 if (error) {
2665 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2666 goto out;
2667 }
2668 }
2669
2670 if ((error = mfi_wait_command(sc, cm)) != 0) {
2671 device_printf(sc->mfi_dev,
2672 "Controller polled failed\n");
2673 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2674 goto out;
2675 }
2676
2677 if (!skip_pre_post)
2678 mfi_check_command_post(sc, cm);
2679 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2680
2681 temp = data;
2682 if (cm->cm_flags & MFI_CMD_DATAIN) {
2683 for (i = 0; i < ioc->mfi_sge_count; i++) {
2684#ifdef __x86_64__
2685 if (cmd == MFI_CMD) {
2686 /* Native */
2687 error = copyout(temp,
2688 ioc->mfi_sgl[i].iov_base,
2689 ioc->mfi_sgl[i].iov_len);
2690 } else {
2691 void *temp_convert;
2692 /* 32bit */
2693 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2694 temp_convert =
2695 PTRIN(ioc32->mfi_sgl[i].iov_base);
2696 error = copyout(temp,
2697 temp_convert,
2698 ioc32->mfi_sgl[i].iov_len);
2699 }
2700#else
2701 error = copyout(temp,
2702 ioc->mfi_sgl[i].iov_base,
2703 ioc->mfi_sgl[i].iov_len);
2704#endif
2705 if (error != 0) {
2706 device_printf(sc->mfi_dev,
2707 "Copy out failed\n");
2708 goto out;
2709 }
2710 temp = &temp[ioc->mfi_sgl[i].iov_len];
2711 }
2712 }
2713
2714 if (ioc->mfi_sense_len) {
2715 /* get user-space sense ptr then copy out sense */
2716 bcopy(&((struct mfi_ioc_packet*)arg)
2717 ->mfi_frame.raw[ioc->mfi_sense_off],
2718 &sense_ptr.sense_ptr_data[0],
2719 sizeof(sense_ptr.sense_ptr_data));
2720#ifdef __x86_64__
2721 if (cmd != MFI_CMD) {
2722 /*
2723 * Not 64-bit native, so zero out any
2724 * address bits above 32. */
2725 sense_ptr.addr.high = 0;
2726 }
2727#endif
2728 error = copyout(cm->cm_sense, sense_ptr.user_space,
2729 ioc->mfi_sense_len);
2730 if (error != 0) {
2731 device_printf(sc->mfi_dev,
2732 "Copy out failed\n");
2733 goto out;
2734 }
2735 }
2736
2737 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2738out:
2739 mfi_config_unlock(sc, locked);
2740 if (data)
2741 kfree(data, M_MFIBUF);
2742 if (cm) {
2743 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2744 mfi_release_command(cm);
2745 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2746 }
2747
2748 break;
2749 }
2750 case MFI_SET_AEN:
2751 aen = (struct mfi_ioc_aen *)arg;
2752 error = mfi_aen_register(sc, aen->aen_seq_num,
2753 aen->aen_class_locale);
2754
2755 break;
2756 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2757 {
2758 devclass_t devclass;
2759 struct mfi_linux_ioc_packet l_ioc;
2760 int adapter;
2761
2762 devclass = devclass_find("mfi");
2763 if (devclass == NULL)
2764 return (ENOENT);
2765
2766 error = copyin(arg, &l_ioc, sizeof(l_ioc));
2767 if (error)
2768 return (error);
2769 adapter = l_ioc.lioc_adapter_no;
2770 sc = devclass_get_softc(devclass, adapter);
2771 if (sc == NULL)
2772 return (ENOENT);
2773 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2774 cmd, arg, flag));
2775 break;
2776 }
2777 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2778 {
2779 devclass_t devclass;
2780 struct mfi_linux_ioc_aen l_aen;
2781 int adapter;
2782
2783 devclass = devclass_find("mfi");
2784 if (devclass == NULL)
2785 return (ENOENT);
2786
2787 error = copyin(arg, &l_aen, sizeof(l_aen));
2788 if (error)
2789 return (error);
2790 adapter = l_aen.laen_adapter_no;
2791 sc = devclass_get_softc(devclass, adapter);
2792 if (sc == NULL)
2793 return (ENOENT);
2794 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2795 cmd, arg, flag));
2796 break;
2797 }
2798#ifdef __x86_64__
2799 case MFIIO_PASSTHRU32:
2800 iop_swab.ioc_frame = iop32->ioc_frame;
2801 iop_swab.buf_size = iop32->buf_size;
2802 iop_swab.buf = PTRIN(iop32->buf);
2803 iop = &iop_swab;
2804 /* FALLTHROUGH */
2805#endif
2806 case MFIIO_PASSTHRU:
2807 error = mfi_user_command(sc, iop);
2808#ifdef __x86_64__
2809 if (cmd == MFIIO_PASSTHRU32)
2810 iop32->ioc_frame = iop_swab.ioc_frame;
2811#endif
2812 break;
2813 default:
2814 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
2815 error = ENOENT;
2816 break;
2817 }
2818
2819 return (error);
2820}
2821
2822static int
2823mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
2824{
2825 struct mfi_softc *sc;
2826 struct mfi_linux_ioc_packet l_ioc;
2827 struct mfi_linux_ioc_aen l_aen;
2828 struct mfi_command *cm = NULL;
2829 struct mfi_aen *mfi_aen_entry;
2830 union mfi_sense_ptr sense_ptr;
2831 uint32_t context;
2832 uint8_t *data = NULL, *temp;
2833 int i;
2834 int error, locked;
2835
2836 sc = dev->si_drv1;
2837 error = 0;
2838 switch (cmd) {
2839 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2840 error = copyin(arg, &l_ioc, sizeof(l_ioc));
2841 if (error != 0)
2842 return (error);
2843
2844 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2845 return (EINVAL);
2846 }
2847
2848 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2849 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2850 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2851 return (EBUSY);
2852 }
2853 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2854 locked = 0;
2855
2856 /*
2857 * save off original context since copying from user
2858 * will clobber some data
2859 */
2860 context = cm->cm_frame->header.context;
2861
2862 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2863 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2864 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2865 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
2866 cm->cm_frame->header.scsi_status = 0;
2867 cm->cm_frame->header.pad0 = 0;
2868 if (l_ioc.lioc_sge_count)
2869 cm->cm_sg =
2870 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
2871 cm->cm_flags = 0;
2872 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2873 cm->cm_flags |= MFI_CMD_DATAIN;
2874 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2875 cm->cm_flags |= MFI_CMD_DATAOUT;
2876 cm->cm_len = cm->cm_frame->header.data_len;
2877 if (cm->cm_len &&
2878 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2879 cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
2880 M_WAITOK | M_ZERO);
2885 } else {
2886 cm->cm_data = 0;
2887 }
2888
2889 /* restore header context */
2890 cm->cm_frame->header.context = context;
2891
2892 temp = data;
2893 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2894 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2895 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
2896 temp,
2897 l_ioc.lioc_sgl[i].iov_len);
2898 if (error != 0) {
2899 device_printf(sc->mfi_dev,
2900 "Copy in failed\n");
2901 goto out;
2902 }
2903 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2904 }
2905 }
2906
2907 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2908 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2909
2910 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2911#if defined(__x86_64__)
2912 cm->cm_frame->pass.sense_addr_lo =
2913 (cm->cm_sense_busaddr & 0xFFFFFFFF);
2914 cm->cm_frame->pass.sense_addr_hi =
2915 (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
2916#else
2917 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2918 cm->cm_frame->pass.sense_addr_hi = 0;
2919#endif
2920 }
2921
2922 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2923 error = mfi_check_command_pre(sc, cm);
2924 if (error) {
2925 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2926 goto out;
2927 }
2928
2929 if ((error = mfi_wait_command(sc, cm)) != 0) {
2930 device_printf(sc->mfi_dev,
2931 "Controller polled failed\n");
2932 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2933 goto out;
2934 }
2935
2936 mfi_check_command_post(sc, cm);
2937 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2938
2939 temp = data;
2940 if (cm->cm_flags & MFI_CMD_DATAIN) {
2941 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2942 error = copyout(temp,
2943 PTRIN(l_ioc.lioc_sgl[i].iov_base),
2944 l_ioc.lioc_sgl[i].iov_len);
2945 if (error != 0) {
2946 device_printf(sc->mfi_dev,
2947 "Copy out failed\n");
2948 goto out;
2949 }
2950 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2951 }
2952 }
2953
2954 if (l_ioc.lioc_sense_len) {
2955 /* get user-space sense ptr then copy out sense */
2956 bcopy(&((struct mfi_linux_ioc_packet*)arg)
2957 ->lioc_frame.raw[l_ioc.lioc_sense_off],
2958 &sense_ptr.sense_ptr_data[0],
2959 sizeof(sense_ptr.sense_ptr_data));
2960#ifdef __x86_64__
2961 /*
2962 * only 32bit Linux support so zero out any
2963 * address over 32bit
2964 */
2965 sense_ptr.addr.high = 0;
2966#endif
2967 error = copyout(cm->cm_sense, sense_ptr.user_space,
2968 l_ioc.lioc_sense_len);
2969 if (error != 0) {
2970 device_printf(sc->mfi_dev,
2971 "Copy out failed\n");
2972 goto out;
2973 }
2974 }
2975
2976 error = copyout(&cm->cm_frame->header.cmd_status,
2977 &((struct mfi_linux_ioc_packet*)arg)
2978 ->lioc_frame.hdr.cmd_status,
2979 1);
2980 if (error != 0) {
2981 device_printf(sc->mfi_dev,
2982 "Copy out failed\n");
2983 goto out;
2984 }
2985
2986out:
2987 mfi_config_unlock(sc, locked);
2988 if (data)
2989 kfree(data, M_MFIBUF);
2990 if (cm) {
2991 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2992 mfi_release_command(cm);
2993 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2994 }
2995
2996 return (error);
2997 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2998 error = copyin(arg, &l_aen, sizeof(l_aen));
2999 if (error != 0)
3000 return (error);
3001 kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3002 mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3003 M_WAITOK);
3004 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3006 mfi_aen_entry->p = curproc;
3007 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3008     aen_link);
3010 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3011 l_aen.laen_class_locale);
3012
3013 if (error != 0) {
3014 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3015 aen_link);
3016 kfree(mfi_aen_entry, M_MFIBUF);
3017 }
3018 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3019
3020 return (error);
3021 default:
3022 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3023 error = ENOENT;
3024 break;
3025 }
3026
3027 return (error);
3028}
3029
3030static int
3031mfi_kqfilter(struct dev_kqfilter_args *ap)
3032{
3033 cdev_t dev = ap->a_head.a_dev;
3034 struct knote *kn = ap->a_kn;
3035 struct mfi_softc *sc;
3036 struct klist *klist;
3037
3038 ap->a_result = 0;
3039 sc = dev->si_drv1;
3040
3041 switch (kn->kn_filter) {
3042 case EVFILT_READ:
3043 kn->kn_fop = &mfi_read_filterops;
3044 kn->kn_hook = (caddr_t)sc;
3045 break;
3046 case EVFILT_WRITE:
3047 kn->kn_fop = &mfi_write_filterops;
3048 kn->kn_hook = (caddr_t)sc;
3049 break;
3050 default:
3051 ap->a_result = EOPNOTSUPP;
3052 return (0);
3053 }
3054
3055 klist = &sc->mfi_kq.ki_note;
3056 knote_insert(klist, kn);
3057
3058 return (0);
3059}
3060
3061static void
3062mfi_filter_detach(struct knote *kn)
3063{
3064 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3065 struct klist *klist = &sc->mfi_kq.ki_note;
3066
3067 knote_remove(klist, kn);
3068}
3069
3070static int
3071mfi_filter_read(struct knote *kn, long hint)
3072{
3073 struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3074 int ready = 0;
3075
3076 if (sc->mfi_aen_triggered != 0) {
3077 ready = 1;
3078 sc->mfi_aen_triggered = 0;
3079 }
3080 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3081 kn->kn_flags |= EV_ERROR;
3082
3083 if (ready == 0)
3084 sc->mfi_poll_waiting = 1;
3085
3086 return (ready);
3087}
3088
3089static int
3090mfi_filter_write(struct knote *kn, long hint)
3091{
3092 return (0);
3093}
3094
3095static void
3096mfi_dump_all(void)
3097{
3098 struct mfi_softc *sc;
3099 struct mfi_command *cm;
3100 devclass_t dc;
3101 time_t deadline;
3102 int timedout;
3103 int i;
3104
3105 dc = devclass_find("mfi");
3106 if (dc == NULL) {
3107 kprintf("No mfi dev class\n");
3108 return;
3109 }
3110
3111 for (i = 0; ; i++) {
3112 sc = devclass_get_softc(dc, i);
3113 if (sc == NULL)
3114 break;
3115 device_printf(sc->mfi_dev, "Dumping\n\n");
3116 timedout = 0;
3117 deadline = time_second - MFI_CMD_TIMEOUT;
3118 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3119 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3120 if (cm->cm_timestamp < deadline) {
3121 device_printf(sc->mfi_dev,
3122 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3123 (int)(time_second - cm->cm_timestamp));
3124 MFI_PRINT_CMD(cm);
3125 timedout++;
3126 }
3127 }
3128
3129#if 0
3130 if (timedout)
3131 MFI_DUMP_CMDS(SC);
3132#endif
3133
3134 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3135 }
3136
3137 return;
3138}
3139
3140static void
3141mfi_timeout(void *data)
3142{
3143 struct mfi_softc *sc = (struct mfi_softc *)data;
3144 struct mfi_command *cm;
3145 time_t deadline;
3146 int timedout = 0;
3147
3148 deadline = time_second - MFI_CMD_TIMEOUT;
3149 lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3150 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3151 if (sc->mfi_aen_cm == cm)
3152 continue;
3153 if (cm->cm_timestamp < deadline) {
3154 device_printf(sc->mfi_dev,
3155 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
3156 (int)(time_second - cm->cm_timestamp));
3157 MFI_PRINT_CMD(cm);
3158 MFI_VALIDATE_CMD(sc, cm);
3159 timedout++;
3160 }
3161 }
3162
3163#if 0
3164 if (timedout)
3165 MFI_DUMP_CMDS(SC);
3166#endif
3167
3168 lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3169
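/* Re-arm the watchdog to run again in MFI_CMD_TIMEOUT seconds. */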
3170 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3171 mfi_timeout, sc);
3172
3173 if (0)
3174 mfi_dump_all();
3175 return;
3176}