/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mly/mly.c,v 1.3.2.3 2001/03/05 20:17:24 msmith Exp $
 * $DragonFly: src/sys/dev/raid/mly/mly.c,v 1.15 2006/09/10 01:26:36 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/ctype.h>
#include <sys/ioccom.h>
#include <sys/stat.h>

#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/thread2.h>

#include <bus/cam/scsi/scsi_all.h>

#include "mlyreg.h"
#include "mlyio.h"
#include "mlyvar.h"
#define MLY_DEFINE_TABLES
#include "mly_tables.h"

static int mly_get_controllerinfo(struct mly_softc *sc);
static void mly_scan_devices(struct mly_softc *sc);
static void mly_rescan_btl(struct mly_softc *sc, int bus, int target);
static void mly_complete_rescan(struct mly_command *mc);
static int mly_get_eventstatus(struct mly_softc *sc);
static int mly_enable_mmbox(struct mly_softc *sc);
static int mly_flush(struct mly_softc *sc);
static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
        size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
static void mly_fetch_event(struct mly_softc *sc);
static void mly_complete_event(struct mly_command *mc);
static void mly_process_event(struct mly_softc *sc, struct mly_event *me);
static void mly_periodic(void *data);

static int mly_immediate_command(struct mly_command *mc);
static int mly_start(struct mly_command *mc);
static void mly_complete(void *context, int pending);

static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
static int mly_alloc_commands(struct mly_softc *sc);
static void mly_map_command(struct mly_command *mc);
static void mly_unmap_command(struct mly_command *mc);

static int mly_fwhandshake(struct mly_softc *sc);

static void mly_describe_controller(struct mly_softc *sc);
#ifdef MLY_DEBUG
static void mly_printstate(struct mly_softc *sc);
static void mly_print_command(struct mly_command *mc);
static void mly_print_packet(struct mly_command *mc);
static void mly_panic(struct mly_softc *sc, char *reason);
#endif
void mly_print_controller(int controller);

static d_open_t mly_user_open;
static d_close_t mly_user_close;
static d_ioctl_t mly_user_ioctl;
static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc);
static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh);

#define MLY_CDEV_MAJOR 158

static struct dev_ops mly_ops = {
    { "mly", MLY_CDEV_MAJOR, 0 },
    .d_open = mly_user_open,
    .d_close = mly_user_close,
    .d_ioctl = mly_user_ioctl,
};

/********************************************************************************
 ********************************************************************************
 Device Interface
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Initialise the controller and softc
 */
int
mly_attach(struct mly_softc *sc)
{
    int error;

    debug_called(1);

    callout_init(&sc->mly_periodic);

    /*
     * Initialise per-controller queues.
     */
    mly_initq_free(sc);
    mly_initq_ready(sc);
    mly_initq_busy(sc);
    mly_initq_complete(sc);

#if defined(__FreeBSD__) && __FreeBSD_version >= 500005
    /*
     * Initialise command-completion task.
     */
    TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
#endif

    /* disable interrupts before we start talking to the controller */
    MLY_MASK_INTERRUPTS(sc);

    /*
     * Wait for the controller to come ready, handshake with the firmware if required.
     * This is typically only necessary on platforms where the controller BIOS does not
     * run.
     */
    if ((error = mly_fwhandshake(sc)))
        return(error);

    /*
     * Allocate command buffers
     */
    if ((error = mly_alloc_commands(sc)))
        return(error);

    /*
     * Obtain controller feature information
     */
    if ((error = mly_get_controllerinfo(sc)))
        return(error);

    /*
     * Get the current event counter for health purposes, populate the initial
     * health status buffer.
     */
    if ((error = mly_get_eventstatus(sc)))
        return(error);

    /*
     * Enable memory-mailbox mode
     */
    if ((error = mly_enable_mmbox(sc)))
        return(error);

    /*
     * Attach to CAM.
     */
    if ((error = mly_cam_attach(sc)))
        return(error);

    /*
     * Print a little information about the controller
     */
    mly_describe_controller(sc);

    /*
     * Mark all attached devices for rescan
     */
    mly_scan_devices(sc);

    /*
     * Instigate the first status poll immediately. Rescan completions won't
     * happen until interrupts are enabled, which should still be before
     * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
     * discovery here...)
     */
    mly_periodic((void *)sc);

    /*
     * Create the control device.
     */
    dev_ops_add(&mly_ops, -1, device_get_unit(sc->mly_dev));
    sc->mly_dev_t = make_dev(&mly_ops, device_get_unit(sc->mly_dev),
                             UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
                             "mly%d", device_get_unit(sc->mly_dev));
    sc->mly_dev_t->si_drv1 = sc;

    /* enable interrupts now */
    MLY_UNMASK_INTERRUPTS(sc);

    return(0);
}

/********************************************************************************
 * Bring the controller to a state where it can be safely left alone.
 */
void
mly_detach(struct mly_softc *sc)
{

    debug_called(1);

    /* kill the periodic event */
    callout_stop(&sc->mly_periodic);

    sc->mly_state |= MLY_STATE_SUSPEND;

    /* flush controller */
    mly_printf(sc, "flushing cache...");
    printf("%s\n", mly_flush(sc) ? "failed" : "done");

    MLY_MASK_INTERRUPTS(sc);
}

/********************************************************************************
 ********************************************************************************
 Command Wrappers
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *sc)
{
    struct mly_command_ioctl mci;
    u_int8_t status;
    int error;

    debug_called(1);

    if (sc->mly_controllerinfo != NULL)
        kfree(sc->mly_controllerinfo, M_DEVBUF);

    /* build the getcontrollerinfo ioctl and send it */
    bzero(&mci, sizeof(mci));
    sc->mly_controllerinfo = NULL;
    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo),
                           &status, NULL, NULL)))
        return(error);
    if (status != 0)
        return(EIO);

    if (sc->mly_controllerparam != NULL)
        kfree(sc->mly_controllerparam, M_DEVBUF);

    /* build the getcontrollerparameter ioctl and send it */
    bzero(&mci, sizeof(mci));
    sc->mly_controllerparam = NULL;
    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam),
                           &status, NULL, NULL)))
        return(error);
    if (status != 0)
        return(EIO);

    return(0);
}

/********************************************************************************
 * Schedule all possible devices for a rescan.
 *
 */
static void
mly_scan_devices(struct mly_softc *sc)
{
    int bus, target, nchn;

    debug_called(1);

    /*
     * Clear any previous BTL information.
     */
    bzero(&sc->mly_btl, sizeof(sc->mly_btl));

    /*
     * Mark all devices as requiring a rescan, and let the early periodic scan collect them.
     */
    nchn = sc->mly_controllerinfo->physical_channels_present +
        sc->mly_controllerinfo->virtual_channels_present;
    for (bus = 0; bus < nchn; bus++)
        for (target = 0; target < MLY_MAX_TARGETS; target++)
            sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;

}

/********************************************************************************
 * Rescan a device, possibly as a consequence of getting an event which suggests
 * that it may have changed.
 */
static void
mly_rescan_btl(struct mly_softc *sc, int bus, int target)
{
    struct mly_command *mc;
    struct mly_command_ioctl *mci;

    debug_called(2);

    /* get a command */
    mc = NULL;
    if (mly_alloc_command(sc, &mc))
        return;    /* we'll be retried soon */

    /* set up the data buffer */
    mc->mc_data = kmalloc(sizeof(union mly_devinfo), M_DEVBUF, M_INTWAIT | M_ZERO);
    mc->mc_flags |= MLY_CMD_DATAIN;
    mc->mc_complete = mly_complete_rescan;

    sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;

    /*
     * Build the ioctl.
     *
     * At this point we are committed to sending this request, as it
     * will be the only one constructed for this particular update.
     */
    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
    mci->opcode = MDACMD_IOCTL;
    mci->addr.phys.controller = 0;
    mci->timeout.value = 30;
    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
    if (bus >= sc->mly_controllerinfo->physical_channels_present) {
        mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
        mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
        mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS)
            + target;
        debug(2, "logical device %d", mci->addr.log.logdev);
    } else {
        mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
        mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
        mci->addr.phys.lun = 0;
        mci->addr.phys.target = target;
        mci->addr.phys.channel = bus;
        debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
    }

    /*
     * Use the ready queue to get this command dispatched.
     */
    mly_enqueue_ready(mc);
    mly_startio(sc);
}

/********************************************************************************
 * Handle the completion of a rescan operation
 */
static void
mly_complete_rescan(struct mly_command *mc)
{
    struct mly_softc *sc = mc->mc_sc;
    struct mly_ioctl_getlogdevinfovalid *ldi;
    struct mly_ioctl_getphysdevinfovalid *pdi;
    int bus, target;

    debug_called(2);

    /* iff the command completed OK, we should use the result to update our data */
    if (mc->mc_status == 0) {
        if (mc->mc_length == sizeof(*ldi)) {
            ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
            bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
            target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
            sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL;    /* clears all other flags */
            sc->mly_btl[bus][target].mb_type = ldi->raid_level;
            sc->mly_btl[bus][target].mb_state = ldi->state;
            debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
                  mly_describe_code(mly_table_device_type, ldi->raid_level),
                  mly_describe_code(mly_table_device_state, ldi->state));
        } else if (mc->mc_length == sizeof(*pdi)) {
            pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
            bus = pdi->channel;
            target = pdi->target;
            sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL;    /* clears all other flags */
            sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
            sc->mly_btl[bus][target].mb_state = pdi->state;
            sc->mly_btl[bus][target].mb_speed = pdi->speed;
            sc->mly_btl[bus][target].mb_width = pdi->width;
            if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
                sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
            debug(2, "BTL rescan for %d:%d returns %s", bus, target,
                  mly_describe_code(mly_table_device_state, pdi->state));
        } else {
            mly_printf(sc, "BTL rescan result corrupted\n");
        }
    } else {
        /*
         * A request sent for a device beyond the last device present will fail.
         * We don't care about this, so we do nothing about it.
         */
    }
    kfree(mc->mc_data, M_DEVBUF);
    mly_release_command(mc);
}

/********************************************************************************
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *sc)
{
    struct mly_command_ioctl mci;
    struct mly_health_status *mh;
    u_int8_t status;
    int error;

    /* build the gethealthstatus ioctl and send it */
    bzero(&mci, sizeof(mci));
    mh = NULL;
    mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

    if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL)))
        return(error);
    if (status != 0)
        return(EIO);

    /* get the event counter */
    sc->mly_event_change = mh->change_counter;
    sc->mly_event_waiting = mh->next_event;
    sc->mly_event_counter = mh->next_event;

    /* save the health status into the memory mailbox */
    bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));

    debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);

    kfree(mh, M_DEVBUF);
    return(0);
}

/********************************************************************************
 * Enable the memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *sc)
{
    struct mly_command_ioctl mci;
    u_int8_t *sp, status;
    int error;

    debug_called(1);

    /* build the ioctl and send it */
    bzero(&mci, sizeof(mci));
    mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
    /* set buffer addresses */
    mci.param.setmemorymailbox.command_mailbox_physaddr =
        sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
    mci.param.setmemorymailbox.status_mailbox_physaddr =
        sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
    mci.param.setmemorymailbox.health_buffer_physaddr =
        sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);

    /* set buffer sizes - abuse of data_size field is revolting */
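    /*
     * The sizes are expressed in kilobytes: byte 0 of data_size carries the
     * command mailbox size and byte 1 the status mailbox size, while the
     * health buffer size has a field of its own.
     */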
    sp = (u_int8_t *)&mci.data_size;
    sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
    sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
    mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;

    debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox,
          mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
          mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
          mci.param.setmemorymailbox.health_buffer_physaddr,
          mci.param.setmemorymailbox.health_buffer_size);

    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
        return(error);
    if (status != 0)
        return(EIO);
    sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
    debug(1, "memory mailbox active");
    return(0);
}

/********************************************************************************
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *sc)
{
    struct mly_command_ioctl mci;
    u_int8_t status;
    int error;

    debug_called(1);

    /* build the ioctl */
    bzero(&mci, sizeof(mci));
    mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
    mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;

    /* pass it off to the controller */
    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
        return(error);

    return((status == 0) ? 0 : EIO);
}

/********************************************************************************
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer. If (*data) is NULL
 * the command requires data transfer from the controller, and we will allocate
 * a buffer for it. If (*data) is not NULL, the command requires data transfer
 * to the controller.
 *
 * XXX passing in the whole ioctl structure is ugly. Better ideas?
 *
 * XXX we don't even try to handle the case where datasize > 4k. We should.
 */
static int
mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
          u_int8_t *status, void *sense_buffer, size_t *sense_length)
{
    struct mly_command *mc;
    struct mly_command_ioctl *mci;
    int error;

    debug_called(1);

    mc = NULL;
    if (mly_alloc_command(sc, &mc)) {
        error = ENOMEM;
        goto out;
    }

    /* copy the ioctl structure, but save some important fields and then fixup */
    mci = &mc->mc_packet->ioctl;
    ioctl->sense_buffer_address = mci->sense_buffer_address;
    ioctl->maximum_sense_size = mci->maximum_sense_size;
    *mci = *ioctl;
    mci->opcode = MDACMD_IOCTL;
    mci->timeout.value = 30;
    mci->timeout.scale = MLY_TIMEOUT_SECONDS;

    /* handle the data buffer */
    if (data != NULL) {
        if (*data == NULL) {
            /* allocate data buffer */
            mc->mc_data = kmalloc(datasize, M_DEVBUF, M_INTWAIT);
            mc->mc_flags |= MLY_CMD_DATAIN;
        } else {
            mc->mc_data = *data;
            mc->mc_flags |= MLY_CMD_DATAOUT;
        }
        mc->mc_length = datasize;
        mc->mc_packet->generic.data_size = datasize;
    }

    /* run the command */
    if ((error = mly_immediate_command(mc)))
        goto out;

    /* clean up and return any data */
    *status = mc->mc_status;
    if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
        bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
        *sense_length = mc->mc_sense;
        goto out;
    }

    /* should we return a data pointer? */
    if ((data != NULL) && (*data == NULL))
        *data = mc->mc_data;

    /* command completed OK */
    error = 0;

out:
    if (mc != NULL) {
        /* do we need to free a data buffer we allocated? */
        if (error && (mc->mc_data != NULL) && (*data == NULL))
            kfree(mc->mc_data, M_DEVBUF);
        mly_release_command(mc);
    }
    return(error);
}

/********************************************************************************
 * Fetch one event from the controller.
 */
static void
mly_fetch_event(struct mly_softc *sc)
{
    struct mly_command *mc;
    struct mly_command_ioctl *mci;
    u_int32_t event;

    debug_called(2);

    /* get a command */
    mc = NULL;
    if (mly_alloc_command(sc, &mc))
        return;    /* we'll get retried the next time a command completes */

    /* set up the data buffer */
    mc->mc_data = kmalloc(sizeof(struct mly_event), M_DEVBUF, M_INTWAIT|M_ZERO);
    mc->mc_length = sizeof(struct mly_event);
    mc->mc_flags |= MLY_CMD_DATAIN;
    mc->mc_complete = mly_complete_event;

    /*
     * Get an event number to fetch. It's possible that we've raced with another
     * context for the last event, in which case there will be no more events.
     */
    crit_enter();
    if (sc->mly_event_counter == sc->mly_event_waiting) {
        mly_release_command(mc);
        crit_exit();
        return;
    }
    event = sc->mly_event_counter++;
    crit_exit();

    /*
     * Build the ioctl.
     *
     * At this point we are committed to sending this request, as it
     * will be the only one constructed for this particular event number.
     */
    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
    mci->opcode = MDACMD_IOCTL;
    mci->data_size = sizeof(struct mly_event);
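    /*
     * The 32-bit event sequence number is scattered across the addressing
     * fields: bits 16-23 go in the LUN, bits 24-31 in the target, and the
     * low 16 bits in the getevent parameter below.
     */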
    mci->addr.phys.lun = (event >> 16) & 0xff;
    mci->addr.phys.target = (event >> 24) & 0xff;
    mci->addr.phys.channel = 0;
    mci->addr.phys.controller = 0;
    mci->timeout.value = 30;
    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
    mci->sub_ioctl = MDACIOCTL_GETEVENT;
    mci->param.getevent.sequence_number_low = event & 0xffff;

    debug(2, "fetch event %u", event);

    /*
     * Use the ready queue to get this command dispatched.
     */
    mly_enqueue_ready(mc);
    mly_startio(sc);
}

/********************************************************************************
 * Handle the completion of an event poll.
 *
 * Note that we don't actually have to instigate another poll; the completion of
 * this command will trigger that if there are any more events to poll for.
 */
static void
mly_complete_event(struct mly_command *mc)
{
    struct mly_softc *sc = mc->mc_sc;
    struct mly_event *me = (struct mly_event *)mc->mc_data;

    debug_called(2);

    /*
     * If the event was successfully fetched, process it.
     */
    if (mc->mc_status == SCSI_STATUS_OK) {
        mly_process_event(sc, me);
        kfree(me, M_DEVBUF);
    }
    mly_release_command(mc);
}

/********************************************************************************
 * Process a controller event.
 */
static void
mly_process_event(struct mly_softc *sc, struct mly_event *me)
{
    struct scsi_sense_data *ssd = (struct scsi_sense_data *)&me->sense[0];
    char *fp, *tp;
    int bus, target, event, class, action;

    /*
     * Errors can be reported using vendor-unique sense data. In this case, the
     * event code will be 0x1c (Request sense data present), the sense key will
     * be 0x09 (vendor specific), the MSB of the ASC will be set, and the
     * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
     * and low seven bits of the ASC (low seven bits of the high byte).
     */
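    /* For example, ASC 0x93 (MSB set, low bits 0x13) with ASCQ 0x04 decodes to event 0x1304. */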
    if ((me->code == 0x1c) &&
        ((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
        (ssd->add_sense_code & 0x80)) {
        event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
    } else {
        event = me->code;
    }

    /* look up event, get codes */
    fp = mly_describe_code(mly_table_event, event);

    debug(2, "Event %d code 0x%x", me->sequence_number, me->code);

    /* quiet event? */
    class = fp[0];
    if (isupper(class) && bootverbose)
        class = tolower(class);

    /* get action code, text string */
    action = fp[1];
    tp = &fp[2];

    /*
     * Print some information about the event.
     *
     * This code uses a table derived from the corresponding portion of the Linux
     * driver, and thus the parser is very similar.
     */
    switch(class) {
    case 'p':        /* error on physical device */
        mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
        if (action == 'r')
            sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
        break;
    case 'l':        /* error on logical unit */
    case 'm':        /* message about logical unit */
        bus = MLY_LOGDEV_BUS(sc, me->lun);
        target = MLY_LOGDEV_TARGET(me->lun);
        mly_name_device(sc, bus, target);
        mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
        if (action == 'r')
            sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
        break;
        break;
    case 's':        /* report of sense data */
        if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
            (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
             (ssd->add_sense_code == 0x04) &&
             ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
            break;    /* ignore NO_SENSE or NOT_READY in one case */

        mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
        mly_printf(sc, " sense key %d asc %02x ascq %02x\n",
                   ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
        mly_printf(sc, " info %4D csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
        if (action == 'r')
            sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
        break;
    case 'e':
        mly_printf(sc, tp, me->target, me->lun);
        break;
    case 'c':
        mly_printf(sc, "controller %s\n", tp);
        break;
    case '?':
        mly_printf(sc, "%s - %d\n", tp, me->code);
        break;
    default:        /* probably a 'noisy' event being ignored */
        break;
    }
}

/********************************************************************************
 * Perform periodic activities.
 */
static void
mly_periodic(void *data)
{
    struct mly_softc *sc = (struct mly_softc *)data;
    int nchn, bus, target;

    debug_called(2);

    /*
     * Scan devices.
     */
    nchn = sc->mly_controllerinfo->physical_channels_present +
        sc->mly_controllerinfo->virtual_channels_present;
    for (bus = 0; bus < nchn; bus++) {
        for (target = 0; target < MLY_MAX_TARGETS; target++) {

            /* ignore the controller in this scan */
            if (target == sc->mly_controllerparam->initiator_id)
                continue;

            /* perform device rescan? */
            if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
                mly_rescan_btl(sc, bus, target);
        }
    }

    callout_reset(&sc->mly_periodic, hz, mly_periodic, sc);
}

/********************************************************************************
 ********************************************************************************
 Command Processing
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Run a command and wait for it to complete.
 *
 */
static int
mly_immediate_command(struct mly_command *mc)
{
    struct mly_softc *sc = mc->mc_sc;
    int error;

    debug_called(2);

    /* spinning inside a critical section is ugly, but we're only used during controller init */
    crit_enter();
    if ((error = mly_start(mc)))
        return(error);

    if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
        /* sleep on the command */
        while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
            tsleep(mc, 0, "mlywait", 0);
        }
    } else {
        /* spin and collect status while we do */
        while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
            mly_done(mc->mc_sc);
        }
    }
    crit_exit();
    return(0);
}

/********************************************************************************
 * Start as much queued I/O as possible on the controller
 */
void
mly_startio(struct mly_softc *sc)
{
    struct mly_command *mc;

    debug_called(2);

    for (;;) {

        /* try for a ready command */
        mc = mly_dequeue_ready(sc);

        /* try to build a command from a queued ccb */
        if (!mc)
            mly_cam_command(sc, &mc);

        /* no command == nothing to do */
        if (!mc)
            break;

        /* try to post the command */
        if (mly_start(mc)) {
            /* controller busy, or no resources - defer for later */
            mly_requeue_ready(mc);
            break;
        }
    }
}

/********************************************************************************
 * Deliver a command to the controller; allocate controller resources at the
 * last moment.
 */
static int
mly_start(struct mly_command *mc)
{
    struct mly_softc *sc = mc->mc_sc;
    union mly_command_packet *pkt;

    debug_called(2);

    /*
     * Set the command up for delivery to the controller.
     */
    mly_map_command(mc);
    mc->mc_packet->generic.command_id = mc->mc_slot;

    crit_enter();

    /*
     * Do we have to use the hardware mailbox?
     */
    if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
        /*
         * Check to see if the controller is ready for us.
         */
        if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
            crit_exit();
            return(EBUSY);
        }
        mc->mc_flags |= MLY_CMD_BUSY;

        /*
         * It's ready, send the command.
         */
        MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
        MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);

    } else {    /* use memory-mailbox mode */

        pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];

        /* check to see if the next index is free yet */
        if (pkt->mmbox.flag != 0) {
            crit_exit();
            return(EBUSY);
        }
        mc->mc_flags |= MLY_CMD_BUSY;

        /* copy in new command */
        bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
        /* barrier to ensure completion of previous write before we write the flag */
        bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);    /* tag/handle? */
        /* copy flag last */
        pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
        /* barrier to ensure completion of previous write before we notify the controller */
        bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);    /* tag/handle */

        /* signal controller, update index */
        MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
        sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
    }

    mly_enqueue_busy(mc);
    crit_exit();
    return(0);
}

/********************************************************************************
 * Pick up command status from the controller, schedule a completion event
 */
void
mly_done(struct mly_softc *sc)
{
    struct mly_command *mc;
    union mly_status_packet *sp;
    u_int16_t slot;
    int worked;

    crit_enter();
    worked = 0;

    /* pick up hardware-mailbox commands */
    if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
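        /*
         * The status mailbox is read piecewise below: the completed slot
         * number from the first two bytes, the status byte at offset 2,
         * the sense length at offset 3 and the residue count from offset 4.
         */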
        slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
        if (slot < MLY_SLOT_MAX) {
            mc = &sc->mly_command[slot - MLY_SLOT_START];
            mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
            mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
            mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
            mly_remove_busy(mc);
            mc->mc_flags &= ~MLY_CMD_BUSY;
            mly_enqueue_complete(mc);
            worked = 1;
        } else {
            /* slot 0xffff may mean "extremely bogus command" */
            mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
        }
        /* unconditionally acknowledge status */
        MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
        MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
    }

    /* pick up memory-mailbox commands */
    if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
        for (;;) {
            sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];

            /* check for more status */
            if (sp->mmbox.flag == 0)
                break;

            /* get slot number */
            slot = sp->status.command_id;
            if (slot < MLY_SLOT_MAX) {
                mc = &sc->mly_command[slot - MLY_SLOT_START];
                mc->mc_status = sp->status.status;
                mc->mc_sense = sp->status.sense_length;
                mc->mc_resid = sp->status.residue;
                mly_remove_busy(mc);
                mc->mc_flags &= ~MLY_CMD_BUSY;
                mly_enqueue_complete(mc);
                worked = 1;
            } else {
                /* slot 0xffff may mean "extremely bogus command" */
                mly_printf(sc, "got AM completion for illegal slot %u at %d\n",
                           slot, sc->mly_mmbox_status_index);
            }

            /* clear and move to next index */
            sp->mmbox.flag = 0;
            sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
        }
        /* acknowledge that we have collected status value(s) */
        MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
    }

    crit_exit();
    if (worked) {
#if defined(__FreeBSD__) && __FreeBSD_version >= 500005
        if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
            taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
        else
#endif
            mly_complete(sc, 0);
    }
}

/********************************************************************************
 * Process completed commands
 */
static void
mly_complete(void *context, int pending)
{
    struct mly_softc *sc = (struct mly_softc *)context;
    struct mly_command *mc;
    void (* mc_complete)(struct mly_command *mc);


    debug_called(2);

    /*
     * Spin pulling commands off the completed queue and processing them.
     */
    while ((mc = mly_dequeue_complete(sc)) != NULL) {

        /*
         * Free controller resources, mark command complete.
         *
         * Note that as soon as we mark the command complete, it may be freed
         * out from under us, so we need to save the mc_complete field in
         * order to later avoid dereferencing mc. (We would not expect to
         * have a polling/sleeping consumer with mc_complete != NULL).
         */
        mly_unmap_command(mc);
        mc_complete = mc->mc_complete;
        mc->mc_flags |= MLY_CMD_COMPLETE;

        /*
         * Call completion handler or wake up sleeping consumer.
         */
        if (mc_complete != NULL) {
            mc_complete(mc);
        } else {
            wakeup(mc);
        }
    }

    /*
     * We may have freed up controller resources which would allow us
     * to push more commands onto the controller, so we check here.
     */
    mly_startio(sc);

    /*
     * The controller may have updated the health status information,
     * so check for it here.
     *
     * Note that we only check for health status after a completed command. It
     * might be wise to ping the controller occasionally if it's been idle for
     * a while just to check up on it. While a filesystem is mounted, or I/O is
     * active this isn't really an issue.
     */
    if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
        sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
        debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
              sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
        sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;

        /* wake up anyone that might be interested in this */
        wakeup(&sc->mly_event_change);
    }
    if (sc->mly_event_counter != sc->mly_event_waiting)
        mly_fetch_event(sc);
}

/********************************************************************************
 ********************************************************************************
 Command Buffer Management
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Allocate a command.
 */
int
mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
{
    struct mly_command *mc;

    debug_called(3);

    if ((mc = mly_dequeue_free(sc)) == NULL)
        return(ENOMEM);

    *mcp = mc;
    return(0);
}

/********************************************************************************
 * Release a command back to the freelist.
 */
void
mly_release_command(struct mly_command *mc)
{
    debug_called(3);

    /*
     * Reset fields of the command that could cause confusion if a later
     * consumer doesn't initialise them when the command is reallocated.
     */
    mc->mc_data = NULL;
    mc->mc_flags = 0;
    mc->mc_complete = NULL;
    mc->mc_private = NULL;

    /*
     * By default, we set up to overwrite the command packet with
     * sense information.
     */
    mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
    mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);

    mly_enqueue_free(mc);
}

/********************************************************************************
 * Map helper for command allocation.
 */
static void
mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_softc *sc = (struct mly_softc *)arg;

    debug_called(2);

    sc->mly_packetphys = segs[0].ds_addr;
}

/********************************************************************************
 * Allocate and initialise command and packet structures.
 */
static int
mly_alloc_commands(struct mly_softc *sc)
{
    struct mly_command *mc;
    int i;

    /*
     * Allocate enough space for all the command packets in one chunk and
     * map them permanently into controller-visible space.
     */
    if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet,
                         BUS_DMA_NOWAIT, &sc->mly_packetmap)) {
        return(ENOMEM);
    }
    bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet,
                    MLY_MAXCOMMANDS * sizeof(union mly_command_packet),
                    mly_alloc_commands_map, sc, 0);

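    /*
     * Carve the packet area up into per-command structures: each command gets
     * a slot number, a pointer to its packet and the packet's bus address,
     * plus a DMA map for its data buffer.
     */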
    for (i = 0; i < MLY_MAXCOMMANDS; i++) {
        mc = &sc->mly_command[i];
        bzero(mc, sizeof(*mc));
        mc->mc_sc = sc;
        mc->mc_slot = MLY_SLOT_START + i;
        mc->mc_packet = sc->mly_packet + i;
        mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet));
        if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
            mly_release_command(mc);
    }
    return(0);
}

/********************************************************************************
 * Command-mapping helper function - populate this command's s/g table
 * with the s/g entries for its data.
 */
static void
mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_command *mc = (struct mly_command *)arg;
    struct mly_softc *sc = mc->mc_sc;
    struct mly_command_generic *gen = &(mc->mc_packet->generic);
    struct mly_sg_entry *sg;
    int i, tabofs;

    debug_called(3);

    /* can we use the transfer structure directly? */
    if (nseg <= 2) {
        sg = &gen->transfer.direct.sg[0];
        gen->command_control.extended_sg_table = 0;
    } else {
        tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAXSGENTRIES);
        sg = sc->mly_sg_table + tabofs;
        gen->transfer.indirect.entries[0] = nseg;
        gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
        gen->command_control.extended_sg_table = 1;
    }

    /* copy the s/g table */
    for (i = 0; i < nseg; i++) {
        sg[i].physaddr = segs[i].ds_addr;
        sg[i].length = segs[i].ds_len;
    }

}

#if 0
/********************************************************************************
 * Command-mapping helper function - save the cdb's physical address.
 *
 * We don't support 'large' SCSI commands at this time, so this is unused.
 */
static void
mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_command *mc = (struct mly_command *)arg;

    debug_called(3);

    /* XXX can we safely assume that a CDB will never cross a page boundary? */
    if ((segs[0].ds_addr % PAGE_SIZE) >
        ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
        panic("cdb crosses page boundary");

    /* fix up fields in the command packet */
    mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
}
#endif

/********************************************************************************
 * Map a command into controller-visible space
 */
static void
mly_map_command(struct mly_command *mc)
{
    struct mly_softc *sc = mc->mc_sc;

    debug_called(2);

    /* don't map more than once */
    if (mc->mc_flags & MLY_CMD_MAPPED)
        return;

    /* does the command have a data buffer? */
    if (mc->mc_data != NULL)
        bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
                        mly_map_command_sg, mc, 0);

    if (mc->mc_flags & MLY_CMD_DATAIN)
        bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
    if (mc->mc_flags & MLY_CMD_DATAOUT)
        bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);

    mc->mc_flags |= MLY_CMD_MAPPED;
}

/********************************************************************************
 * Unmap a command from controller-visible space
 */
static void
mly_unmap_command(struct mly_command *mc)
{
    struct mly_softc *sc = mc->mc_sc;

    debug_called(2);

    if (!(mc->mc_flags & MLY_CMD_MAPPED))
        return;

    if (mc->mc_flags & MLY_CMD_DATAIN)
        bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
    if (mc->mc_flags & MLY_CMD_DATAOUT)
        bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);

    /* does the command have a data buffer? */
    if (mc->mc_data != NULL)
        bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);

    mc->mc_flags &= ~MLY_CMD_MAPPED;
}

/********************************************************************************
 ********************************************************************************
 Hardware Control
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Handshake with the firmware while the card is being initialised.
 */
static int
mly_fwhandshake(struct mly_softc *sc)
{
    u_int8_t error, param0, param1;
    int spinup = 0;

    debug_called(1);

    /* set HM_STSACK and let the firmware initialise */
    MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
    DELAY(1000);    /* too short? */

    /* if HM_STSACK is still true, the controller is initialising */
    if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
        return(0);
    mly_printf(sc, "controller initialisation started\n");

    /* spin waiting for initialisation to finish, or for a message to be delivered */
    while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
        /* check for a message */
        if (MLY_ERROR_VALID(sc)) {
            error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
            param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
            param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);

            switch(error) {
            case MLY_MSG_SPINUP:
                if (!spinup) {
                    mly_printf(sc, "drive spinup in progress\n");
                    spinup = 1;    /* only print this once (should print drive being spun?) */
                }
                break;
            case MLY_MSG_RACE_RECOVERY_FAIL:
                mly_printf(sc, "mirror race recovery failed, one or more drives offline\n");
                break;
            case MLY_MSG_RACE_IN_PROGRESS:
                mly_printf(sc, "mirror race recovery in progress\n");
                break;
            case MLY_MSG_RACE_ON_CRITICAL:
                mly_printf(sc, "mirror race recovery on a critical drive\n");
                break;
            case MLY_MSG_PARITY_ERROR:
                mly_printf(sc, "FATAL MEMORY PARITY ERROR\n");
                return(ENXIO);
            default:
                mly_printf(sc, "unknown initialisation code 0x%x\n", error);
            }
        }
    }
    return(0);
}

/********************************************************************************
 ********************************************************************************
 Debugging and Diagnostics
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Print some information about the controller.
 */
static void
mly_describe_controller(struct mly_softc *sc)
{
    struct mly_ioctl_getcontrollerinfo *mi = sc->mly_controllerinfo;

    mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n",
               mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "",
               mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,    /* XXX turn encoding? */
               mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
               mi->memory_size);

    if (bootverbose) {
        mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n",
                   mly_describe_code(mly_table_oemname, mi->oem_information),
                   mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
                   mi->interface_speed, mi->interface_width, mi->interface_name);
        mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
                   mi->memory_size, mi->memory_speed, mi->memory_width,
                   mly_describe_code(mly_table_memorytype, mi->memory_type),
                   mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
                   mi->cache_size);
        mly_printf(sc, "CPU: %s @ %dMHZ\n",
                   mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
        if (mi->l2cache_size != 0)
            mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
        if (mi->exmemory_size != 0)
            mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
                       mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
                       mly_describe_code(mly_table_memorytype, mi->exmemory_type),
                       mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
        mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
        mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
                   mi->maximum_block_count, mi->maximum_sg_entries);
        mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
                   mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
        mly_printf(sc, "physical devices present %d\n",
                   mi->physical_devices_present);
        mly_printf(sc, "physical disks present/offline %d/%d\n",
                   mi->physical_disks_present, mi->physical_disks_offline);
        mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
                   mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
                   mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
                   mi->virtual_channels_possible);
        mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
        mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
                   mi->flash_size, mi->flash_age, mi->flash_maximum_age);
    }
}

#ifdef MLY_DEBUG
/********************************************************************************
 * Print some controller state
 */
static void
mly_printstate(struct mly_softc *sc)
{
    mly_printf(sc, "IDBR %02x ODBR %02x ERROR %02x (%x %x %x)\n",
               MLY_GET_REG(sc, sc->mly_idbr),
               MLY_GET_REG(sc, sc->mly_odbr),
               MLY_GET_REG(sc, sc->mly_error_status),
               sc->mly_idbr,
               sc->mly_odbr,
               sc->mly_error_status);
    mly_printf(sc, "IMASK %02x ISTATUS %02x\n",
               MLY_GET_REG(sc, sc->mly_interrupt_mask),
               MLY_GET_REG(sc, sc->mly_interrupt_status));
    mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
               MLY_GET_REG(sc, sc->mly_command_mailbox),
               MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
               MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
               MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
               MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
               MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
               MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
               MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
    mly_printf(sc, "STATUS %02x %02x %02x %02x %02x %02x %02x %02x\n",
               MLY_GET_REG(sc, sc->mly_status_mailbox),
               MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
               MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
               MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
               MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
               MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
               MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
               MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
    mly_printf(sc, " %04x %08x\n",
               MLY_GET_REG2(sc, sc->mly_status_mailbox),
               MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
}

struct mly_softc *mly_softc0 = NULL;
void
mly_printstate0(void)
{
    if (mly_softc0 != NULL)
        mly_printstate(mly_softc0);
}

/********************************************************************************
 * Print a command
 */
static void
mly_print_command(struct mly_command *mc)
{
    struct mly_softc *sc = mc->mc_sc;

    mly_printf(sc, "COMMAND @ %p\n", mc);
    mly_printf(sc, " slot %d\n", mc->mc_slot);
    mly_printf(sc, " status 0x%x\n", mc->mc_status);
    mly_printf(sc, " sense len %d\n", mc->mc_sense);
    mly_printf(sc, " resid %d\n", mc->mc_resid);
    mly_printf(sc, " packet %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
    if (mc->mc_packet != NULL)
        mly_print_packet(mc);
    mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length);
    mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n");
    mly_printf(sc, " complete %p\n", mc->mc_complete);
    mly_printf(sc, " private %p\n", mc->mc_private);
}

/********************************************************************************
 * Print a command packet
 */
static void
mly_print_packet(struct mly_command *mc)
{
    struct mly_softc *sc = mc->mc_sc;
    struct mly_command_generic *ge = (struct mly_command_generic *)mc->mc_packet;
    struct mly_command_scsi_small *ss = (struct mly_command_scsi_small *)mc->mc_packet;
    struct mly_command_scsi_large *sl = (struct mly_command_scsi_large *)mc->mc_packet;
    struct mly_command_ioctl *io = (struct mly_command_ioctl *)mc->mc_packet;
    int transfer;

    mly_printf(sc, " command_id %d\n", ge->command_id);
    mly_printf(sc, " opcode %d\n", ge->opcode);
    mly_printf(sc, " command_control fua %d dpo %d est %d dd %s nas %d ddis %d\n",
               ge->command_control.force_unit_access,
               ge->command_control.disable_page_out,
               ge->command_control.extended_sg_table,
               (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
               ge->command_control.no_auto_sense,
               ge->command_control.disable_disconnect);
    mly_printf(sc, " data_size %d\n", ge->data_size);
    mly_printf(sc, " sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
    mly_printf(sc, " lun %d\n", ge->addr.phys.lun);
    mly_printf(sc, " target %d\n", ge->addr.phys.target);
    mly_printf(sc, " channel %d\n", ge->addr.phys.channel);
    mly_printf(sc, " logical device %d\n", ge->addr.log.logdev);
    mly_printf(sc, " controller %d\n", ge->addr.phys.controller);
    mly_printf(sc, " timeout %d %s\n",
               ge->timeout.value,
               (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" :
               ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
    mly_printf(sc, " maximum_sense_size %d\n", ge->maximum_sense_size);
    switch(ge->opcode) {
    case MDACMD_SCSIPT:
    case MDACMD_SCSI:
        mly_printf(sc, " cdb length %d\n", ss->cdb_length);
        mly_printf(sc, " cdb %*D\n", ss->cdb_length, ss->cdb, " ");
        transfer = 1;
        break;
    case MDACMD_SCSILC:
    case MDACMD_SCSILCPT:
        mly_printf(sc, " cdb length %d\n", sl->cdb_length);
        mly_printf(sc, " cdb 0x%llx\n", sl->cdb_physaddr);
        transfer = 1;
        break;
    case MDACMD_IOCTL:
        mly_printf(sc, " sub_ioctl 0x%x\n", io->sub_ioctl);
        switch(io->sub_ioctl) {
        case MDACIOCTL_SETMEMORYMAILBOX:
            mly_printf(sc, " health_buffer_size %d\n",
                       io->param.setmemorymailbox.health_buffer_size);
            mly_printf(sc, " health_buffer_phys 0x%llx\n",
                       io->param.setmemorymailbox.health_buffer_physaddr);
            mly_printf(sc, " command_mailbox 0x%llx\n",
                       io->param.setmemorymailbox.command_mailbox_physaddr);
            mly_printf(sc, " status_mailbox 0x%llx\n",
                       io->param.setmemorymailbox.status_mailbox_physaddr);
            transfer = 0;
            break;

        case MDACIOCTL_SETREALTIMECLOCK:
        case MDACIOCTL_GETHEALTHSTATUS:
        case MDACIOCTL_GETCONTROLLERINFO:
        case MDACIOCTL_GETLOGDEVINFOVALID:
        case MDACIOCTL_GETPHYSDEVINFOVALID:
        case MDACIOCTL_GETPHYSDEVSTATISTICS:
        case MDACIOCTL_GETLOGDEVSTATISTICS:
        case MDACIOCTL_GETCONTROLLERSTATISTICS:
        case MDACIOCTL_GETBDT_FOR_SYSDRIVE:
        case MDACIOCTL_CREATENEWCONF:
        case MDACIOCTL_ADDNEWCONF:
        case MDACIOCTL_GETDEVCONFINFO:
        case MDACIOCTL_GETFREESPACELIST:
        case MDACIOCTL_MORE:
        case MDACIOCTL_SETPHYSDEVPARAMETER:
        case MDACIOCTL_GETPHYSDEVPARAMETER:
        case MDACIOCTL_GETLOGDEVPARAMETER:
        case MDACIOCTL_SETLOGDEVPARAMETER:
            mly_printf(sc, " param %10D\n", io->param.data.param, " ");
            transfer = 1;
            break;

        case MDACIOCTL_GETEVENT:
            mly_printf(sc, " event %d\n",
                       io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
            transfer = 1;
            break;

        case MDACIOCTL_SETRAIDDEVSTATE:
            mly_printf(sc, " state %d\n", io->param.setraiddevstate.state);
            transfer = 0;
            break;

        case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
            mly_printf(sc, " raid_device %d\n", io->param.xlatephysdevtoraiddev.raid_device);
            mly_printf(sc, " controller %d\n", io->param.xlatephysdevtoraiddev.controller);
            mly_printf(sc, " channel %d\n", io->param.xlatephysdevtoraiddev.channel);
            mly_printf(sc, " target %d\n", io->param.xlatephysdevtoraiddev.target);
            mly_printf(sc, " lun %d\n", io->param.xlatephysdevtoraiddev.lun);
            transfer = 0;
            break;

        case MDACIOCTL_GETGROUPCONFINFO:
            mly_printf(sc, " group %d\n", io->param.getgroupconfinfo.group);
            transfer = 1;
            break;

        case MDACIOCTL_GET_SUBSYSTEM_DATA:
        case MDACIOCTL_SET_SUBSYSTEM_DATA:
        case MDACIOCTL_STARTDISOCVERY:
        case MDACIOCTL_INITPHYSDEVSTART:
        case MDACIOCTL_INITPHYSDEVSTOP:
        case MDACIOCTL_INITRAIDDEVSTART:
        case MDACIOCTL_INITRAIDDEVSTOP:
        case MDACIOCTL_REBUILDRAIDDEVSTART:
        case MDACIOCTL_REBUILDRAIDDEVSTOP:
        case MDACIOCTL_MAKECONSISTENTDATASTART:
        case MDACIOCTL_MAKECONSISTENTDATASTOP:
        case MDACIOCTL_CONSISTENCYCHECKSTART:
        case MDACIOCTL_CONSISTENCYCHECKSTOP:
        case MDACIOCTL_RESETDEVICE:
        case MDACIOCTL_FLUSHDEVICEDATA:
        case MDACIOCTL_PAUSEDEVICE:
        case MDACIOCTL_UNPAUSEDEVICE:
        case MDACIOCTL_LOCATEDEVICE:
        case MDACIOCTL_SETMASTERSLAVEMODE:
        case MDACIOCTL_DELETERAIDDEV:
        case MDACIOCTL_REPLACEINTERNALDEV:
        case MDACIOCTL_CLEARCONF:
        case MDACIOCTL_GETCONTROLLERPARAMETER:
        case MDACIOCTL_SETCONTRLLERPARAMETER:
        case MDACIOCTL_CLEARCONFSUSPMODE:
        case MDACIOCTL_STOREIMAGE:
        case MDACIOCTL_READIMAGE:
        case MDACIOCTL_FLASHIMAGES:
        case MDACIOCTL_RENAMERAIDDEV:
        default:        /* no idea what to print */
            transfer = 0;
            break;
        }
        break;

    case MDACMD_IOCTLCHECK:
    case MDACMD_MEMCOPY:
    default:
        transfer = 0;
        break;    /* print nothing */
    }
    if (transfer) {
        if (ge->command_control.extended_sg_table) {
            mly_printf(sc, " sg table 0x%llx/%d\n",
                       ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
        } else {
            mly_printf(sc, " 0000 0x%llx/%lld\n",
                       ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
            mly_printf(sc, " 0001 0x%llx/%lld\n",
                       ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
        }
    }
}

/********************************************************************************
 * Panic in a slightly informative fashion
 */
static void
mly_panic(struct mly_softc *sc, char *reason)
{
    mly_printstate(sc);
    panic(reason);
}
#endif
1663
1664/********************************************************************************
1665 * Print queue statistics, callable from DDB.
1666 */
1667void
1668mly_print_controller(int controller)
1669{
1670 struct mly_softc *sc;
1671
1672 if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) {
1673 printf("mly: controller %d invalid\n", controller);
1674 } else {
1675 device_printf(sc->mly_dev, "queue curr max\n");
1676 device_printf(sc->mly_dev, "free %04d/%04d\n",
1677 sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max);
1678 device_printf(sc->mly_dev, "ready %04d/%04d\n",
1679 sc->mly_qstat[MLYQ_READY].q_length, sc->mly_qstat[MLYQ_READY].q_max);
1680 device_printf(sc->mly_dev, "busy %04d/%04d\n",
1681 sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max);
1682 device_printf(sc->mly_dev, "complete %04d/%04d\n",
1683 sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max);
1684 }
1685}
1686
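/*
 * Illustrative sketch only, not part of the driver: mly_print_controller()
 * is meant to be invoked from the kernel debugger, e.g. with
 * "call mly_print_controller(0)" at the db> prompt.  If a dedicated
 * "show mly" command were wanted, a DB_SHOW_COMMAND hook along these lines
 * could be added; the command name and the use of the optional address
 * argument as a unit number are assumptions, not existing code.
 */
#if 0
#include <ddb/ddb.h>

DB_SHOW_COMMAND(mly, db_show_mly)
{
	/* addr/have_addr come from the macro-expanded prototype;
	 * "show mly <unit>" dumps that controller, default unit 0 */
	mly_print_controller(have_addr ? (int)addr : 0);
}
#endif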
1687
1688/********************************************************************************
1689 ********************************************************************************
1690 Control device interface
1691 ********************************************************************************
1692 ********************************************************************************/
1693
1694/********************************************************************************
1695 * Accept an open operation on the control device.
1696 */
1697static int
fef8985e 1698mly_user_open(struct dev_open_args *ap)
984263bc 1699{
b13267a5 1700 cdev_t dev = ap->a_head.a_dev;
1701 int unit = minor(dev);
1702 struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit);
1703
1704 sc->mly_state |= MLY_STATE_OPEN;
1705 return(0);
1706}
1707
1708/********************************************************************************
1709 * Accept the last close on the control device.
1710 */
1711static int
fef8985e 1712mly_user_close(struct dev_close_args *ap)
984263bc 1713{
b13267a5 1714 cdev_t dev = ap->a_head.a_dev;
1715 int unit = minor(dev);
1716 struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit);
1717
1718 sc->mly_state &= ~MLY_STATE_OPEN;
1719 return (0);
1720}
1721
1722/********************************************************************************
1723 * Handle controller-specific control operations.
1724 */
1725static int
fef8985e 1726mly_user_ioctl(struct dev_ioctl_args *ap)
984263bc 1727{
b13267a5 1728 cdev_t dev = ap->a_head.a_dev;
984263bc 1729 struct mly_softc *sc = (struct mly_softc *)dev->si_drv1;
1730 struct mly_user_command *uc = (struct mly_user_command *)ap->a_data;
1731 struct mly_user_health *uh = (struct mly_user_health *)ap->a_data;
984263bc 1732
fef8985e 1733 switch(ap->a_cmd) {
1734 case MLYIO_COMMAND:
1735 return(mly_user_command(sc, uc));
1736 case MLYIO_HEALTH:
1737 return(mly_user_health(sc, uh));
1738 default:
1739 return(ENOIOCTL);
1740 }
1741}
1742
1743/********************************************************************************
1744 * Execute a command passed in from userspace.
1745 *
1746 * The control structure contains the actual command for the controller, as well
1747 * as the user-space data pointer and data size, and an optional sense buffer
1748 * size/pointer. On completion, the data size is adjusted to the command
1749 * residual, and the sense buffer size to the size of the returned sense data.
1750 *
1751 */
1752static int
1753mly_user_command(struct mly_softc *sc, struct mly_user_command *uc)
1754{
1755 struct mly_command *mc = NULL; /* NULL so the cleanup path is safe if allocation fails */
5eb77fd5 1756 int error;
1757
1758 /* allocate a command */
1759 if (mly_alloc_command(sc, &mc)) {
1760 error = ENOMEM;
1761 goto out; /* XXX Linux version will wait for a command */
1762 }
1763
1764 /* handle data size/direction */
1765 mc->mc_length = (uc->DataTransferLength >= 0) ? uc->DataTransferLength : -uc->DataTransferLength;
076ae0ab 1766 if (mc->mc_length > 0)
efda3bd0 1767 mc->mc_data = kmalloc(mc->mc_length, M_DEVBUF, M_INTWAIT);
1768 if (uc->DataTransferLength > 0) {
1769 mc->mc_flags |= MLY_CMD_DATAIN;
1770 bzero(mc->mc_data, mc->mc_length);
1771 }
1772 if (uc->DataTransferLength < 0) {
1773 mc->mc_flags |= MLY_CMD_DATAOUT;
1774 if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0)
1775 goto out;
1776 }
1777
1778 /* copy the controller command */
1779 bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox));
1780
1781 /* clear command completion handler so that we get woken up */
1782 mc->mc_complete = NULL;
1783
1784 /* execute the command */
5eb77fd5 1785 crit_enter();
1786 mly_requeue_ready(mc);
1787 mly_startio(sc);
1788 while (!(mc->mc_flags & MLY_CMD_COMPLETE))
377d4740 1789 tsleep(mc, 0, "mlyioctl", 0);
5eb77fd5 1790 crit_exit();
1791
1792 /* return the data to userspace */
1793 if (uc->DataTransferLength > 0)
1794 if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0)
1795 goto out;
1796
1797 /* return the sense buffer to userspace */
1798 if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) {
1799 if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer,
1800 min(uc->RequestSenseLength, mc->mc_sense))) != 0)
1801 goto out;
1802 }
1803
1804 /* return command results to userspace (caller will copy out) */
1805 uc->DataTransferLength = mc->mc_resid;
1806 uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
1807 uc->CommandStatus = mc->mc_status;
1808 error = 0;
1809
1810 out:
 1811 if ((mc != NULL) && (mc->mc_data != NULL)) /* mc stays NULL if allocation failed */
efda3bd0 1812 kfree(mc->mc_data, M_DEVBUF);
 1813 if (mc != NULL)
 1814 mly_release_command(mc);
1815 return(error);
1816}
1817
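/*
 * Illustrative sketch only, not part of the driver: issuing a controller
 * command from userland through MLYIO_COMMAND.  A positive DataTransferLength
 * requests a data-in transfer, a negative one marks the buffer as data to be
 * written out, and on return DataTransferLength holds the residual while
 * CommandStatus carries the controller status.  The device node name
 * "/dev/mly0", the header paths and the helper name are assumptions.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "mlyio.h"

static int
mly_example_command(void *buf, int length)
{
	struct mly_user_command uc;
	int fd;

	if ((fd = open("/dev/mly0", O_RDWR)) < 0)
		return(-1);
	memset(&uc, 0, sizeof(uc));
	/* the caller would fill uc.CommandMailbox with a controller command here */
	uc.DataTransferBuffer = buf;
	uc.DataTransferLength = length;		/* >0 data in, <0 data out */
	uc.RequestSenseBuffer = NULL;
	uc.RequestSenseLength = 0;
	if (ioctl(fd, MLYIO_COMMAND, &uc) < 0) {
		close(fd);
		return(-1);
	}
	printf("status 0x%x, residual %d\n", (unsigned int)uc.CommandStatus,
	       uc.DataTransferLength);
	close(fd);
	return(0);
}
#endif
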
1818/********************************************************************************
1819 * Return health status to userspace. If the health change index in the user
1820 * structure does not match that currently exported by the controller, we
1821 * return the current status immediately. Otherwise, we block until either
1822 * interrupted or new status is delivered.
1823 */
1824static int
1825mly_user_health(struct mly_softc *sc, struct mly_user_health *uh)
1826{
1827 struct mly_health_status mh;
5eb77fd5 1828 int error;
1829
1830 /* fetch the current health status from userspace */
1831 if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0)
1832 return(error);
1833
1834 /* spin waiting for a status update */
5eb77fd5 1835 crit_enter();
1836 error = EWOULDBLOCK;
1837 while ((error != 0) && (sc->mly_event_change == mh.change_counter))
377d4740 1838 error = tsleep(&sc->mly_event_change, PCATCH, "mlyhealth", 0);
5eb77fd5 1839 crit_exit();
1840
1841 /* copy the controller's health status buffer out (there is a race here if it changes again) */
1842 error = copyout(&sc->mly_mmbox->mmm_health.status, uh->HealthStatusBuffer,
 1843 sizeof(sc->mly_mmbox->mmm_health.status));
1844 return(error);
1845}
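
/*
 * Illustrative sketch only, not part of the driver: a minimal health-status
 * watcher built on MLYIO_HEALTH.  Each ioctl blocks until the controller's
 * change counter differs from the one in the buffer passed in, so feeding the
 * returned status straight back in gives a simple event loop.  The device
 * node path argument, the header paths and the helper name are assumptions.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "mlyreg.h"
#include "mlyio.h"

static int
mly_example_health_watch(const char *path)
{
	struct mly_user_health uh;
	struct mly_health_status mh;
	int fd;

	if ((fd = open(path, O_RDONLY)) < 0)
		return(-1);
	memset(&mh, 0, sizeof(mh));	/* stale counter, so the first call returns at once */
	uh.HealthStatusBuffer = &mh;
	for (;;) {
		if (ioctl(fd, MLYIO_HEALTH, &uh) < 0)
			break;		/* interrupted or device detached */
		printf("health change counter now %u\n",
		       (unsigned int)mh.change_counter);
	}
	close(fd);
	return(-1);
}
#endif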