2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2001 Scott Long
4 * Copyright (c) 2000 BSDi
5 * Copyright (c) 2001 Adaptec, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * $FreeBSD: src/sys/dev/aac/aac.c,v 1.9.2.14 2003/04/08 13:22:08 scottl Exp $
30 * $DragonFly: src/sys/dev/raid/aac/aac.c,v 1.34 2008/01/20 03:40:35 pavalos Exp $
34 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
36 #define AAC_DRIVER_VERSION 0x02000000
37 #define AAC_DRIVERNAME "aac"
41 /* #include <stddef.h> */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/sysctl.h>
48 #include <sys/event.h>
52 #include <sys/devicestat.h>
54 #include <sys/signalvar.h>
56 #include <sys/eventhandler.h>
59 #include <sys/mplock2.h>
61 #include <bus/pci/pcireg.h>
62 #include <bus/pci/pcivar.h>
65 #include "aac_ioctl.h"
67 #include "aac_tables.h"
69 static void aac_startup(void *arg);
70 static void aac_add_container(struct aac_softc *sc,
71 struct aac_mntinforesp *mir, int f);
72 static void aac_get_bus_info(struct aac_softc *sc);
73 static int aac_shutdown(device_t dev);
75 /* Command Processing */
76 static void aac_timeout(void *ssc);
77 static int aac_map_command(struct aac_command *cm);
78 static void aac_complete(void *context, int pending);
79 static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
80 static void aac_bio_complete(struct aac_command *cm);
81 static int aac_wait_command(struct aac_command *cm);
82 static void aac_command_thread(void *arg);
84 /* Command Buffer Management */
85 static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
87 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
89 static int aac_alloc_commands(struct aac_softc *sc);
90 static void aac_free_commands(struct aac_softc *sc);
91 static void aac_unmap_command(struct aac_command *cm);
93 /* Hardware Interface */
94 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
96 static int aac_check_firmware(struct aac_softc *sc);
97 static int aac_init(struct aac_softc *sc);
98 static int aac_sync_command(struct aac_softc *sc, u_int32_t command,
99 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
100 u_int32_t arg3, u_int32_t *sp);
101 static int aac_enqueue_fib(struct aac_softc *sc, int queue,
102 struct aac_command *cm);
103 static int aac_dequeue_fib(struct aac_softc *sc, int queue,
104 u_int32_t *fib_size, struct aac_fib **fib_addr);
105 static int aac_enqueue_response(struct aac_softc *sc, int queue,
106 struct aac_fib *fib);
108 /* Falcon/PPC interface */
109 static int aac_fa_get_fwstatus(struct aac_softc *sc);
110 static void aac_fa_qnotify(struct aac_softc *sc, int qbit);
111 static int aac_fa_get_istatus(struct aac_softc *sc);
112 static void aac_fa_clear_istatus(struct aac_softc *sc, int mask);
113 static void aac_fa_set_mailbox(struct aac_softc *sc, u_int32_t command,
114 u_int32_t arg0, u_int32_t arg1,
115 u_int32_t arg2, u_int32_t arg3);
116 static int aac_fa_get_mailbox(struct aac_softc *sc, int mb);
117 static void aac_fa_set_interrupts(struct aac_softc *sc, int enable);
/*
 * Falcon/PPC register-access method table.
 * NOTE(review): this extract is incomplete — several members (fwstatus,
 * qnotify, mailbox accessors) and the closing brace are missing lines.
 */
119 struct aac_interface aac_fa_interface = {
123 aac_fa_clear_istatus,
126 aac_fa_set_interrupts,
130 /* StrongARM interface */
131 static int aac_sa_get_fwstatus(struct aac_softc *sc);
132 static void aac_sa_qnotify(struct aac_softc *sc, int qbit);
133 static int aac_sa_get_istatus(struct aac_softc *sc);
134 static void aac_sa_clear_istatus(struct aac_softc *sc, int mask);
135 static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
136 u_int32_t arg0, u_int32_t arg1,
137 u_int32_t arg2, u_int32_t arg3);
138 static int aac_sa_get_mailbox(struct aac_softc *sc, int mb);
139 static void aac_sa_set_interrupts(struct aac_softc *sc, int enable);
/*
 * StrongARM register-access method table.
 * NOTE(review): extract is incomplete — other members and the closing
 * brace are missing lines in this view.
 */
141 struct aac_interface aac_sa_interface = {
145 aac_sa_clear_istatus,
148 aac_sa_set_interrupts,
152 /* i960Rx interface */
153 static int aac_rx_get_fwstatus(struct aac_softc *sc);
154 static void aac_rx_qnotify(struct aac_softc *sc, int qbit);
155 static int aac_rx_get_istatus(struct aac_softc *sc);
156 static void aac_rx_clear_istatus(struct aac_softc *sc, int mask);
157 static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
158 u_int32_t arg0, u_int32_t arg1,
159 u_int32_t arg2, u_int32_t arg3);
160 static int aac_rx_get_mailbox(struct aac_softc *sc, int mb);
161 static void aac_rx_set_interrupts(struct aac_softc *sc, int enable);
162 static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
163 static int aac_rx_get_outb_queue(struct aac_softc *sc);
164 static void aac_rx_set_outb_queue(struct aac_softc *sc, int index);
/*
 * i960Rx register-access method table.  Unlike the FA/SA boards this
 * hardware also exposes a send-command path and an outbound queue
 * (visible members below); earlier members are missing from the extract.
 */
166 struct aac_interface aac_rx_interface = {
170 aac_rx_clear_istatus,
173 aac_rx_set_interrupts,
175 aac_rx_get_outb_queue,
176 aac_rx_set_outb_queue
179 /* Rocket/MIPS interface */
180 static int aac_rkt_get_fwstatus(struct aac_softc *sc);
181 static void aac_rkt_qnotify(struct aac_softc *sc, int qbit);
182 static int aac_rkt_get_istatus(struct aac_softc *sc);
183 static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
184 static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
185 u_int32_t arg0, u_int32_t arg1,
186 u_int32_t arg2, u_int32_t arg3);
187 static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
188 static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
189 static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
190 static int aac_rkt_get_outb_queue(struct aac_softc *sc);
191 static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index);
/*
 * Rocket/MIPS register-access method table.  Mirrors the Rx interface
 * (send_command + outbound queue accessors).  Some interior members and
 * the closing brace are missing lines in this extract.
 */
193 struct aac_interface aac_rkt_interface = {
194 aac_rkt_get_fwstatus,
197 aac_rkt_clear_istatus,
200 aac_rkt_set_interrupts,
201 aac_rkt_send_command,
202 aac_rkt_get_outb_queue,
203 aac_rkt_set_outb_queue
206 /* Debugging and Diagnostics */
207 static void aac_describe_controller(struct aac_softc *sc);
208 static char *aac_describe_code(struct aac_code_lookup *table,
211 /* Management Interface */
212 static d_open_t aac_open;
213 static d_close_t aac_close;
214 static d_ioctl_t aac_ioctl;
215 static d_kqfilter_t aac_kqfilter;
216 static void aac_filter_detach(struct knote *kn);
217 static int aac_filter(struct knote *kn, long hint);
218 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) __unused;
219 static void aac_handle_aif(struct aac_softc *sc,
220 struct aac_fib *fib);
221 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
222 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
223 static int aac_return_aif(struct aac_softc *sc, caddr_t uptr);
224 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
225 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
226 static void aac_ioctl_event(struct aac_softc *sc,
227 struct aac_event *event, void *arg);
/*
 * Character-device entry points for the /dev/aac%d management node,
 * plus module/malloc/sysctl registration glue.
 * NOTE(review): the .d_open member line is missing from this extract.
 */
229 #define AAC_CDEV_MAJOR 150
231 static struct dev_ops aac_ops = {
232 { "aac", AAC_CDEV_MAJOR, 0 },
234 .d_close = aac_close,
235 .d_ioctl = aac_ioctl,
236 .d_kqfilter = aac_kqfilter
239 DECLARE_DUMMY_MODULE(aac);
241 MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");
244 SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");
251 * Initialise the controller and softc
/*
 * aac_attach() -- bring up the controller and populate the softc:
 * queues, locks, firmware check, adapter init, interrupt hookup,
 * config-intrhook registration (container probing runs later from
 * aac_startup), control device creation, the AIF kthread, the
 * post-sync shutdown eventhandler and optional CAM registration.
 * NOTE(review): this extract is missing interior lines (error-path
 * returns, braces, the fallback non-FAST interrupt setup) — do not
 * assume the visible statements are contiguous.
 */
254 aac_attach(struct aac_softc *sc)
259 callout_init(&sc->aac_watchdog);
262 * Initialise per-controller queues.
267 aac_initq_complete(sc);
271 * Initialise command-completion task.
273 TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);
275 /* mark controller as suspended until we get ourselves organised */
276 sc->aac_state |= AAC_STATE_SUSPEND;
279 * Check that the firmware on the card is supported.
281 if ((error = aac_check_firmware(sc)) != 0)
287 AAC_LOCK_INIT(&sc->aac_aifq_lock, "AAC AIF lock");
288 AAC_LOCK_INIT(&sc->aac_io_lock, "AAC I/O lock");
289 AAC_LOCK_INIT(&sc->aac_container_lock, "AAC container lock");
290 TAILQ_INIT(&sc->aac_container_tqh);
291 TAILQ_INIT(&sc->aac_ev_cmfree);
294 /* Initialize the local AIF queue pointers */
295 sc->aac_aifq_head = sc->aac_aifq_tail = AAC_AIFQ_LENGTH;
298 * Initialise the adapter.
300 if ((error = aac_init(sc)) != 0)
304 * Allocate and connect our interrupt.
307 if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ,
310 RF_ACTIVE)) == NULL) {
311 device_printf(sc->aac_dev, "can't allocate interrupt\n");
/* NEW_COMM adapters get a threaded handler; others try FAST first */
314 if (sc->flags & AAC_FLAGS_NEW_COMM) {
315 if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
317 sc, &sc->aac_intr, NULL)) {
318 device_printf(sc->aac_dev, "can't set up interrupt\n");
322 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 0,
323 aac_fast_intr, sc, &sc->aac_intr, NULL)) {
324 device_printf(sc->aac_dev,
325 "can't set up FAST interrupt\n");
326 if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
328 sc, &sc->aac_intr, NULL)) {
329 device_printf(sc->aac_dev,
330 "can't set up MPSAFE interrupt\n");
337 * Print a little information about the controller.
339 aac_describe_controller(sc);
342 * Register to probe our containers later.
344 sc->aac_ich.ich_func = aac_startup;
345 sc->aac_ich.ich_arg = sc;
346 sc->aac_ich.ich_desc = "aac";
347 if (config_intrhook_establish(&sc->aac_ich) != 0) {
348 device_printf(sc->aac_dev,
349 "can't establish configuration hook\n");
354 * Make the control device.
356 unit = device_get_unit(sc->aac_dev);
357 sc->aac_dev_t = make_dev(&aac_ops, unit, UID_ROOT, GID_OPERATOR,
358 0640, "aac%d", unit);
359 sc->aac_dev_t->si_drv1 = sc;
360 reference_dev(sc->aac_dev_t);
362 /* Create the AIF thread */
363 if (kthread_create(aac_command_thread, sc,
364 &sc->aifthread, "aac%daif", unit))
365 panic("Could not create AIF thread\n");
367 /* Register the shutdown method to only be called post-dump */
368 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_post_sync, aac_shutdown,
369 sc->aac_dev, SHUTDOWN_PRI_DRIVER)) == NULL)
370 device_printf(sc->aac_dev,
371 "shutdown event registration failed\n");
373 /* Register with CAM for the non-DASD devices */
374 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
375 TAILQ_INIT(&sc->aac_sim_tqh);
376 aac_get_bus_info(sc);
/*
 * aac_add_event() -- queue an event callback on the softc; only
 * AAC_EVENT_CMFREE (a command slot became free) is handled, anything
 * else is logged.  NOTE(review): the default-case label, break
 * statements and closing braces are missing lines in this extract.
 */
383 aac_add_event(struct aac_softc *sc, struct aac_event *event)
386 switch (event->ev_type & AAC_EVENT_MASK) {
387 case AAC_EVENT_CMFREE:
388 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
391 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
400 * Probe for containers, create disks.
/*
 * aac_startup() -- deferred-boot hook: detach from the intrhook chain,
 * probe every container via VM_NameServe sync FIBs, attach child
 * devices, clear AAC_STATE_SUSPEND and unmask interrupts.
 * NOTE(review): loop header (do {), the i/count increments and some
 * braces are missing lines in this extract.
 */
403 aac_startup(void *arg)
405 struct aac_softc *sc;
407 struct aac_mntinfo *mi;
408 struct aac_mntinforesp *mir = NULL;
409 int count = 0, i = 0;
413 sc = (struct aac_softc *)arg;
415 /* disconnect ourselves from the intrhook chain */
416 config_intrhook_disestablish(&sc->aac_ich);
418 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
419 aac_alloc_sync_fib(sc, &fib);
420 mi = (struct aac_mntinfo *)&fib->data[0];
422 /* loop over possible containers */
424 /* request information on this container */
425 bzero(mi, sizeof(struct aac_mntinfo));
426 mi->Command = VM_NameServe;
427 mi->MntType = FT_FILESYS;
429 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
430 sizeof(struct aac_mntinfo))) {
431 device_printf(sc->aac_dev,
432 "error probing container %d", i);
437 mir = (struct aac_mntinforesp *)&fib->data[0];
438 /* XXX Need to check if count changed */
439 count = mir->MntRespCount;
440 aac_add_container(sc, mir, 0);
442 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
444 aac_release_sync_fib(sc);
445 AAC_LOCK_RELEASE(&sc->aac_io_lock);
447 /* poke the bus to actually attach the child devices */
448 if (bus_generic_attach(sc->aac_dev))
449 device_printf(sc->aac_dev, "bus_generic_attach failed\n");
451 /* mark the controller up */
452 sc->aac_state &= ~AAC_STATE_SUSPEND;
454 /* enable interrupts now */
455 AAC_UNMASK_INTERRUPTS(sc);
459 * Create a device to respresent a new container
/*
 * aac_add_container() -- create an "aacd" child device for a probed
 * container (if its status is OK and its volume type is valid), record
 * the mount object in a kmalloc'd aac_container and link it onto
 * aac_container_tqh under the container lock.
 * NOTE(review): kmalloc flags, some field assignments (e.g. co_disk)
 * and braces are missing lines in this extract.
 */
462 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
464 struct aac_container *co;
468 * Check container volume type for validity. Note that many of
469 * the possible types may never show up.
471 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
472 co = (struct aac_container *)kmalloc(sizeof *co, M_AACBUF,
474 debug(1, "id %x name '%.16s' size %u type %d",
475 mir->MntTable[0].ObjectId,
476 mir->MntTable[0].FileSystemName,
477 mir->MntTable[0].Capacity, mir->MntTable[0].VolType);
479 if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
480 device_printf(sc->aac_dev, "device_add_child failed\n");
482 device_set_ivars(child, co);
483 device_set_desc(child, aac_describe_code(aac_container_types,
484 mir->MntTable[0].VolType));
487 bcopy(&mir->MntTable[0], &co->co_mntobj,
488 sizeof(struct aac_mntobj));
489 AAC_LOCK_ACQUIRE(&sc->aac_container_lock);
490 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
491 AAC_LOCK_RELEASE(&sc->aac_container_lock);
496 * Free all of the resources associated with (sc)
498 * Should not be called if the controller is active.
/*
 * aac_free() -- release every resource held by the softc, in the
 * reverse order of acquisition: control device, FIB buffers/DMA tag,
 * common area, interrupt, data and parent DMA tags, register window,
 * and finally the dev_ops minor.  Must not be called while the
 * controller is active.  NOTE(review): some guard conditions / braces
 * are missing lines in this extract.
 */
501 aac_free(struct aac_softc *sc)
506 /* remove the control device */
507 if (sc->aac_dev_t != NULL)
508 destroy_dev(sc->aac_dev_t);
510 /* throw away any FIB buffers, discard the FIB DMA tag */
511 aac_free_commands(sc);
512 if (sc->aac_fib_dmat)
513 bus_dma_tag_destroy(sc->aac_fib_dmat);
515 kfree(sc->aac_commands, M_AACBUF);
517 /* destroy the common area */
518 if (sc->aac_common) {
519 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
520 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
521 sc->aac_common_dmamap);
523 if (sc->aac_common_dmat)
524 bus_dma_tag_destroy(sc->aac_common_dmat);
526 /* disconnect the interrupt handler */
528 bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
529 if (sc->aac_irq != NULL)
530 bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid,
533 /* destroy data-transfer DMA tag */
534 if (sc->aac_buffer_dmat)
535 bus_dma_tag_destroy(sc->aac_buffer_dmat);
537 /* destroy the parent DMA tag */
538 if (sc->aac_parent_dmat)
539 bus_dma_tag_destroy(sc->aac_parent_dmat);
541 /* release the register window mapping */
542 if (sc->aac_regs_resource != NULL) {
543 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
544 sc->aac_regs_rid, sc->aac_regs_resource);
546 dev_ops_remove_minor(&aac_ops, device_get_unit(sc->aac_dev));
550 * Disconnect from the controller completely, in preparation for unload.
/*
 * aac_detach() -- driver detach path: stop the watchdog, refuse detach
 * while the control device is open, delete container children and CAM
 * SIMs, ask the AIF thread to exit (waiting up to 30s, then panicking
 * if it won't), shut the controller down, deregister the shutdown
 * eventhandler and tear down the locks.
 * NOTE(review): error-return lines, the EBUSY path and aac_free() call
 * are missing lines in this extract.
 */
553 aac_detach(device_t dev)
555 struct aac_softc *sc;
556 struct aac_container *co;
562 sc = device_get_softc(dev);
564 callout_stop(&sc->aac_watchdog);
566 if (sc->aac_state & AAC_STATE_OPEN)
569 /* Remove the child containers */
570 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
571 error = device_delete_child(dev, co->co_disk);
574 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
578 /* Remove the CAM SIMs */
579 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
580 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
581 error = device_delete_child(dev, sim->sim_dev);
584 kfree(sim, M_AACBUF);
587 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
588 sc->aifflags |= AAC_AIFFLAGS_EXIT;
589 wakeup(sc->aifthread);
590 tsleep(sc->aac_dev, PCATCH, "aacdch", 30 * hz);
593 if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
594 panic("Cannot shutdown AIF thread\n");
596 if ((error = aac_shutdown(dev)))
599 EVENTHANDLER_DEREGISTER(shutdown_post_sync, sc->eh);
603 lockuninit(&sc->aac_aifq_lock);
604 lockuninit(&sc->aac_io_lock);
605 lockuninit(&sc->aac_container_lock);
611 * Bring the controller down to a dormant state and detach all child devices.
613 * This function is called before detach or system shutdown.
615 * Note that we can assume that the bioq on the controller is empty, as we won't
616 * allow shutdown if any device is open.
/*
 * aac_shutdown() -- quiesce the controller: mark it suspended, then
 * send a VM_CloseAll ContainerCommand followed by a FsaHostShutdown
 * FIB, mask interrupts and release the sync FIB.  Callers guarantee
 * the bioq is empty (no device may be open during shutdown).
 * NOTE(review): the FsaHostShutdown argument lines and final return
 * are missing lines in this extract.
 */
619 aac_shutdown(device_t dev)
621 struct aac_softc *sc;
623 struct aac_close_command *cc;
627 sc = device_get_softc(dev);
629 sc->aac_state |= AAC_STATE_SUSPEND;
632 * Send a Container shutdown followed by a HostShutdown FIB to the
633 * controller to convince it that we don't want to talk to it anymore.
634 * We've been closed and all I/O completed already
636 device_printf(sc->aac_dev, "shutting down controller...");
638 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
639 aac_alloc_sync_fib(sc, &fib);
640 cc = (struct aac_close_command *)&fib->data[0];
642 bzero(cc, sizeof(struct aac_close_command));
643 cc->Command = VM_CloseAll;
644 cc->ContainerId = 0xffffffff;
645 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
646 sizeof(struct aac_close_command)))
647 kprintf("FAILED.\n");
654 * XXX Issuing this command to the controller makes it shut down
655 * but also keeps it from coming back up without a reset of the
656 * PCI bus. This is not desirable if you are just unloading the
657 * driver module with the intent to reload it later.
659 if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
661 kprintf("FAILED.\n");
668 AAC_MASK_INTERRUPTS(sc);
669 aac_release_sync_fib(sc);
670 AAC_LOCK_RELEASE(&sc->aac_io_lock);
676 * Bring the controller to a quiescent state, ready for system suspend.
/*
 * aac_suspend() -- system-suspend hook: flag the controller as
 * suspended and mask its interrupts.  Inverse of aac_resume().
 */
679 aac_suspend(device_t dev)
681 struct aac_softc *sc;
685 sc = device_get_softc(dev);
687 sc->aac_state |= AAC_STATE_SUSPEND;
689 AAC_MASK_INTERRUPTS(sc);
694 * Bring the controller back to a state ready for operation.
/*
 * aac_resume() -- system-resume hook: clear the suspend flag and
 * unmask interrupts so the controller can operate again.
 */
697 aac_resume(device_t dev)
699 struct aac_softc *sc;
703 sc = device_get_softc(dev);
705 sc->aac_state &= ~AAC_STATE_SUSPEND;
706 AAC_UNMASK_INTERRUPTS(sc);
711 * Interrupt handler for NEW_COMM interface.
/*
 * aac_new_intr() -- interrupt handler for NEW_COMM adapters.  Drains
 * the outbound queue: 0xffffffff means empty (re-read once),
 * 0xfffffffe is a "more work" hint (ignored), an odd index is an AIF
 * copied out of adapter memory and handed to aac_handle_aif(), an even
 * index identifies a completed command (index >> 2 into aac_commands).
 * NOTE(review): the for(;;) loop header, the index&2 / index&1 tests
 * and several completion-path lines are missing from this extract —
 * the visible statements are not contiguous.
 */
714 aac_new_intr(void *arg)
716 struct aac_softc *sc;
717 u_int32_t index, fast;
718 struct aac_command *cm;
724 sc = (struct aac_softc *)arg;
726 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
728 index = AAC_GET_OUTB_QUEUE(sc);
729 if (index == 0xffffffff)
730 index = AAC_GET_OUTB_QUEUE(sc);
731 if (index == 0xffffffff)
734 if (index == 0xfffffffe) {
735 /* XXX This means that the controller wants
736 * more work. Ignore it for now.
741 fib = (struct aac_fib *)kmalloc(sizeof *fib, M_AACBUF,
744 for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
745 ((u_int32_t *)fib)[i] = AAC_GETREG4(sc, index + i*4);
746 aac_handle_aif(sc, fib);
747 kfree(fib, M_AACBUF);
750 * AIF memory is owned by the adapter, so let it
751 * know that we are done with it.
753 AAC_SET_OUTB_QUEUE(sc, index);
754 AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
757 cm = sc->aac_commands + (index >> 2);
760 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
761 *((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
764 aac_unmap_command(cm);
765 cm->cm_flags |= AAC_CMD_COMPLETED;
767 /* is there a completion handler? */
768 if (cm->cm_complete != NULL) {
771 /* assume that someone is sleeping on this
776 sc->flags &= ~AAC_QUEUE_FRZN;
779 /* see if we can start some more I/O */
780 if ((sc->flags & AAC_QUEUE_FRZN) == 0)
783 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/*
 * aac_fast_intr() -- lockless FAST interrupt handler for legacy
 * adapters.  Reads and clears the interrupt status register directly,
 * defers response processing to the aac_complete taskqueue, and wakes
 * the AIF thread for printf/command-ready doorbells.
 * NOTE(review): braces and the closing of the handler are missing
 * lines in this extract.
 */
787 aac_fast_intr(void *arg)
789 struct aac_softc *sc;
794 sc = (struct aac_softc *)arg;
797 * Read the status register directly. This is faster than taking the
798 * driver lock and reading the queues directly. It also saves having
799 * to turn parts of the driver lock into a spin mutex, which would be
802 reason = AAC_GET_ISTATUS(sc);
803 AAC_CLEAR_ISTATUS(sc, reason);
805 /* handle completion processing */
806 if (reason & AAC_DB_RESPONSE_READY)
807 taskqueue_enqueue(taskqueue_swi, &sc->aac_task_complete);
809 /* controller wants to talk to us */
810 if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
812 * XXX Make sure that we don't get fooled by strange messages
813 * that start with a NULL.
815 if ((reason & AAC_DB_PRINTF) &&
816 (sc->aac_common->ac_printf[0] == 0))
817 sc->aac_common->ac_printf[0] = 32;
820 * This might miss doing the actual wakeup. However, the
821 * ssleep that this is waking up has a timeout, so it will
822 * wake up eventually. AIFs and printfs are low enough
823 * priority that they can handle hanging out for a few seconds
826 wakeup(sc->aifthread);
835 * Start as much queued I/O as possible on the controller
/*
 * aac_startio() -- push as much queued I/O to the controller as
 * possible: drain the deferred-ready queue first, then build commands
 * from the bio queue; a bus_dmamap_load failure in aac_map_command()
 * is fatal.  Bails out immediately while the queue is frozen.
 * NOTE(review): the loop header and the "no command" break are missing
 * lines in this extract.
 */
838 aac_startio(struct aac_softc *sc)
840 struct aac_command *cm;
844 if (sc->flags & AAC_QUEUE_FRZN)
849 * Try to get a command that's been put off for lack of
852 cm = aac_dequeue_ready(sc);
855 * Try to build a command off the bio queue (ignore error
859 aac_bio_command(sc, &cm);
866 * Try to give the command to the controller. Any error is
867 * catastrophic since it means that bus_dmamap_load() failed.
869 if (aac_map_command(cm) != 0)
870 panic("aac: error mapping command %p\n", cm);
875 * Deliver a command to the controller; allocate controller resources at the
876 * last moment when possible.
/*
 * aac_map_command() -- DMA-map a command's data buffer and deliver it
 * to the adapter via the aac_map_command_sg callback.  EINPROGRESS
 * from bus_dmamap_load() freezes the queue until the deferred callback
 * fires; commands with no data are delivered directly.  Double-mapping
 * is a panic.  NOTE(review): the return statements and the sc
 * assignment are missing lines in this extract.
 */
879 aac_map_command(struct aac_command *cm)
881 struct aac_softc *sc;
889 /* don't map more than once */
890 if (cm->cm_flags & AAC_CMD_MAPPED)
891 panic("aac: command %p already mapped", cm);
893 if (cm->cm_datalen != 0) {
894 error = bus_dmamap_load(sc->aac_buffer_dmat, cm->cm_datamap,
895 cm->cm_data, cm->cm_datalen,
896 aac_map_command_sg, cm, 0);
897 if (error == EINPROGRESS) {
898 debug(1, "freezing queue\n");
899 sc->flags |= AAC_QUEUE_FRZN;
903 aac_map_command_sg(cm, NULL, 0, 0);
909 * Handle notification of one or more FIBs coming from the controller.
/*
 * aac_command_thread() -- per-controller kernel thread.  Sleeps on
 * sc->aifthread with a periodic timeout; each wakeup it (1) allocates
 * more FIBs if requested (done unlocked: contigmalloc would LOR with
 * the driver lock), (2) on timeout runs the stuck-command check,
 * (3) drains the hardware printf buffer, and (4) dequeues adapter-
 * originated FIBs (AIFs), handling them and returning them on the
 * adapter's response queue.  Exits when AAC_AIFFLAGS_EXIT is set,
 * waking the detach path sleeping on sc->aac_dev.
 * NOTE(review): several loop/brace/continue lines are missing from
 * this extract — the visible statements are not contiguous.
 */
912 aac_command_thread(void *arg)
914 struct aac_softc *sc = arg;
922 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
923 sc->aifflags = AAC_AIFFLAGS_RUNNING;
925 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
927 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) {
928 tsleep_interlock(sc->aifthread, 0);
929 AAC_LOCK_RELEASE(&sc->aac_io_lock);
930 retval = tsleep(sc->aifthread, PINTERLOCKED,
931 "aifthd", AAC_PERIODIC_INTERVAL * hz);
932 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
935 * First see if any FIBs need to be allocated. This needs
936 * to be called without the driver lock because contigmalloc
937 * will grab Giant, and would result in an LOR.
939 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
940 AAC_LOCK_RELEASE(&sc->aac_io_lock);
941 aac_alloc_commands(sc);
942 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
943 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
948 * While we're here, check to see if any commands are stuck.
949 * This is pretty low-priority, so it's ok if it doesn't
952 if (retval == EWOULDBLOCK)
955 /* Check the hardware printf message buffer */
956 if (sc->aac_common->ac_printf[0] != 0)
957 aac_print_printf(sc);
959 /* Also check to see if the adapter has a command for us. */
960 if (sc->flags & AAC_FLAGS_NEW_COMM)
963 if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
967 AAC_PRINT_FIB(sc, fib);
969 switch (fib->Header.Command) {
971 aac_handle_aif(sc, fib);
974 device_printf(sc->aac_dev, "unknown command "
975 "from controller\n");
979 if ((fib->Header.XferState == 0) ||
980 (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
984 /* Return the AIF to the controller. */
985 if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
986 fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
987 *(AAC_FSAStatus*)fib->data = ST_OK;
989 /* XXX Compute the Size field? */
990 size = fib->Header.Size;
991 if (size > sizeof(struct aac_fib)) {
992 size = sizeof(struct aac_fib);
993 fib->Header.Size = size;
996 * Since we did not generate this command, it
997 * cannot go through the normal
998 * enqueue->startio chain.
1000 aac_enqueue_response(sc,
1001 AAC_ADAP_NORM_RESP_QUEUE,
1006 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1007 AAC_LOCK_RELEASE(&sc->aac_io_lock);
1008 wakeup(sc->aac_dev);
1014 * Process completed commands.
/*
 * aac_complete() -- taskqueue handler that drains the host-normal
 * response queue: for each completed FIB, look up the command by
 * SenderData, unmap it, mark it completed and either run its
 * completion handler or wake the sleeper, then unfreeze the queue.
 * NOTE(review): the for(;;) loop header, wakeup() call and final
 * aac_startio() are missing lines in this extract.
 */
1017 aac_complete(void *context, int pending)
1019 struct aac_softc *sc;
1020 struct aac_command *cm;
1021 struct aac_fib *fib;
1026 sc = (struct aac_softc *)context;
1028 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
1030 /* pull completed commands off the queue */
1032 /* look for completed FIBs on our queue */
1033 if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
1035 break; /* nothing to do */
1037 /* get the command, unmap and queue for later processing */
1038 cm = sc->aac_commands + fib->Header.SenderData;
1040 AAC_PRINT_FIB(sc, fib);
1043 aac_remove_busy(cm);
1044 aac_unmap_command(cm); /* XXX defer? */
1045 cm->cm_flags |= AAC_CMD_COMPLETED;
1047 /* is there a completion handler? */
1048 if (cm->cm_complete != NULL) {
1049 cm->cm_complete(cm);
1051 /* assume that someone is sleeping on this command */
1056 /* see if we can start some more I/O */
1057 sc->flags &= ~AAC_QUEUE_FRZN;
1060 AAC_LOCK_RELEASE(&sc->aac_io_lock);
1064 * Handle a bio submitted from a disk device.
/*
 * aac_submit_bio() -- entry point from the disk layer: stash the
 * aac_disk in the bio, enqueue it on the controller's bio queue and
 * kick I/O processing.  NOTE(review): the aac_startio() call is a
 * missing line in this extract.
 */
1067 aac_submit_bio(struct aac_disk *ad, struct bio *bio)
1069 struct aac_softc *sc;
1073 bio->bio_driver_info = ad;
1074 sc = ad->ad_controller;
1076 /* queue the BIO and try to get some work done */
1077 aac_enqueue_bio(sc, bio);
1082 * Get a bio and build a command to go with it.
/*
 * aac_bio_command() -- pop a bio off the queue and build a read/write
 * FIB for it, choosing one of three request formats by adapter
 * capability: RawIo (AAC_FLAGS_RAW_IO), 32-bit VM_CtBlockRead/Write,
 * or 64-bit VM_CtHostRead64/Write64.  On success *cmp receives the
 * command; the fail path re-queues the bio and releases the command.
 * NOTE(review): this extract is missing interior lines (fib/cm setup,
 * some flag lines, returns, braces).  Also note the visible 64-bit
 * branch marks reads AAC_CMD_DATAOUT and writes AAC_CMD_DATAIN --
 * opposite of the 32-bit branch; this matches the historical
 * FreeBSD/DragonFly source but looks inverted — worth verifying
 * against a complete copy of the file before relying on it.
 */
1085 aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
1087 struct aac_command *cm;
1088 struct aac_fib *fib;
1089 struct aac_disk *ad;
1095 /* get the resources we will need */
1098 if (aac_alloc_command(sc, &cm)) /* get a command */
1100 if ((bio = aac_dequeue_bio(sc)) == NULL)
1103 /* fill out the command */
1105 cm->cm_data = (void *)bp->b_data;
1106 cm->cm_datalen = bp->b_bcount;
1107 cm->cm_complete = aac_bio_complete;
1108 cm->cm_private = bio;
1109 cm->cm_timestamp = time_second;
1110 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
1114 fib->Header.Size = sizeof(struct aac_fib_header);
1115 fib->Header.XferState =
1116 AAC_FIBSTATE_HOSTOWNED |
1117 AAC_FIBSTATE_INITIALISED |
1118 AAC_FIBSTATE_EMPTY |
1119 AAC_FIBSTATE_FROMHOST |
1120 AAC_FIBSTATE_REXPECTED |
1122 AAC_FIBSTATE_ASYNC |
1123 AAC_FIBSTATE_FAST_RESPONSE;
1125 /* build the read/write request */
1126 ad = (struct aac_disk *)bio->bio_driver_info;
1128 if (sc->flags & AAC_FLAGS_RAW_IO) {
1129 struct aac_raw_io *raw;
1130 raw = (struct aac_raw_io *)&fib->data[0];
1131 fib->Header.Command = RawIo;
1132 raw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1133 raw->ByteCount = bp->b_bcount;
1134 raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1136 raw->BpComplete = 0;
1137 fib->Header.Size += sizeof(struct aac_raw_io);
1138 cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
1139 if (bp->b_cmd == BUF_CMD_READ) {
1141 cm->cm_flags |= AAC_CMD_DATAIN;
1144 cm->cm_flags |= AAC_CMD_DATAOUT;
1146 } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1147 fib->Header.Command = ContainerCommand;
1148 if (bp->b_cmd == BUF_CMD_READ) {
1149 struct aac_blockread *br;
1150 br = (struct aac_blockread *)&fib->data[0];
1151 br->Command = VM_CtBlockRead;
1152 br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1153 br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1154 br->ByteCount = bp->b_bcount;
1155 fib->Header.Size += sizeof(struct aac_blockread);
1156 cm->cm_sgtable = &br->SgMap;
1157 cm->cm_flags |= AAC_CMD_DATAIN;
1159 struct aac_blockwrite *bw;
1160 bw = (struct aac_blockwrite *)&fib->data[0];
1161 bw->Command = VM_CtBlockWrite;
1162 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1163 bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1164 bw->ByteCount = bp->b_bcount;
1165 bw->Stable = CUNSTABLE;
1166 fib->Header.Size += sizeof(struct aac_blockwrite);
1167 cm->cm_flags |= AAC_CMD_DATAOUT;
1168 cm->cm_sgtable = &bw->SgMap;
1171 fib->Header.Command = ContainerCommand64;
1172 if (bp->b_cmd == BUF_CMD_READ) {
1173 struct aac_blockread64 *br;
1174 br = (struct aac_blockread64 *)&fib->data[0];
1175 br->Command = VM_CtHostRead64;
1176 br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1177 br->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE;
1178 br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1181 fib->Header.Size += sizeof(struct aac_blockread64);
1182 cm->cm_flags |= AAC_CMD_DATAOUT;
1183 cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
1185 struct aac_blockwrite64 *bw;
1186 bw = (struct aac_blockwrite64 *)&fib->data[0];
1187 bw->Command = VM_CtHostWrite64;
1188 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1189 bw->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE;
1190 bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1193 fib->Header.Size += sizeof(struct aac_blockwrite64);
1194 cm->cm_flags |= AAC_CMD_DATAIN;
1195 cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
1204 aac_enqueue_bio(sc, bio);
1206 aac_release_command(cm);
1211 * Handle a bio-instigated command that has been completed.
/*
 * aac_bio_complete() -- completion handler for bio-originated
 * commands: pull the FSA status out of the read/write response FIB,
 * release the command, set b_resid/B_ERROR accordingly and finish the
 * bio via aac_biodone() with a human-readable status string.
 * NOTE(review): the b_resid assignments and bp recovery line are
 * missing lines in this extract.
 */
1214 aac_bio_complete(struct aac_command *cm)
1216 struct aac_blockread_response *brr;
1217 struct aac_blockwrite_response *bwr;
1221 AAC_FSAStatus status;
1223 /* fetch relevant status and then release the command */
1224 bio = (struct bio *)cm->cm_private;
1226 if (bp->b_cmd == BUF_CMD_READ) {
1227 brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
1228 status = brr->Status;
1230 bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
1231 status = bwr->Status;
1233 aac_release_command(cm);
1235 /* fix up the bio based on status */
1236 if (status == ST_OK) {
1241 bp->b_flags |= B_ERROR;
1242 /* pass an error string out to the disk layer */
1243 code = aac_describe_code(aac_command_status_table, status);
1245 aac_biodone(bio, code);
1249 * Dump a block of data to the controller. If the queue is full, tell the
1250 * caller to hold off and wait for the queue to drain.
/*
 * aac_dump_enqueue() -- kernel crash-dump path: build a synchronous
 * VM_CtBlockWrite command for dumppages pages at LBA lba (LBA must fit
 * in 32 bits) and hand it to aac_map_command().  Returns non-zero if
 * no command slot is available so the caller can drain and retry.
 * NOTE(review): the cm_data assignment and fib XferState tail are
 * missing lines in this extract.
 */
1253 aac_dump_enqueue(struct aac_disk *ad, u_int64_t lba, void *data, int dumppages)
1255 struct aac_softc *sc;
1256 struct aac_command *cm;
1257 struct aac_fib *fib;
1258 struct aac_blockwrite *bw;
1260 sc = ad->ad_controller;
1263 KKASSERT(lba <= 0x100000000ULL);
1265 if (aac_alloc_command(sc, &cm))
1268 /* fill out the command */
1270 cm->cm_datalen = dumppages * PAGE_SIZE;
1271 cm->cm_complete = NULL;
1272 cm->cm_private = NULL;
1273 cm->cm_timestamp = time_second;
1274 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
1278 fib->Header.XferState =
1279 AAC_FIBSTATE_HOSTOWNED |
1280 AAC_FIBSTATE_INITIALISED |
1281 AAC_FIBSTATE_FROMHOST |
1282 AAC_FIBSTATE_REXPECTED |
1284 fib->Header.Command = ContainerCommand;
1285 fib->Header.Size = sizeof(struct aac_fib_header);
1287 bw = (struct aac_blockwrite *)&fib->data[0];
1288 bw->Command = VM_CtBlockWrite;
1289 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1290 bw->BlockNumber = lba;
1291 bw->ByteCount = dumppages * PAGE_SIZE;
1292 bw->Stable = CUNSTABLE; /* XXX what's appropriate here? */
1293 fib->Header.Size += sizeof(struct aac_blockwrite);
1294 cm->cm_flags |= AAC_CMD_DATAOUT;
1295 cm->cm_sgtable = &bw->SgMap;
1297 return (aac_map_command(cm));
1301 * Wait for the card's queue to drain when dumping. Also check for monitor
/*
 * aac_dump_complete() -- poll-mode drain used during a crash dump
 * (interrupts are off): ack response-ready doorbells, retire any
 * completed FIBs, flush monitor printfs, and compare the adapter
 * command queue's producer/consumer indices to decide whether the
 * queue has drained.  NOTE(review): loop headers, the SenderData
 * cast context and the final return are missing lines in this
 * extract.
 */
1305 aac_dump_complete(struct aac_softc *sc)
1307 struct aac_fib *fib;
1308 struct aac_command *cm;
1310 u_int32_t pi, ci, fib_size;
1313 reason = AAC_GET_ISTATUS(sc);
1314 if (reason & AAC_DB_RESPONSE_READY) {
1315 AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
1317 if (aac_dequeue_fib(sc,
1318 AAC_HOST_NORM_RESP_QUEUE,
1321 cm = (struct aac_command *)
1322 fib->Header.SenderData;
1324 AAC_PRINT_FIB(sc, fib);
1326 aac_remove_busy(cm);
1327 aac_unmap_command(cm);
1328 aac_enqueue_complete(cm);
1329 aac_release_command(cm);
1333 if (reason & AAC_DB_PRINTF) {
1334 AAC_CLEAR_ISTATUS(sc, AAC_DB_PRINTF);
1335 aac_print_printf(sc);
1337 pi = sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][
1338 AAC_PRODUCER_INDEX];
1339 ci = sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][
1340 AAC_CONSUMER_INDEX];
/*
 * aac_wait_command: synchronous submit — enqueue 'cm' on the ready queue and
 * sleep until the completion path wakes us on the command address.  Must be
 * called with aac_io_lock held; the lock is dropped across the sleep using
 * the tsleep_interlock/PINTERLOCKED pattern so the wakeup cannot be lost.
 */
1347 * Submit a command to the controller, return when it completes.
1348 * XXX This is very dangerous! If the card has gone out to lunch, we could
1349 * be stuck here forever. At the same time, signals are not caught
1350 * because there is a risk that a signal could wakeup the sleep before
1351 * the card has a chance to complete the command. Since there is no way
1352 * to cancel a command that is in progress, we can't protect against the
1353 * card completing a command late and spamming the command and data
1354 * memory. So, we are held hostage until the command completes.
1357 aac_wait_command(struct aac_command *cm)
1359 struct aac_softc *sc;
1366 /* Put the command on the ready queue and get things going */
1367 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
1368 aac_enqueue_ready(cm);
/* caller must already hold the I/O lock */
1371 KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0);
/* register for the wakeup BEFORE dropping the lock (no lost-wakeup race) */
1372 tsleep_interlock(cm, 0);
1373 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/* timo == 0: sleep forever; woken by the completion handler on 'cm' */
1374 error = tsleep(cm, PINTERLOCKED, "aacwait", 0);
1375 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
1380 * Command Buffer Management
/*
 * aac_alloc_command: pop a command from the freelist into *cmp.  If the list
 * is empty and we are still under aac_max_fibs, ask the AIF thread to grow
 * the FIB pool (allocation sleeps, so it cannot be done from this context).
 */
1384 * Allocate a command.
1387 aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1389 struct aac_command *cm;
1393 if ((cm = aac_dequeue_free(sc)) == NULL) {
/* pool exhausted but not at the cap: defer growth to the AIF thread */
1394 if (sc->total_fibs < sc->aac_max_fibs) {
1395 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1396 wakeup(sc->aifthread);
/*
 * aac_release_command: scrub the command and its FIB back to a pristine
 * state, return it to the freelist, and fire one queued "command free"
 * event callback if a consumer is waiting for a command to become available.
 */
1406 * Release a command back to the freelist.
1409 aac_release_command(struct aac_command *cm)
1411 struct aac_event *event;
1412 struct aac_softc *sc;
1416 /* (re)initialise the command/FIB */
1417 cm->cm_sgtable = NULL;
1419 cm->cm_complete = NULL;
1420 cm->cm_private = NULL;
1421 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1422 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1423 cm->cm_fib->Header.Flags = 0;
1424 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1427 * These are duplicated in aac_start to cover the case where an
1428 * intermediate stage may have destroyed them. They're left
1429 * initialised here for debugging purposes only.
1431 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1432 cm->cm_fib->Header.SenderData = 0;
1434 aac_enqueue_free(cm);
/* wake exactly one waiter for the "a command is free" event, if any */
1437 event = TAILQ_FIRST(&sc->aac_ev_cmfree);
1438 if (event != NULL) {
1439 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1440 event->ev_callback(sc, event, event->ev_arg);
/*
 * aac_map_command_helper: bus_dmamap_load() callback for the FIB block —
 * records the bus address of the (single) segment into the caller's uint64_t.
 */
1445 * Map helper for command/FIB allocation.
1448 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1452 fibphys = (uint64_t *)arg;
1456 *fibphys = segs[0].ds_addr;
/*
 * aac_alloc_commands: grow the command/FIB pool by one "fibmap" — a single
 * DMA-able chunk holding aac_max_fibs_alloc FIBs — and initialise one
 * aac_command per FIB, releasing each onto the freelist.  Bails out if the
 * pool is already at aac_max_fibs.  Errors unwind the DMA map/memory.
 */
1460 * Allocate and initialise commands/FIBs for this adapter.
1463 aac_alloc_commands(struct aac_softc *sc)
1465 struct aac_command *cm;
1466 struct aac_fibmap *fm;
/* don't overshoot the adapter's command limit */
1472 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1475 fm = kmalloc(sizeof(struct aac_fibmap), M_AACBUF, M_INTWAIT | M_ZERO);
1477 /* allocate the FIBs in DMAable memory and load them */
1478 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1479 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1480 device_printf(sc->aac_dev,
1481 "Not enough contiguous memory available.\n");
1482 kfree(fm, M_AACBUF);
1486 /* Ignore errors since this doesn't bounce */
1487 bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1488 sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
1489 aac_map_command_helper, &fibphys, 0);
1491 /* initialise constant fields in the command structure */
1492 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
1493 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1494 cm = sc->aac_commands + sc->total_fibs;
/* fm->aac_commands ends up pointing at the FIRST command of this map */
1495 fm->aac_commands = cm;
/* virtual and bus addresses of this command's FIB within the chunk */
1497 cm->cm_fib = (struct aac_fib *)
1498 ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
1499 cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
1500 cm->cm_index = sc->total_fibs;
1502 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1503 &cm->cm_datamap)) != 0)
/* release puts the freshly initialised command on the freelist */
1505 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
1506 aac_release_command(cm);
1508 AAC_LOCK_RELEASE(&sc->aac_io_lock);
1512 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
1513 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1514 debug(1, "total_fibs= %d\n", sc->total_fibs);
1515 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/* error path: unwind the DMA load/allocation for this fibmap */
1519 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1520 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1521 kfree(fm, M_AACBUF);
/*
 * aac_free_commands: tear down every fibmap — destroy each command's data
 * DMA map, then unload and free the FIB chunk itself.
 */
1526 * Free FIBs owned by this adapter.
1529 aac_free_commands(struct aac_softc *sc)
1531 struct aac_fibmap *fm;
1532 struct aac_command *cm;
1537 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1539 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1541 * We check against total_fibs to handle partially
/*
 * NOTE(review): the post-decrement in the loop condition runs even on
 * the terminating test, so total_fibs can go one below zero here —
 * harmless if nothing reads it afterwards, but worth confirming.
 */
1544 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1545 cm = fm->aac_commands + i;
1546 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1548 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1549 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1550 kfree(fm, M_AACBUF);
/*
 * aac_map_command_sg: bus_dmamap_load() callback for a command's data buffer.
 * Writes the scatter/gather list into the FIB in the format the firmware
 * expects (raw-I/O, 32-bit, or 64-bit entries), fixes up the FIB addressing
 * fields, syncs the data map, and submits the command — either via the
 * new-comm register interface or the shared-memory queue.
 */
1555 * Command-mapping helper function - populate this command's s/g table.
1558 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1560 struct aac_softc *sc;
1561 struct aac_command *cm;
1562 struct aac_fib *fib;
1567 cm = (struct aac_command *)arg;
1571 /* copy into the FIB */
1572 if (cm->cm_sgtable != NULL) {
/* raw-I/O FIBs use the extended s/g entry format */
1573 if (fib->Header.Command == RawIo) {
1574 struct aac_sg_tableraw *sg;
1575 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1577 for (i = 0; i < nseg; i++) {
1578 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1579 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1580 sg->SgEntryRaw[i].Next = 0;
1581 sg->SgEntryRaw[i].Prev = 0;
1582 sg->SgEntryRaw[i].Flags = 0;
1584 /* update the FIB size for the s/g count */
1585 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
/* 32-bit s/g entries for adapters without 64-bit s/g support */
1586 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1587 struct aac_sg_table *sg;
1588 sg = cm->cm_sgtable;
1590 for (i = 0; i < nseg; i++) {
1591 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1592 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1594 /* update the FIB size for the s/g count */
1595 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
/* otherwise: 64-bit s/g entries */
1597 struct aac_sg_table64 *sg;
1598 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1600 for (i = 0; i < nseg; i++) {
1601 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1602 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1604 /* update the FIB size for the s/g count */
1605 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1609 /* Fix up the address values in the FIB. Use the command array index
1610 * instead of a pointer since these fields are only 32 bits. Shift
1611 * the SenderFibAddress over to make room for the fast response bit
1612 * and for the AIF bit
1614 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1615 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1617 /* save a pointer to the command for speedy reverse-lookup */
1618 cm->cm_fib->Header.SenderData = cm->cm_index;
/* make the data buffer coherent for the upcoming DMA direction(s) */
1620 if (cm->cm_flags & AAC_CMD_DATAIN)
1621 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1622 BUS_DMASYNC_PREREAD);
1623 if (cm->cm_flags & AAC_CMD_DATAOUT)
1624 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1625 BUS_DMASYNC_PREWRITE);
1626 cm->cm_flags |= AAC_CMD_MAPPED;
/* new-comm adapters: poke the command register, busy-waiting briefly */
1628 if (sc->flags & AAC_FLAGS_NEW_COMM) {
1629 int count = 10000000L;
1630 while (AAC_SEND_COMMAND(sc, cm) != 0) {
/* adapter would not accept it: unmap, freeze the queue, retry later */
1632 aac_unmap_command(cm);
1633 sc->flags |= AAC_QUEUE_FRZN;
1634 aac_requeue_ready(cm);
1637 DELAY(5); /* wait 5 usec. */
1640 /* Put the FIB on the outbound queue */
1641 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
/* queue full: back out and freeze until the adapter drains it */
1642 aac_unmap_command(cm);
1643 sc->flags |= AAC_QUEUE_FRZN;
1644 aac_requeue_ready(cm);
/*
 * aac_unmap_command: post-DMA sync and unload of a command's data map.
 * Idempotent — returns immediately if the command was never mapped.
 */
1650 * Unmap a command from controller-visible space.
1653 aac_unmap_command(struct aac_command *cm)
1655 struct aac_softc *sc;
1661 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1664 if (cm->cm_datalen != 0) {
/* sync in the direction(s) the DMA actually ran */
1665 if (cm->cm_flags & AAC_CMD_DATAIN)
1666 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1667 BUS_DMASYNC_POSTREAD);
1668 if (cm->cm_flags & AAC_CMD_DATAOUT)
1669 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1670 BUS_DMASYNC_POSTWRITE);
1672 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1674 cm->cm_flags &= ~AAC_CMD_MAPPED;
1678 * Hardware Interface
/*
 * aac_common_map: bus_dmamap_load() callback for the common structure —
 * stashes the bus address of its (single) segment in the softc.
 */
1682 * Initialise the adapter.
1685 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1687 struct aac_softc *sc;
1691 sc = (struct aac_softc *)arg;
1693 sc->aac_common_busaddr = segs[0].ds_addr;
/*
 * aac_check_firmware: probe-time interrogation of the adapter firmware.
 * Rejects incompatible PERC2/QC 1.x firmware, reads the supported-options
 * word and translates it into softc flags (4GB window, 64-bit s/g, new-comm,
 * 64-bit arrays, CAM passthrough), re-sizes the register window for new-comm
 * adapters if required, and reads the firmware's preferred FIB/sector/s-g
 * sizing, clamping the FIB size to one page.
 */
1697 aac_check_firmware(struct aac_softc *sc)
1699 u_int32_t major, minor, options = 0, atu_size = 0;
1705 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1706 * firmware version 1.x are not compatible with this driver.
1708 if (sc->flags & AAC_FLAGS_PERC2QC) {
1709 if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1711 device_printf(sc->aac_dev,
1712 "Error reading firmware version\n");
1716 /* These numbers are stored as ASCII! */
1717 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1718 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1720 device_printf(sc->aac_dev,
1721 "Firmware version %d.%d is not supported.\n",
1728 * Retrieve the capabilities/supported options word so we know what
1729 * work-arounds to enable. Some firmware revs don't support this
1732 if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
/* "invalid request" just means old firmware without GETINFO */
1733 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1734 device_printf(sc->aac_dev,
1735 "RequestAdapterInfo failed\n");
1739 options = AAC_GET_MAILBOX(sc, 1);
1740 atu_size = AAC_GET_MAILBOX(sc, 2);
1741 sc->supported_options = options;
/* translate the options word into driver feature flags */
1743 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1744 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1745 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1746 if (options & AAC_SUPPORTED_NONDASD)
1747 sc->flags |= AAC_FLAGS_ENABLE_CAM;
/* 64-bit s/g only makes sense when bus addresses are wider than 32 bits */
1748 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1749 && (sizeof(bus_addr_t) > 4)) {
1750 device_printf(sc->aac_dev,
1751 "Enabling 64-bit address support\n");
1752 sc->flags |= AAC_FLAGS_SG_64BIT;
/* new-comm needs both firmware support and a hardware send routine */
1754 if ((options & AAC_SUPPORTED_NEW_COMM)
1755 && sc->aac_if.aif_send_command)
1756 sc->flags |= AAC_FLAGS_NEW_COMM;
1757 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1758 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1761 /* Check for broken hardware that does a lower number of commands */
1762 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1764 /* Remap mem. resource, if required */
/* new-comm adapters may need a larger ATU register window than probed */
1765 if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
1766 atu_size > rman_get_size(sc->aac_regs_resource)) {
1767 bus_release_resource(
1768 sc->aac_dev, SYS_RES_MEMORY,
1769 sc->aac_regs_rid, sc->aac_regs_resource);
1770 sc->aac_regs_resource = bus_alloc_resource(
1771 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid,
1772 0ul, ~0ul, atu_size, RF_ACTIVE);
/* fall back to the original window and disable new-comm on failure */
1773 if (sc->aac_regs_resource == NULL) {
1774 sc->aac_regs_resource = bus_alloc_resource_any(
1775 sc->aac_dev, SYS_RES_MEMORY,
1776 &sc->aac_regs_rid, RF_ACTIVE);
1777 if (sc->aac_regs_resource == NULL) {
1778 device_printf(sc->aac_dev,
1779 "couldn't allocate register window\n");
1782 sc->flags &= ~AAC_FLAGS_NEW_COMM;
1784 sc->aac_btag = rman_get_bustag(sc->aac_regs_resource);
1785 sc->aac_bhandle = rman_get_bushandle(sc->aac_regs_resource);
1788 /* Read preferred settings */
/* conservative defaults in case GETCOMMPREF is unsupported */
1789 sc->aac_max_fib_size = sizeof(struct aac_fib);
1790 sc->aac_max_sectors = 128; /* 64KB */
1791 if (sc->flags & AAC_FLAGS_SG_64BIT)
1792 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1793 - sizeof(struct aac_blockwrite64)
1794 + sizeof(struct aac_sg_table64))
1795 / sizeof(struct aac_sg_table64);
1797 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1798 - sizeof(struct aac_blockwrite)
1799 + sizeof(struct aac_sg_table))
1800 / sizeof(struct aac_sg_table);
/* firmware-preferred sizing overrides the defaults when available */
1802 if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
1803 options = AAC_GET_MAILBOX(sc, 1);
1804 sc->aac_max_fib_size = (options & 0xFFFF);
1805 sc->aac_max_sectors = (options >> 16) << 1;
1806 options = AAC_GET_MAILBOX(sc, 2);
1807 sc->aac_sg_tablesize = (options >> 16);
1808 options = AAC_GET_MAILBOX(sc, 3);
1809 sc->aac_max_fibs = (options & 0xFFFF);
/* FIBs are carved from whole pages, so clamp to one page each */
1811 if (sc->aac_max_fib_size > PAGE_SIZE)
1812 sc->aac_max_fib_size = PAGE_SIZE;
1813 sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;
/*
 * aac_init: bring the adapter up.  Waits for firmware to report
 * UP_AND_RUNNING, creates the three DMA tags (data buffers, FIBs, common
 * structure), allocates and maps the common structure (with the 8K low-memory
 * workaround), preallocates FIBs, fills in the init structure the adapter
 * reads at INITSTRUCT time, lays out the eight shared FIB queues, clears
 * hardware doorbells per controller type, and finally hands the init
 * structure to the firmware.
 */
1819 aac_init(struct aac_softc *sc)
1821 struct aac_adapter_init *ip;
1823 u_int32_t code, qoffset;
1829 * First wait for the adapter to come ready.
1833 code = AAC_GET_FWSTATUS(sc);
1834 if (code & AAC_SELF_TEST_FAILED) {
1835 device_printf(sc->aac_dev, "FATAL: selftest failed\n");
1838 if (code & AAC_KERNEL_PANIC) {
1839 device_printf(sc->aac_dev,
1840 "FATAL: controller kernel panic\n");
/* bounded wait: give up after AAC_BOOT_TIMEOUT seconds */
1843 if (time_second > (then + AAC_BOOT_TIMEOUT)) {
1844 device_printf(sc->aac_dev,
1845 "FATAL: controller not coming ready, "
1846 "status %x\n", code);
1849 } while (!(code & AAC_UP_AND_RUNNING));
1853 * Create DMA tag for mapping buffers into controller-addressable space.
1855 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1856 1, 0, /* algnmnt, boundary */
1857 (sc->flags & AAC_FLAGS_SG_64BIT) ?
1859 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1860 BUS_SPACE_MAXADDR, /* highaddr */
1861 NULL, NULL, /* filter, filterarg */
1862 MAXBSIZE, /* maxsize */
1863 sc->aac_sg_tablesize, /* nsegments */
1864 MAXBSIZE, /* maxsegsize */
1865 BUS_DMA_ALLOCNOW, /* flags */
1866 &sc->aac_buffer_dmat)) {
1867 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
1872 * Create DMA tag for mapping FIBs into controller-addressable space.
1874 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1875 1, 0, /* algnmnt, boundary */
/* without the 4GB window, FIBs must sit below 2GB (0x7fffffff) */
1876 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
1877 BUS_SPACE_MAXADDR_32BIT :
1878 0x7fffffff, /* lowaddr */
1879 BUS_SPACE_MAXADDR, /* highaddr */
1880 NULL, NULL, /* filter, filterarg */
1881 sc->aac_max_fibs_alloc *
1882 sc->aac_max_fib_size, /* maxsize */
1884 sc->aac_max_fibs_alloc *
1885 sc->aac_max_fib_size, /* maxsegsize */
1887 &sc->aac_fib_dmat)) {
1888 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
1893 * Create DMA tag for the common structure and allocate it.
1895 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1896 1, 0, /* algnmnt, boundary */
1897 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
1898 BUS_SPACE_MAXADDR_32BIT :
1899 0x7fffffff, /* lowaddr */
1900 BUS_SPACE_MAXADDR, /* highaddr */
1901 NULL, NULL, /* filter, filterarg */
/* extra 8192 bytes allow sliding the structure above low memory below */
1902 8192 + sizeof(struct aac_common), /* maxsize */
1904 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1906 &sc->aac_common_dmat)) {
1907 device_printf(sc->aac_dev,
1908 "can't allocate common structure DMA tag\n");
1911 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
1912 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
1913 device_printf(sc->aac_dev, "can't allocate common structure\n");
1917 * Work around a bug in the 2120 and 2200 that cannot DMA commands
1918 * below address 8192 in physical memory.
1919 * XXX If the padding is not needed, can it be put to use instead
1922 bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
1923 sc->aac_common, 8192 + sizeof(*sc->aac_common),
1924 aac_common_map, sc, 0);
/* if the allocation landed below 8K, shift both addresses up by 8K */
1926 if (sc->aac_common_busaddr < 8192) {
1928 (struct aac_common *)((uint8_t *)sc->aac_common + 8192);
1929 sc->aac_common_busaddr += 8192;
1931 bzero(sc->aac_common, sizeof(*sc->aac_common));
1933 /* Allocate some FIBs and associated command structs */
1934 TAILQ_INIT(&sc->aac_fibmap_tqh);
1935 sc->aac_commands = kmalloc(sc->aac_max_fibs * sizeof(struct aac_command),
1936 M_AACBUF, M_INTWAIT | M_ZERO);
1937 while (sc->total_fibs < AAC_PREALLOCATE_FIBS) {
1938 if (aac_alloc_commands(sc) != 0)
/* at least one FIB is required to talk to the adapter at all */
1941 if (sc->total_fibs == 0)
1945 * Fill in the init structure. This tells the adapter about the
1946 * physical location of various important shared data structures.
1948 ip = &sc->aac_common->ac_init;
1949 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
/* FIBs larger than the classic size imply the raw-I/O interface */
1950 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1951 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1952 sc->flags |= AAC_FLAGS_RAW_IO;
1954 ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;
1956 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1957 offsetof(struct aac_common, ac_fibs);
1958 ip->AdapterFibsVirtualAddress = 0;
1959 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1960 ip->AdapterFibAlign = sizeof(struct aac_fib);
1962 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1963 offsetof(struct aac_common, ac_printf);
1964 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1967 * The adapter assumes that pages are 4K in size, except on some
1968 * broken firmware versions that do the page->byte conversion twice,
1969 * therefore 'assuming' that this value is in 16MB units (2^24).
1970 * Round up since the granularity is so high.
1972 /* XXX why should the adapter care? */
1973 ip->HostPhysMemPages = ctob((int)Maxmem) / AAC_PAGE_SIZE;
1974 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1975 ip->HostPhysMemPages =
1976 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1978 ip->HostElapsedSeconds = time_second; /* reset later if invalid */
1981 if (sc->flags & AAC_FLAGS_NEW_COMM) {
1982 ip->InitFlags = INITFLAGS_NEW_COMM_SUPPORTED;
1983 device_printf(sc->aac_dev, "New comm. interface enabled\n");
1986 ip->MaxIoCommands = sc->aac_max_fibs;
1987 ip->MaxIoSize = sc->aac_max_sectors << 9;
1988 ip->MaxFibSize = sc->aac_max_fib_size;
1991 * Initialise FIB queues. Note that it appears that the layout of the
1992 * indexes and the segmentation of the entries may be mandated by the
1993 * adapter, which is only told about the base of the queue index fields.
1995 * The initial values of the indices are assumed to inform the adapter
1996 * of the sizes of the respective queues, and theoretically it could
1997 * work out the entire layout of the queue structures from this. We
1998 * take the easy route and just lay this area out like everyone else
2001 * The Linux driver uses a much more complex scheme whereby several
2002 * header records are kept for each queue. We use a couple of generic
2003 * list manipulation functions which 'know' the size of each list by
2004 * virtue of a table.
/* align the queue table within ac_qbuf and tell the adapter where it is */
2006 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
2007 qoffset &= ~(AAC_QUEUE_ALIGN - 1);
2009 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset);
2010 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset;
/* seed each queue's producer/consumer index with the queue's entry count */
2012 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
2013 AAC_HOST_NORM_CMD_ENTRIES;
2014 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
2015 AAC_HOST_NORM_CMD_ENTRIES;
2016 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
2017 AAC_HOST_HIGH_CMD_ENTRIES;
2018 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
2019 AAC_HOST_HIGH_CMD_ENTRIES;
2020 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
2021 AAC_ADAP_NORM_CMD_ENTRIES;
2022 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
2023 AAC_ADAP_NORM_CMD_ENTRIES;
2024 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
2025 AAC_ADAP_HIGH_CMD_ENTRIES;
2026 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
2027 AAC_ADAP_HIGH_CMD_ENTRIES;
2028 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
2029 AAC_HOST_NORM_RESP_ENTRIES;
2030 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
2031 AAC_HOST_NORM_RESP_ENTRIES;
2032 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
2033 AAC_HOST_HIGH_RESP_ENTRIES;
2034 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
2035 AAC_HOST_HIGH_RESP_ENTRIES;
2036 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
2037 AAC_ADAP_NORM_RESP_ENTRIES;
2038 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
2039 AAC_ADAP_NORM_RESP_ENTRIES;
2040 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
2041 AAC_ADAP_HIGH_RESP_ENTRIES;
2042 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
2043 AAC_ADAP_HIGH_RESP_ENTRIES;
/* cache a pointer to each queue's entry array for the enqueue/dequeue code */
2044 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] =
2045 &sc->aac_queues->qt_HostNormCmdQueue[0];
2046 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] =
2047 &sc->aac_queues->qt_HostHighCmdQueue[0];
2048 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] =
2049 &sc->aac_queues->qt_AdapNormCmdQueue[0];
2050 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] =
2051 &sc->aac_queues->qt_AdapHighCmdQueue[0];
2052 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] =
2053 &sc->aac_queues->qt_HostNormRespQueue[0];
2054 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] =
2055 &sc->aac_queues->qt_HostHighRespQueue[0];
2056 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] =
2057 &sc->aac_queues->qt_AdapNormRespQueue[0];
2058 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] =
2059 &sc->aac_queues->qt_AdapHighRespQueue[0];
2062 * Do controller-type-specific initialisation
/* clear the outbound doorbell so stale bits don't fire as interrupts */
2064 switch (sc->aac_hwif) {
2065 case AAC_HWIF_I960RX:
2066 AAC_SETREG4(sc, AAC_RX_ODBR, ~0);
2069 AAC_SETREG4(sc, AAC_RKT_ODBR, ~0);
2076 * Give the init structure to the controller.
2078 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT,
2079 sc->aac_common_busaddr +
2080 offsetof(struct aac_common, ac_init), 0, 0, 0,
2082 device_printf(sc->aac_dev,
2083 "error establishing init structure\n");
/*
 * aac_sync_command: mailbox-based synchronous command.  Writes the command
 * and four arguments to the mailbox, rings the sync doorbell, then spins
 * (bounded by AAC_IMMEDIATE_TIMEOUT seconds) for the completion doorbell and
 * reads the status back from mailbox word 0.
 */
2094 * Send a synchronous command to the controller and wait for a result.
2095 * Indicate if the controller completed the command with an error status.
2098 aac_sync_command(struct aac_softc *sc, u_int32_t command,
2099 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2107 /* populate the mailbox */
2108 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2110 /* ensure the sync command doorbell flag is cleared */
2111 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2113 /* then set it to signal the adapter */
2114 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2116 /* spin waiting for the command to complete */
2119 if (time_second > (then + AAC_IMMEDIATE_TIMEOUT)) {
2120 debug(1, "timed out");
2123 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2125 /* clear the completion flag */
2126 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2128 /* get the command status */
2129 status = AAC_GET_MAILBOX(sc, 0);
2133 if (status != AAC_SRB_STS_SUCCESS)
/*
 * aac_sync_fib: send a FIB synchronously via the AAC_MONKER_SYNCFIB mailbox
 * command.  Fills in the FIB header for 'command' plus 'datasize' bytes of
 * payload and passes its bus address through aac_sync_command().  Caller
 * must hold aac_io_lock; datasize may not exceed AAC_FIB_DATASIZE.
 */
2139 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2140 struct aac_fib *fib, u_int16_t datasize)
2143 KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0);
2145 if (datasize > AAC_FIB_DATASIZE)
2149 * Set up the sync FIB
2151 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2152 AAC_FIBSTATE_INITIALISED |
2154 fib->Header.XferState |= xferstate;
2155 fib->Header.Command = command;
2156 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2157 fib->Header.Size = sizeof(struct aac_fib) + datasize;
2158 fib->Header.SenderSize = sizeof(struct aac_fib);
2159 fib->Header.SenderFibAddress = 0; /* Not needed */
/* the sync FIB lives at a fixed spot in the common structure */
2160 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
2161 offsetof(struct aac_common,
2165 * Give the FIB to the controller, wait for a response.
2167 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
2168 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
2169 debug(2, "IO error");
2177 * Adapter-space FIB queue manipulation
2179 * Note that the queue implementation here is a little funky; neither the PI or
2180 * CI will ever be zero. This behaviour is a controller feature.
2186 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
2187 {AAC_HOST_HIGH_CMD_ENTRIES, 0},
2188 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
2189 {AAC_ADAP_HIGH_CMD_ENTRIES, 0},
2190 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
2191 {AAC_HOST_HIGH_RESP_ENTRIES, 0},
2192 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
2193 {AAC_ADAP_HIGH_RESP_ENTRIES, 0}
/*
 * aac_enqueue_fib: push one FIB onto shared-memory queue 'queue'.  Returns
 * EBUSY (via the elided full-queue path) when advancing the producer index
 * would collide with the consumer; otherwise writes the entry, bumps the
 * producer index, and rings the queue's doorbell if it has one.
 */
2197 * Atomically insert an entry into the nominated queue, returns 0 on success or
2198 * EBUSY if the queue is full.
2200 * Note: it would be more efficient to defer notifying the controller in
2201 * the case where we may be inserting several entries in rapid succession,
2202 * but implementing this usefully may be difficult (it would involve a
2203 * separate queue/notify interface).
2206 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
2215 fib_size = cm->cm_fib->Header.Size;
2216 fib_addr = cm->cm_fib->Header.ReceiverFibAddress;
2218 /* get the producer/consumer indices */
2219 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2220 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2222 /* wrap the queue? */
/* indices never rest at zero — this is a controller-mandated convention */
2223 if (pi >= aac_qinfo[queue].size)
2226 /* check for queue full */
2227 if ((pi + 1) == ci) {
2232 * To avoid a race with its completion interrupt, place this command on
2233 * the busy queue prior to advertising it to the controller.
2235 aac_enqueue_busy(cm);
2239 /* populate queue entry */
2240 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2241 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2243 /* update producer index */
2244 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2246 /* notify the adapter if we know how */
2247 if (aac_qinfo[queue].notify != 0)
2248 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
/*
 * aac_dequeue_fib: pop one entry from shared-memory queue 'queue', returning
 * its size and a host-virtual FIB pointer.  aq_fib_addr is only 32 bits, so
 * it is interpreted per queue type: command queues carry an offset into the
 * adapter-FIB array; response queues carry the SenderFibAddress index we
 * planted at submit time (shifted for the fast-response/AIF flag bits).
 */
2257 * Atomically remove one entry from the nominated queue, returns 0 on
2258 * success or ENOENT if the queue is empty.
2261 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
2262 struct aac_fib **fib_addr)
2265 u_int32_t fib_index;
2271 /* get the producer/consumer indices */
2272 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2273 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2275 /* check for queue empty */
2281 /* wrap the pi so the following test works */
2282 if (pi >= aac_qinfo[queue].size)
2289 /* wrap the queue? */
2290 if (ci >= aac_qinfo[queue].size)
2293 /* fetch the entry */
2294 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;
2297 case AAC_HOST_NORM_CMD_QUEUE:
2298 case AAC_HOST_HIGH_CMD_QUEUE:
2300 * The aq_fib_addr is only 32 bits wide so it can't be counted
2301 * on to hold an address. For AIF's, the adapter assumes
2302 * that it's giving us an address into the array of AIF fibs.
2303 * Therefore, we have to convert it to an index.
2305 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
2306 sizeof(struct aac_fib);
2307 *fib_addr = &sc->aac_common->ac_fibs[fib_index];
2310 case AAC_HOST_NORM_RESP_QUEUE:
2311 case AAC_HOST_HIGH_RESP_QUEUE:
2313 struct aac_command *cm;
2316 * As above, an index is used instead of an actual address.
2317 * Gotta shift the index to account for the fast response
2318 * bit. No other correction is needed since this value was
2319 * originally provided by the driver via the SenderFibAddress
2322 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
/* SenderFibAddress was (cm_index << 2); >> 2 recovers the command slot */
2323 cm = sc->aac_commands + (fib_index >> 2);
2324 *fib_addr = cm->cm_fib;
2327 * Is this a fast response? If it is, update the fib fields in
2328 * local memory since the whole fib isn't DMA'd back up.
2330 if (fib_index & 0x01) {
2331 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
2332 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
2337 panic("Invalid queue in aac_dequeue_fib()");
2341 /* update consumer index */
2342 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;
2344 /* if we have made the queue un-full, notify the adapter */
2345 if (notify && (aac_qinfo[queue].notify != 0))
2346 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
/*
 * aac_enqueue_response: return a completed adapter-initiated FIB on response
 * queue 'queue'.  Mirrors aac_enqueue_fib, but addresses the FIB by the
 * SenderFibAddress the adapter originally supplied (copied into
 * ReceiverFibAddress so the adapter can find it again).
 */
2354 * Put our response to an Adapter Initialed Fib on the response queue
2357 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
2366 /* Tell the adapter where the FIB is */
2367 fib_size = fib->Header.Size;
2368 fib_addr = fib->Header.SenderFibAddress;
2369 fib->Header.ReceiverFibAddress = fib_addr;
2371 /* get the producer/consumer indices */
2372 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2373 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2375 /* wrap the queue? */
2376 if (pi >= aac_qinfo[queue].size)
2379 /* check for queue full */
2380 if ((pi + 1) == ci) {
2385 /* populate queue entry */
2386 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2387 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2389 /* update producer index */
2390 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2392 /* notify the adapter if we know how */
2393 if (aac_qinfo[queue].notify != 0)
2394 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
/*
 * aac_timeout: periodic watchdog.  Flags and reports commands that have been
 * busy longer than AAC_CMD_TIMEOUT seconds, then checks the firmware status
 * word and warns if the controller is no longer UP_AND_RUNNING.  Diagnostic
 * only — it does not cancel or retry anything.
 */
2403 * Check for commands that have been outstanding for a suspiciously long time,
2404 * and complain about them.
2407 aac_timeout(void *xsc)
2409 struct aac_softc *sc = xsc;
2410 struct aac_command *cm;
2414 * Traverse the busy command list, bitch about late commands once
2418 deadline = time_second - AAC_CMD_TIMEOUT;
2419 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2420 if ((cm->cm_timestamp < deadline)
2421 /* && !(cm->cm_flags & AAC_CMD_TIMEDOUT) */) {
2422 cm->cm_flags |= AAC_CMD_TIMEDOUT;
2423 device_printf(sc->aac_dev,
2424 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2425 cm, (int)(time_second-cm->cm_timestamp));
2426 AAC_PRINT_FIB(sc, cm->cm_fib);
/* also sanity-check that the firmware itself is still alive */
2431 code = AAC_GET_FWSTATUS(sc);
2432 if (code != AAC_UP_AND_RUNNING) {
2433 device_printf(sc->aac_dev, "WARNING! Controller is no "
2434 "longer running! code= 0x%x\n", code);
2441 * Interface Function Vectors
/*
 * Per-hardware-interface firmware status readers (StrongARM, i960Rx,
 * Falcon, Rocket).  Each reads the controller-specific FWSTATUS register.
 */
2445 * Read the current firmware status word.
2448 aac_sa_get_fwstatus(struct aac_softc *sc)
2452 return(AAC_GETREG4(sc, AAC_SA_FWSTATUS))
2456 aac_rx_get_fwstatus(struct aac_softc *sc)
2460 return(AAC_GETREG4(sc, AAC_RX_FWSTATUS))
2464 aac_fa_get_fwstatus(struct aac_softc *sc)
/* Falcon reads into a local first; the return path is elided here */
2470 val = AAC_GETREG4(sc, AAC_FA_FWSTATUS);
2475 aac_rkt_get_fwstatus(struct aac_softc *sc)
2479 return(AAC_GETREG4(sc, AAC_RKT_FWSTATUS))
/*
 * Per-hardware-interface queue-notify: ring the inbound doorbell with the
 * queue's notify bit(s).  SA/FA use 16-bit doorbell registers, RX/RKT the
 * 32-bit IDBR.
 */
2483 * Notify the controller of a change in a given queue
2487 aac_sa_qnotify(struct aac_softc *sc, int qbit)
2491 AAC_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit);
2495 aac_rx_qnotify(struct aac_softc *sc, int qbit)
2499 AAC_SETREG4(sc, AAC_RX_IDBR, qbit);
2503 aac_fa_qnotify(struct aac_softc *sc, int qbit)
2507 AAC_SETREG2(sc, AAC_FA_DOORBELL1, qbit);
2512 aac_rkt_qnotify(struct aac_softc *sc, int qbit)
2516 AAC_SETREG4(sc, AAC_RKT_IDBR, qbit);
/*
 * Per-hardware-interface interrupt-status readers: fetch the outbound
 * doorbell register holding the pending interrupt reason bits.
 */
2520 * Get the interrupt reason bits
2523 aac_sa_get_istatus(struct aac_softc *sc)
2527 return(AAC_GETREG2(sc, AAC_SA_DOORBELL0))
2531 aac_rx_get_istatus(struct aac_softc *sc)
2535 return(AAC_GETREG4(sc, AAC_RX_ODBR))
2539 aac_fa_get_istatus(struct aac_softc *sc)
/* Falcon reads into a local first; the return path is elided here */
2545 val = AAC_GETREG2(sc, AAC_FA_DOORBELL0);
2550 aac_rkt_get_istatus(struct aac_softc *sc)
2554 return(AAC_GETREG4(sc, AAC_RKT_ODBR))
/*
 * Per-hardware-interface interrupt acknowledge: write the mask of reason
 * bits back to the (write-one-to-clear) doorbell register.
 */
2558 * Clear some interrupt reason bits
2561 aac_sa_clear_istatus(struct aac_softc *sc, int mask)
2565 AAC_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
2569 aac_rx_clear_istatus(struct aac_softc *sc, int mask)
2573 AAC_SETREG4(sc, AAC_RX_ODBR, mask);
2577 aac_fa_clear_istatus(struct aac_softc *sc, int mask)
2581 AAC_SETREG2(sc, AAC_FA_DOORBELL0_CLEAR, mask);
2586 aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
2590 AAC_SETREG4(sc, AAC_RKT_ODBR, mask);
/*
 * Per-hardware-interface mailbox writers: store the command word and four
 * arguments at consecutive 32-bit offsets from the mailbox base register.
 * NOTE(review): the Falcon variant appears to interleave additional
 * operations between writes (lines elided in this listing) — presumably
 * read-backs to flush posted writes; confirm against the full source.
 */
2594 * Populate the mailbox and set the command word
2597 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
2598 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2602 AAC_SETREG4(sc, AAC_SA_MAILBOX, command);
2603 AAC_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
2604 AAC_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
2605 AAC_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
2606 AAC_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
2610 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
2611 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2615 AAC_SETREG4(sc, AAC_RX_MAILBOX, command);
2616 AAC_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
2617 AAC_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
2618 AAC_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
2619 AAC_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
2623 aac_fa_set_mailbox(struct aac_softc *sc, u_int32_t command,
2624 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2628 AAC_SETREG4(sc, AAC_FA_MAILBOX, command);
2630 AAC_SETREG4(sc, AAC_FA_MAILBOX + 4, arg0);
2632 AAC_SETREG4(sc, AAC_FA_MAILBOX + 8, arg1);
2634 AAC_SETREG4(sc, AAC_FA_MAILBOX + 12, arg2);
2636 AAC_SETREG4(sc, AAC_FA_MAILBOX + 16, arg3);
2641 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2642 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2646 AAC_SETREG4(sc, AAC_RKT_MAILBOX, command);
2647 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
2648 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
2649 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
2650 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
2654 * Fetch the immediate command status word
/* Each variant reads 32-bit mailbox slot 'mb' (slots are 4 bytes apart). */
2657 aac_sa_get_mailbox(struct aac_softc *sc, int mb)
2661 return(AAC_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
2665 aac_rx_get_mailbox(struct aac_softc *sc, int mb)
2669 return(AAC_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
/* 'fa' variant stages the value in a local before returning it. */
2673 aac_fa_get_mailbox(struct aac_softc *sc, int mb)
2679 val = AAC_GETREG4(sc, AAC_FA_MAILBOX + (mb * 4));
2684 aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
2688 return(AAC_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
2692 * Set/clear interrupt masks
/*
 * enable != 0 unmasks the doorbell interrupts the driver services
 * (AAC_DB_INTERRUPTS, or AAC_DB_INT_NEW_COMM on new-comm adapters);
 * enable == 0 masks everything (~0).
 */
2695 aac_sa_set_interrupts(struct aac_softc *sc, int enable)
2697 debug(2, "%sable interrupts", enable ? "en" : "dis");
2700 AAC_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
2702 AAC_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
2707 aac_rx_set_interrupts(struct aac_softc *sc, int enable)
2709 debug(2, "%sable interrupts", enable ? "en" : "dis");
/* New-comm adapters signal completions through a different doorbell bit. */
2712 if (sc->flags & AAC_FLAGS_NEW_COMM)
2713 AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
2715 AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
2717 AAC_SETREG4(sc, AAC_RX_OIMR, ~0);
2722 aac_fa_set_interrupts(struct aac_softc *sc, int enable)
2724 debug(2, "%sable interrupts", enable ? "en" : "dis");
2727 AAC_SETREG2((sc), AAC_FA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
2730 AAC_SETREG2((sc), AAC_FA_MASK0, ~0);
2736 aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
2738 debug(2, "%sable interrupts", enable ? "en" : "dis");
2741 if (sc->flags & AAC_FLAGS_NEW_COMM)
2742 AAC_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
2744 AAC_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
2746 AAC_SETREG4(sc, AAC_RKT_OIMR, ~0);
2751 * New comm. interface: Send command functions
/*
 * Obtain a queue slot index from the IQUE register (re-reading once if
 * the adapter returns 0xffffffff), mark the command busy, program the
 * FIB's 64-bit physical address (low word, high word) and size into the
 * slot, then post the index back to IQUE to hand the command over.
 */
2754 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
2756 u_int32_t index, device;
2758 debug(2, "send command (new comm.)");
2760 index = AAC_GETREG4(sc, AAC_RX_IQUE);
2761 if (index == 0xffffffffL)
2762 index = AAC_GETREG4(sc, AAC_RX_IQUE);
2763 if (index == 0xffffffffL)
2765 aac_enqueue_busy(cm);
/* low 32 bits of the FIB physical address */
2767 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
/* high 32 bits of the FIB physical address */
2769 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2771 AAC_SETREG4(sc, device, cm->cm_fib->Header.Size);
2772 AAC_SETREG4(sc, AAC_RX_IQUE, index);
/* 'rkt' variant of the same slot-claim / program / post sequence. */
2777 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
2779 u_int32_t index, device;
2781 debug(2, "send command (new comm.)");
2783 index = AAC_GETREG4(sc, AAC_RKT_IQUE);
2784 if (index == 0xffffffffL)
2785 index = AAC_GETREG4(sc, AAC_RKT_IQUE);
2786 if (index == 0xffffffffL)
2788 aac_enqueue_busy(cm);
2790 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2792 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2794 AAC_SETREG4(sc, device, cm->cm_fib->Header.Size);
2795 AAC_SETREG4(sc, AAC_RKT_IQUE, index);
2800 * New comm. interface: get, set outbound queue index
/* Plain accessors for the OQUE register on 'rx' and 'rkt' adapters. */
2803 aac_rx_get_outb_queue(struct aac_softc *sc)
2807 return(AAC_GETREG4(sc, AAC_RX_OQUE));
2811 aac_rkt_get_outb_queue(struct aac_softc *sc)
2815 return(AAC_GETREG4(sc, AAC_RKT_OQUE));
2819 aac_rx_set_outb_queue(struct aac_softc *sc, int index)
2823 AAC_SETREG4(sc, AAC_RX_OQUE, index);
2827 aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
2831 AAC_SETREG4(sc, AAC_RKT_OQUE, index);
2835 * Debugging and Diagnostics
2839 * Print some information about the controller.
/*
 * Issues a synchronous RequestAdapterInfo FIB under the io lock, caches
 * the firmware KernelRevision in the softc for later use (rev_check),
 * then prints driver version, hardware summary, firmware kernel version
 * and the supported-options bit string.
 */
2842 aac_describe_controller(struct aac_softc *sc)
2844 struct aac_fib *fib;
2845 struct aac_adapter_info *info;
2849 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
2850 aac_alloc_sync_fib(sc, &fib);
/* Bail out (releasing fib and lock) if the adapter rejects the request. */
2853 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2854 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2855 aac_release_sync_fib(sc);
2856 AAC_LOCK_RELEASE(&sc->aac_io_lock);
2860 /* save the kernel revision structure for later use */
2861 info = (struct aac_adapter_info *)&fib->data[0];
2862 sc->aac_revision = info->KernelRevision;
/* Driver version, decoded from the packed AAC_DRIVER_VERSION constant. */
2864 device_printf(sc->aac_dev, "Adaptec Raid Controller %d.%d.%d-%d\n",
2865 AAC_DRIVER_VERSION >> 24,
2866 (AAC_DRIVER_VERSION >> 16) & 0xFF,
2867 AAC_DRIVER_VERSION & 0xFF,
/* Hardware summary: CPU variant, clock, memory split, battery platform. */
2871 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2872 "(%dMB cache, %dMB execution), %s\n",
2873 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2874 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2875 info->BufferMem / (1024 * 1024),
2876 info->ExecutionMem / (1024 * 1024),
2877 aac_describe_code(aac_battery_platform,
2878 info->batteryPlatform));
2880 device_printf(sc->aac_dev,
2881 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2882 info->KernelRevision.external.comp.major,
2883 info->KernelRevision.external.comp.minor,
2884 info->KernelRevision.external.comp.dash,
2885 info->KernelRevision.buildNumber,
2886 (u_int32_t)(info->SerialNumber & 0xffffff));
2888 device_printf(sc->aac_dev, "Supported Options=%b\n",
2889 sc->supported_options,
2911 aac_release_sync_fib(sc);
2912 AAC_LOCK_RELEASE(&sc->aac_io_lock);
2916 * Look up a text description of a numeric error code and return a pointer to
/*
 * Scans the table until the NULL-string sentinel; on a miss, returns the
 * entry just past the sentinel (presumably the table's "unknown code"
 * string — verify against the table definitions).
 */
2920 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2924 for (i = 0; table[i].string != NULL; i++)
2925 if (table[i].code == code)
2926 return(table[i].string);
2927 return(table[i + 1].string);
2931 * Management Interface
/*
 * Open entry point for the control device. Only a single opener is
 * supported: AAC_STATE_OPEN gates the device and is set here.
 */
2935 aac_open(struct dev_open_args *ap)
2937 cdev_t dev = ap->a_head.a_dev;
2938 struct aac_softc *sc;
2944 /* Check to make sure the device isn't already open */
2945 if (sc->aac_state & AAC_STATE_OPEN) {
2948 sc->aac_state |= AAC_STATE_OPEN;
/* Close entry point: clear the single-opener flag set by aac_open(). */
2954 aac_close(struct dev_close_args *ap)
2956 cdev_t dev = ap->a_head.a_dev;
2957 struct aac_softc *sc;
2963 /* Mark this unit as no longer open */
2964 sc->aac_state &= ~AAC_STATE_OPEN;
/*
 * ioctl entry point for the management device. AACIO_STATS is answered
 * directly from the softc's queue statistics; all other commands are
 * dispatched to the aac_* helpers below.
 */
2970 aac_ioctl(struct dev_ioctl_args *ap)
2972 cdev_t dev = ap->a_head.a_dev;
2973 caddr_t arg = ap->a_data;
2974 struct aac_softc *sc = dev->si_drv1;
2980 if (ap->a_cmd == AACIO_STATS) {
2981 union aac_statrequest *as = (union aac_statrequest *)arg;
2983 switch (as->as_item) {
2989 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2990 sizeof(struct aac_qstat));
/*
 * The ioctl argument is itself a pointer to the user buffer;
 * dereference one level before handing it to the helpers.
 */
2999 arg = *(caddr_t *)arg;
3001 switch (ap->a_cmd) {
3002 /* AACIO_STATS already handled above */
3003 case FSACTL_SENDFIB:
3004 debug(1, "FSACTL_SENDFIB");
3005 error = aac_ioctl_sendfib(sc, arg);
3007 case FSACTL_AIF_THREAD:
3008 debug(1, "FSACTL_AIF_THREAD");
3011 case FSACTL_OPEN_GET_ADAPTER_FIB:
3012 debug(1, "FSACTL_OPEN_GET_ADAPTER_FIB");
3014 * Pass the caller out an AdapterFibContext.
3016 * Note that because we only support one opener, we
3017 * basically ignore this. Set the caller's context to a magic
3018 * number just in case.
3020 * The Linux code hands the driver a pointer into kernel space,
3021 * and then trusts it when the caller hands it back. Aiee!
3022 * Here, we give it the proc pointer of the per-adapter aif
3023 * thread. It's only used as a sanity check in other calls.
3025 cookie = (uint32_t)(uintptr_t)sc->aifthread;
3026 error = copyout(&cookie, arg, sizeof(cookie));
3028 case FSACTL_GET_NEXT_ADAPTER_FIB:
3029 debug(1, "FSACTL_GET_NEXT_ADAPTER_FIB");
3030 error = aac_getnext_aif(sc, arg);
3032 case FSACTL_CLOSE_GET_ADAPTER_FIB:
3033 debug(1, "FSACTL_CLOSE_GET_ADAPTER_FIB");
3034 /* don't do anything here */
3036 case FSACTL_MINIPORT_REV_CHECK:
3037 debug(1, "FSACTL_MINIPORT_REV_CHECK");
3038 error = aac_rev_check(sc, arg);
3040 case FSACTL_QUERY_DISK:
3041 debug(1, "FSACTL_QUERY_DISK");
3042 error = aac_query_disk(sc, arg);
3044 case FSACTL_DELETE_DISK:
3046 * We don't trust the underland to tell us when to delete a
3047 * container, rather we rely on an AIF coming from the
/* native variant dereferences the extra pointer level, then falls into */
/* the Linux-compat case below */
3052 case FSACTL_GET_PCI_INFO:
3053 arg = *(caddr_t*)arg;
3054 case FSACTL_LNX_GET_PCI_INFO:
3055 debug(1, "FSACTL_GET_PCI_INFO");
3056 error = aac_get_pci_info(sc, arg);
3059 debug(1, "unsupported cmd 0x%lx\n", ap->a_cmd);
/* kqueue filter ops for the control device (fd-backed read filter). */
3066 static struct filterops aac_filterops =
3067 { FILTEROP_ISFD, NULL, aac_filter_detach, aac_filter };
/*
 * kqueue attach: accept only the read filter, stash the softc in the
 * knote hook, and register the knote on the receive klist; any other
 * filter type is rejected with EOPNOTSUPP.
 */
3070 aac_kqfilter(struct dev_kqfilter_args *ap)
3072 cdev_t dev = ap->a_head.a_dev;
3073 struct aac_softc *sc = dev->si_drv1;
3074 struct knote *kn = ap->a_kn;
3075 struct klist *klist;
3079 switch (kn->kn_filter) {
3081 kn->kn_fop = &aac_filterops;
3082 kn->kn_hook = (caddr_t)sc;
3085 ap->a_result = EOPNOTSUPP;
3089 klist = &sc->rcv_kq.ki_note;
3090 knote_insert(klist, kn);
/* kqueue detach: remove the knote from the softc's receive klist. */
3096 aac_filter_detach(struct knote *kn)
3098 struct aac_softc *sc = (struct aac_softc *)kn->kn_hook;
3099 struct klist *klist;
3101 klist = &sc->rcv_kq.ki_note;
3102 knote_remove(klist, kn);
/*
 * kqueue event filter: the device is readable when the AIF ring is
 * non-empty (head != tail), checked under the aifq lock.
 */
3106 aac_filter(struct knote *kn, long hint)
3108 struct aac_softc *sc = (struct aac_softc *)kn->kn_hook;
3111 AAC_LOCK_ACQUIRE(&sc->aac_aifq_lock);
3112 if (sc->aac_aifq_tail != sc->aac_aifq_head)
3114 AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
/*
 * Event callback used by aac_ioctl_sendfib: on AAC_EVENT_CMFREE, try to
 * allocate a command into *arg; if allocation still fails, requeue the
 * event for the next free-command notification, otherwise free it.
 */
3121 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
3124 switch (event->ev_type) {
3125 case AAC_EVENT_CMFREE:
3126 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3127 if (aac_alloc_command(sc, (struct aac_command **)arg)) {
3128 aac_add_event(sc, event);
3129 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3132 kfree(event, M_AACBUF);
3134 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3142 * Send a FIB supplied from userspace
/*
 * Allocate a command (sleeping on a CMFREE event if none is free),
 * copy the FIB in from userland (header first, then the full size,
 * clamped to sizeof(struct aac_fib)), submit it synchronously, and
 * copy the result back out.
 */
3145 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
3147 struct aac_command *cm;
3157 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
/* No free command: register a CMFREE event and sleep until one appears. */
3158 if (aac_alloc_command(sc, &cm)) {
3159 struct aac_event *event;
3161 event = kmalloc(sizeof(struct aac_event), M_AACBUF,
3162 M_INTWAIT | M_ZERO);
3163 event->ev_type = AAC_EVENT_CMFREE;
3164 event->ev_callback = aac_ioctl_event;
3165 event->ev_arg = &cm;
3166 aac_add_event(sc, event);
3167 tsleep_interlock(&cm, 0);
3168 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3169 tsleep(&cm, PINTERLOCKED, "sendfib", 0);
3170 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3172 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3175 * Fetch the FIB header, then re-copy to get data as well.
3177 if ((error = copyin(ufib, cm->cm_fib,
3178 sizeof(struct aac_fib_header))) != 0)
3180 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
/* Clamp an oversized user FIB to the maximum the driver handles. */
3181 if (size > sizeof(struct aac_fib)) {
3182 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %zd)\n",
3183 size, sizeof(struct aac_fib));
3184 size = sizeof(struct aac_fib);
3186 if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
3188 cm->cm_fib->Header.Size = size;
3189 cm->cm_timestamp = time_second;
3192 * Pass the FIB to the controller, wait for it to complete.
3194 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3195 if ((error = aac_wait_command(cm)) != 0) {
3196 device_printf(sc->aac_dev,
3197 "aac_wait_command return %d\n", error);
3200 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3203 * Copy the FIB and data back out to the caller.
3205 size = cm->cm_fib->Header.Size;
/* Clamp the response size as well before copyout. */
3206 if (size > sizeof(struct aac_fib)) {
3207 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %zd)\n",
3208 size, sizeof(struct aac_fib));
3209 size = sizeof(struct aac_fib);
3211 error = copyout(cm->cm_fib, ufib, size);
3212 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3216 aac_release_command(cm);
3219 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3224 * Handle an AIF sent to us by the controller; queue it for later reference.
3225 * If the queue fills up, then drop the older entries.
3228 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3230 struct aac_aif_command *aif;
3231 struct aac_container *co, *co_next;
3232 struct aac_mntinfo *mi;
3233 struct aac_mntinforesp *mir = NULL;
3236 int count = 0, added = 0, i = 0;
3240 aif = (struct aac_aif_command*)&fib->data[0];
3241 aac_print_aif(sc, aif);
3243 /* Is it an event that we should care about? */
3244 switch (aif->command) {
3245 case AifCmdEventNotify:
3246 switch (aif->data.EN.type) {
3247 case AifEnAddContainer:
3248 case AifEnDeleteContainer:
3250 * A container was added or deleted, but the message
3251 * doesn't tell us anything else! Re-enumerate the
3252 * containers and sort things out.
3254 aac_alloc_sync_fib(sc, &fib);
3255 mi = (struct aac_mntinfo *)&fib->data[0];
3258 * Ask the controller for its containers one at
3260 * XXX What if the controller's list changes
3261 * midway through this enumeration?
3262 * XXX This should be done async.
3264 bzero(mi, sizeof(struct aac_mntinfo));
3265 mi->Command = VM_NameServe;
3266 mi->MntType = FT_FILESYS;
3268 rsize = sizeof(mir);
3269 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
3270 sizeof(struct aac_mntinfo))) {
3271 device_printf(sc->aac_dev,
3272 "Error probing container %d\n", i);
3276 mir = (struct aac_mntinforesp *)&fib->data[0];
3277 /* XXX Need to check if count changed */
3278 count = mir->MntRespCount;
3281 * Check the container against our list.
3282 * co->co_found was already set to 0 in a
3285 if ((mir->Status == ST_OK) &&
3286 (mir->MntTable[0].VolType != CT_NONE)) {
3289 &sc->aac_container_tqh,
3291 if (co->co_mntobj.ObjectId ==
3292 mir->MntTable[0].ObjectId) {
3299 * If the container matched, continue
3308 * This is a new container. Do all the
3309 * appropriate things to set it up.
3311 aac_add_container(sc, mir, 1);
3315 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3316 aac_release_sync_fib(sc);
3319 * Go through our list of containers and see which ones
3320 * were not marked 'found'. Since the controller didn't
3321 * list them they must have been deleted. Do the
3322 * appropriate steps to destroy the device. Also reset
3323 * the co->co_found field.
3325 co = TAILQ_FIRST(&sc->aac_container_tqh);
3326 while (co != NULL) {
3327 if (co->co_found == 0) {
/* Drop the io lock across the newbus teardown call. */
3328 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3330 device_delete_child(sc->aac_dev,
3333 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3334 co_next = TAILQ_NEXT(co, co_link);
3335 AAC_LOCK_ACQUIRE(&sc->
3336 aac_container_lock);
3337 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3339 AAC_LOCK_RELEASE(&sc->
3340 aac_container_lock);
3341 kfree(co, M_AACBUF);
3345 co = TAILQ_NEXT(co, co_link);
3349 /* Attach the newly created containers */
3351 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3353 bus_generic_attach(sc->aac_dev);
3355 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3368 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3369 AAC_LOCK_ACQUIRE(&sc->aac_aifq_lock);
3370 next = (sc->aac_aifq_head + 1) % AAC_AIFQ_LENGTH;
/* Only enqueue when the ring is not full (head+1 would hit tail). */
3371 if (next != sc->aac_aifq_tail) {
3372 bcopy(aif, &sc->aac_aifq[next], sizeof(struct aac_aif_command));
3373 sc->aac_aifq_head = next;
3375 /* On the off chance that someone is sleeping for an aif... */
3376 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3377 wakeup(sc->aac_aifq);
3378 /* token may have been lost */
3379 /* Wakeup any poll()ers */
3380 KNOTE(&sc->rcv_kq.ki_note, 0);
3381 /* token may have been lost */
3383 AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
3389 * Return the Revision of the driver to userspace and check to see if the
3390 * userspace app is possibly compatible. This is extremely bogus since
3391 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3392 * returning what the card reported.
3395 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3397 struct aac_rev_check rev_check;
3398 struct aac_rev_check_resp rev_check_resp;
3404 * Copyin the revision struct from userspace
3406 if ((error = copyin(udata, (caddr_t)&rev_check,
3407 sizeof(struct aac_rev_check))) != 0) {
3411 debug(2, "Userland revision= %d\n",
3412 rev_check.callingRevision.buildNumber);
3415 * Doctor up the response struct.
/* Always report compatible; echo the revision cached from the adapter. */
3417 rev_check_resp.possiblyCompatible = 1;
3418 rev_check_resp.adapterSWRevision.external.ul =
3419 sc->aac_revision.external.ul;
3420 rev_check_resp.adapterSWRevision.buildNumber =
3421 sc->aac_revision.buildNumber;
3423 return(copyout((caddr_t)&rev_check_resp, udata,
3424 sizeof(struct aac_rev_check_resp)));
3428 * Pass the caller the next AIF in their queue
/*
 * Validates the caller's context cookie against the aif thread pointer
 * handed out by FSACTL_OPEN_GET_ADAPTER_FIB, then returns the next AIF;
 * when the queue is empty (EAGAIN) and the caller asked to wait, sleeps
 * interruptibly on the queue until one arrives.
 */
3431 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3433 struct get_adapter_fib_ioctl agf;
3438 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3441 * Check the magic number that we gave the caller.
3443 if (agf.AdapterFibContext != (int)(uintptr_t)sc->aifthread) {
3447 error = aac_return_aif(sc, agf.AifFib);
3449 if ((error == EAGAIN) && (agf.Wait)) {
3450 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3451 while (error == EAGAIN) {
3452 error = tsleep(sc->aac_aifq,
3453 PCATCH, "aacaif", 0);
3455 error = aac_return_aif(sc,
3458 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3466 * Hand the next AIF off the top of the queue out to userspace.
3468 * YYY token could be lost during copyout
/*
 * Under the aifq lock: empty ring (tail == head) yields EAGAIN;
 * otherwise copy the next entry out and, on success, advance the tail.
 */
3471 aac_return_aif(struct aac_softc *sc, caddr_t uptr)
3477 AAC_LOCK_ACQUIRE(&sc->aac_aifq_lock);
3478 if (sc->aac_aifq_tail == sc->aac_aifq_head) {
3479 AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
3483 next = (sc->aac_aifq_tail + 1) % AAC_AIFQ_LENGTH;
3484 error = copyout(&sc->aac_aifq[next], uptr,
3485 sizeof(struct aac_aif_command));
3487 device_printf(sc->aac_dev,
3488 "aac_return_aif: copyout returned %d\n", error);
/* Consume the entry only if the copyout succeeded. */
3490 sc->aac_aifq_tail = next;
3492 AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
/*
 * Report the adapter's PCI bus/slot location to userland in the small
 * locally-declared aac_pci_info layout.
 */
3497 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3499 struct aac_pci_info {
3507 pciinf.bus = pci_get_bus(sc->aac_dev);
3508 pciinf.slot = pci_get_slot(sc->aac_dev);
3510 error = copyout((caddr_t)&pciinf, uptr,
3511 sizeof(struct aac_pci_info));
3517 * Give the userland some information about the container. The AAC arch
3518 * expects the driver to be a SCSI passthrough type driver, so it expects
3519 * the containers to have b:t:l numbers. Fake it.
3522 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3524 struct aac_query_disk query_disk;
3525 struct aac_container *co;
3526 struct aac_disk *disk;
3533 error = copyin(uptr, (caddr_t)&query_disk,
3534 sizeof(struct aac_query_disk));
3538 id = query_disk.ContainerNumber;
/* Find the container whose ObjectId matches the requested number. */
3542 AAC_LOCK_ACQUIRE(&sc->aac_container_lock);
3543 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3544 if (co->co_mntobj.ObjectId == id)
/* No match: report the container as invalid/deleted. */
3549 query_disk.Valid = 0;
3550 query_disk.Locked = 0;
3551 query_disk.Deleted = 1; /* XXX is this right? */
/* Match: synthesize b:t:l and device-name details from the disk softc. */
3553 disk = device_get_softc(co->co_disk);
3554 query_disk.Valid = 1;
3556 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0;
3557 query_disk.Deleted = 0;
3558 query_disk.Bus = device_get_unit(sc->aac_dev);
3559 query_disk.Target = disk->unit;
3561 query_disk.UnMapped = 0;
3562 bcopy(disk->ad_dev_t->si_name,
3563 &query_disk.diskDeviceName[0], 10);
3565 AAC_LOCK_RELEASE(&sc->aac_container_lock);
3567 error = copyout((caddr_t)&query_disk, uptr,
3568 sizeof(struct aac_query_disk));
/*
 * Discover the adapter's physical SCSI passthrough buses: query the
 * SCSI method id via CT_GET_SCSI_METHOD, fetch bus info with a
 * VM_Ioctl/GetBusInfo FIB, then add an "aacp" child device for each
 * valid bus and attach them.
 */
3574 aac_get_bus_info(struct aac_softc *sc)
3576 struct aac_fib *fib;
3577 struct aac_ctcfg *c_cmd;
3578 struct aac_ctcfg_resp *c_resp;
3579 struct aac_vmioctl *vmi;
3580 struct aac_vmi_businf_resp *vmi_resp;
3581 struct aac_getbusinf businfo;
3582 struct aac_sim *caminf;
3584 int i, found, error;
3586 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3587 aac_alloc_sync_fib(sc, &fib);
/* Step 1: ask the adapter for its SCSI method id. */
3588 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3589 bzero(c_cmd, sizeof(struct aac_ctcfg));
3591 c_cmd->Command = VM_ContainerConfig;
3592 c_cmd->cmd = CT_GET_SCSI_METHOD;
3595 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3596 sizeof(struct aac_ctcfg));
3598 device_printf(sc->aac_dev, "Error %d sending "
3599 "VM_ContainerConfig command\n", error);
3600 aac_release_sync_fib(sc);
3601 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3605 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3606 if (c_resp->Status != ST_OK) {
3607 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3609 aac_release_sync_fib(sc);
3610 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3614 sc->scsi_method_id = c_resp->param;
/* Step 2: fetch the bus information via VM_Ioctl/GetBusInfo. */
3616 vmi = (struct aac_vmioctl *)&fib->data[0];
3617 bzero(vmi, sizeof(struct aac_vmioctl));
3619 vmi->Command = VM_Ioctl;
3620 vmi->ObjType = FT_DRIVE;
3621 vmi->MethId = sc->scsi_method_id;
3623 vmi->IoctlCmd = GetBusInfo;
3625 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3626 sizeof(struct aac_vmioctl));
3628 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3630 aac_release_sync_fib(sc);
3631 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3635 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3636 if (vmi_resp->Status != ST_OK) {
3637 debug(1, "VM_Ioctl returned %d\n", vmi_resp->Status);
3638 aac_release_sync_fib(sc);
3639 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/* Keep a private copy of the bus info before releasing the FIB. */
3643 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3644 aac_release_sync_fib(sc);
3645 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/* Step 3: create an "aacp" passthrough child for each valid bus. */
3648 for (i = 0; i < businfo.BusCount; i++) {
3649 if (businfo.BusValid[i] != AAC_BUS_VALID)
3652 caminf = (struct aac_sim *)kmalloc(sizeof(struct aac_sim),
3653 M_AACBUF, M_INTWAIT | M_ZERO);
3655 child = device_add_child(sc->aac_dev, "aacp", -1);
3656 if (child == NULL) {
3657 device_printf(sc->aac_dev,
3658 "device_add_child failed for passthrough bus %d\n",
3660 kfree(caminf, M_AACBUF);
3664 caminf->TargetsPerBus = businfo.TargetsPerBus;
3665 caminf->BusNumber = i;
3666 caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3667 caminf->aac_sc = sc;
3668 caminf->sim_dev = child;
3670 device_set_ivars(child, caminf);
3671 device_set_desc(child, "SCSI Passthrough Bus");
3672 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3678 bus_generic_attach(sc->aac_dev);