2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2001 Scott Long
4 * Copyright (c) 2000 BSDi
5 * Copyright (c) 2001 Adaptec, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * $FreeBSD: src/sys/dev/aac/aac.c,v 1.9.2.14 2003/04/08 13:22:08 scottl Exp $
30 * $DragonFly: src/sys/dev/raid/aac/aac.c,v 1.34 2008/01/20 03:40:35 pavalos Exp $
34 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
36 #define AAC_DRIVER_VERSION 0x02000000
37 #define AAC_DRIVERNAME "aac"
41 /* #include <stddef.h> */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/sysctl.h>
52 #include <sys/devicestat.h>
54 #include <sys/signalvar.h>
56 #include <sys/eventhandler.h>
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
63 #include "aac_ioctl.h"
65 #include "aac_tables.h"
67 static void aac_startup(void *arg);
68 static void aac_add_container(struct aac_softc *sc,
69 struct aac_mntinforesp *mir, int f);
70 static void aac_get_bus_info(struct aac_softc *sc);
71 static int aac_shutdown(device_t dev);
73 /* Command Processing */
74 static void aac_timeout(void *ssc);
75 static int aac_map_command(struct aac_command *cm);
76 static void aac_complete(void *context, int pending);
77 static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
78 static void aac_bio_complete(struct aac_command *cm);
79 static int aac_wait_command(struct aac_command *cm);
80 static void aac_command_thread(struct aac_softc *sc);
82 /* Command Buffer Management */
83 static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
85 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
87 static int aac_alloc_commands(struct aac_softc *sc);
88 static void aac_free_commands(struct aac_softc *sc);
89 static void aac_unmap_command(struct aac_command *cm);
91 /* Hardware Interface */
92 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
94 static int aac_check_firmware(struct aac_softc *sc);
95 static int aac_init(struct aac_softc *sc);
96 static int aac_sync_command(struct aac_softc *sc, u_int32_t command,
97 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
98 u_int32_t arg3, u_int32_t *sp);
99 static int aac_enqueue_fib(struct aac_softc *sc, int queue,
100 struct aac_command *cm);
101 static int aac_dequeue_fib(struct aac_softc *sc, int queue,
102 u_int32_t *fib_size, struct aac_fib **fib_addr);
103 static int aac_enqueue_response(struct aac_softc *sc, int queue,
104 struct aac_fib *fib);
106 /* Falcon/PPC interface */
107 static int aac_fa_get_fwstatus(struct aac_softc *sc);
108 static void aac_fa_qnotify(struct aac_softc *sc, int qbit);
109 static int aac_fa_get_istatus(struct aac_softc *sc);
110 static void aac_fa_clear_istatus(struct aac_softc *sc, int mask);
111 static void aac_fa_set_mailbox(struct aac_softc *sc, u_int32_t command,
112 u_int32_t arg0, u_int32_t arg1,
113 u_int32_t arg2, u_int32_t arg3);
114 static int aac_fa_get_mailbox(struct aac_softc *sc, int mb);
115 static void aac_fa_set_interrupts(struct aac_softc *sc, int enable);
117 struct aac_interface aac_fa_interface = {
121 aac_fa_clear_istatus,
124 aac_fa_set_interrupts,
128 /* StrongARM interface */
129 static int aac_sa_get_fwstatus(struct aac_softc *sc);
130 static void aac_sa_qnotify(struct aac_softc *sc, int qbit);
131 static int aac_sa_get_istatus(struct aac_softc *sc);
132 static void aac_sa_clear_istatus(struct aac_softc *sc, int mask);
133 static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
134 u_int32_t arg0, u_int32_t arg1,
135 u_int32_t arg2, u_int32_t arg3);
136 static int aac_sa_get_mailbox(struct aac_softc *sc, int mb);
137 static void aac_sa_set_interrupts(struct aac_softc *sc, int enable);
139 struct aac_interface aac_sa_interface = {
143 aac_sa_clear_istatus,
146 aac_sa_set_interrupts,
150 /* i960Rx interface */
151 static int aac_rx_get_fwstatus(struct aac_softc *sc);
152 static void aac_rx_qnotify(struct aac_softc *sc, int qbit);
153 static int aac_rx_get_istatus(struct aac_softc *sc);
154 static void aac_rx_clear_istatus(struct aac_softc *sc, int mask);
155 static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
156 u_int32_t arg0, u_int32_t arg1,
157 u_int32_t arg2, u_int32_t arg3);
158 static int aac_rx_get_mailbox(struct aac_softc *sc, int mb);
159 static void aac_rx_set_interrupts(struct aac_softc *sc, int enable);
160 static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
161 static int aac_rx_get_outb_queue(struct aac_softc *sc);
162 static void aac_rx_set_outb_queue(struct aac_softc *sc, int index);
164 struct aac_interface aac_rx_interface = {
168 aac_rx_clear_istatus,
171 aac_rx_set_interrupts,
173 aac_rx_get_outb_queue,
174 aac_rx_set_outb_queue
177 /* Rocket/MIPS interface */
178 static int aac_rkt_get_fwstatus(struct aac_softc *sc);
179 static void aac_rkt_qnotify(struct aac_softc *sc, int qbit);
180 static int aac_rkt_get_istatus(struct aac_softc *sc);
181 static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
182 static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
183 u_int32_t arg0, u_int32_t arg1,
184 u_int32_t arg2, u_int32_t arg3);
185 static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
186 static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
187 static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
188 static int aac_rkt_get_outb_queue(struct aac_softc *sc);
189 static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index);
191 struct aac_interface aac_rkt_interface = {
192 aac_rkt_get_fwstatus,
195 aac_rkt_clear_istatus,
198 aac_rkt_set_interrupts,
199 aac_rkt_send_command,
200 aac_rkt_get_outb_queue,
201 aac_rkt_set_outb_queue
204 /* Debugging and Diagnostics */
205 static void aac_describe_controller(struct aac_softc *sc);
206 static char *aac_describe_code(struct aac_code_lookup *table,
209 /* Management Interface */
210 static d_open_t aac_open;
211 static d_close_t aac_close;
212 static d_ioctl_t aac_ioctl;
213 static d_poll_t aac_poll;
214 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) __unused;
215 static void aac_handle_aif(struct aac_softc *sc,
216 struct aac_fib *fib);
217 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
218 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
219 static int aac_return_aif(struct aac_softc *sc, caddr_t uptr);
220 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
221 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
222 static void aac_ioctl_event(struct aac_softc *sc,
223 struct aac_event *event, void *arg);
225 #define AAC_CDEV_MAJOR 150
227 static struct dev_ops aac_ops = {
228 { "aac", AAC_CDEV_MAJOR, 0 },
230 .d_close = aac_close,
231 .d_ioctl = aac_ioctl,
235 DECLARE_DUMMY_MODULE(aac);
237 MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");
240 SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");
247 * Initialise the controller and softc
/*
 * aac_attach() -- bring one controller instance up: initialise the
 * per-controller queues/locks, verify firmware, initialise the adapter,
 * allocate and hook the interrupt, create the control device and the AIF
 * kthread, and register a post-sync shutdown eventhandler.
 * NOTE(review): partial listing (lines missing, original line numbers fused
 * into each line) -- comments describe only the code visible here.
 */
250 aac_attach(struct aac_softc *sc)
255 	callout_init(&sc->aac_watchdog);
258 	 * Initialise per-controller queues.
263 	aac_initq_complete(sc);
267 	 * Initialise command-completion task.
269 	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);
271 	/* mark controller as suspended until we get ourselves organised */
272 	sc->aac_state |= AAC_STATE_SUSPEND;
275 	 * Check that the firmware on the card is supported.
277 	if ((error = aac_check_firmware(sc)) != 0)
	/* Locks guard the AIF queue, the main I/O path and the container list. */
283 	AAC_LOCK_INIT(&sc->aac_aifq_lock, "AAC AIF lock");
284 	AAC_LOCK_INIT(&sc->aac_io_lock, "AAC I/O lock");
285 	AAC_LOCK_INIT(&sc->aac_container_lock, "AAC container lock");
286 	TAILQ_INIT(&sc->aac_container_tqh);
287 	TAILQ_INIT(&sc->aac_ev_cmfree);
290 	/* Initialize the local AIF queue pointers */
291 	sc->aac_aifq_head = sc->aac_aifq_tail = AAC_AIFQ_LENGTH;
294 	 * Initialise the adapter.
296 	if ((error = aac_init(sc)) != 0)
300 	 * Allocate and connect our interrupt.
303 	if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ,
306 			       RF_ACTIVE)) == NULL) {
307 		device_printf(sc->aac_dev, "can't allocate interrupt\n");
	/*
	 * NEW_COMM adapters get a normal handler; older adapters first try an
	 * INTR_FAST handler and fall back to a regular one if that fails.
	 */
310 	if (sc->flags & AAC_FLAGS_NEW_COMM) {
311 		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
313 				   sc, &sc->aac_intr, NULL)) {
314 			device_printf(sc->aac_dev, "can't set up interrupt\n");
318 		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
319 				   INTR_FAST, aac_fast_intr,
320 				   sc, &sc->aac_intr, NULL)) {
321 			device_printf(sc->aac_dev,
322 				      "can't set up FAST interrupt\n");
323 			if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
325 					   sc, &sc->aac_intr, NULL)) {
326 				device_printf(sc->aac_dev,
327 					      "can't set up MPSAFE interrupt\n");
334 	 * Print a little information about the controller.
336 	aac_describe_controller(sc);
339 	 * Register to probe our containers later.
	/* Container probing is deferred to aac_startup() via the intrhook. */
341 	sc->aac_ich.ich_func = aac_startup;
342 	sc->aac_ich.ich_arg = sc;
343 	sc->aac_ich.ich_desc = "aac";
344 	if (config_intrhook_establish(&sc->aac_ich) != 0) {
345 		device_printf(sc->aac_dev,
346 			      "can't establish configuration hook\n");
351 	 * Make the control device.
353 	unit = device_get_unit(sc->aac_dev);
354 	sc->aac_dev_t = make_dev(&aac_ops, unit, UID_ROOT, GID_OPERATOR,
355 				 0640, "aac%d", unit);
356 	sc->aac_dev_t->si_drv1 = sc;
357 	reference_dev(sc->aac_dev_t);
359 	/* Create the AIF thread */
360 	if (kthread_create((void(*)(void *))aac_command_thread, sc,
361 			   &sc->aifthread, "aac%daif", unit))
362 		panic("Could not create AIF thread\n");
364 	/* Register the shutdown method to only be called post-dump */
365 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_post_sync, aac_shutdown,
366 					    sc->aac_dev, SHUTDOWN_PRI_DRIVER)) == NULL)
367 		device_printf(sc->aac_dev,
368 			      "shutdown event registration failed\n");
370 	/* Register with CAM for the non-DASD devices */
371 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
372 		TAILQ_INIT(&sc->aac_sim_tqh);
373 		aac_get_bus_info(sc);
/*
 * aac_add_event() -- queue an event callback; only AAC_EVENT_CMFREE is
 * handled in the visible code (appended to the command-free event list),
 * anything else logs an error.  NOTE(review): partial listing.
 */
380 aac_add_event(struct aac_softc *sc, struct aac_event *event)
383 	switch (event->ev_type & AAC_EVENT_MASK) {
384 	case AAC_EVENT_CMFREE:
385 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
388 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
397 * Probe for containers, create disks.
/*
 * aac_startup() -- intrhook callback: probe containers via VM_NameServe
 * sync FIBs, attach the resulting child devices, clear the SUSPEND state
 * and unmask interrupts.  NOTE(review): partial listing -- the loop
 * increment of 'i' and early-exit paths are not visible here.
 */
400 aac_startup(void *arg)
402 	struct aac_softc *sc;
404 	struct aac_mntinfo *mi;
405 	struct aac_mntinforesp *mir = NULL;
406 	int count = 0, i = 0;
410 	sc = (struct aac_softc *)arg;
412 	/* disconnect ourselves from the intrhook chain */
413 	config_intrhook_disestablish(&sc->aac_ich);
415 	AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
416 	aac_alloc_sync_fib(sc, &fib);
417 	mi = (struct aac_mntinfo *)&fib->data[0];
419 	/* loop over possible containers */
421 		/* request information on this container */
422 		bzero(mi, sizeof(struct aac_mntinfo));
423 		mi->Command = VM_NameServe;
424 		mi->MntType = FT_FILESYS;
426 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
427 				 sizeof(struct aac_mntinfo))) {
428 			device_printf(sc->aac_dev,
429 			    "error probing container %d", i);
434 		mir = (struct aac_mntinforesp *)&fib->data[0];
435 		/* XXX Need to check if count changed */
436 		count = mir->MntRespCount;
437 		aac_add_container(sc, mir, 0);
439 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
441 	aac_release_sync_fib(sc);
442 	AAC_LOCK_RELEASE(&sc->aac_io_lock);
444 	/* poke the bus to actually attach the child devices */
445 	if (bus_generic_attach(sc->aac_dev))
446 		device_printf(sc->aac_dev, "bus_generic_attach failed\n");
448 	/* mark the controller up */
449 	sc->aac_state &= ~AAC_STATE_SUSPEND;
451 	/* enable interrupts now */
452 	AAC_UNMASK_INTERRUPTS(sc);
456 * Create a device to respresent a new container
/*
 * aac_add_container() -- create an "aacd" child device for a valid
 * container reported in (mir) and append it to the container list under
 * the container lock.  NOTE(review): partial listing -- the kmalloc flags
 * and the NULL-check on 'co' are not visible; confirm allocation failure
 * handling against the full source.
 */
459 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
461 	struct aac_container *co;
465 	 * Check container volume type for validity.  Note that many of
466 	 * the possible types may never show up.
468 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
469 		co = (struct aac_container *)kmalloc(sizeof *co, M_AACBUF,
471 		debug(1, "id %x  name '%.16s'  size %u  type %d",
472 		      mir->MntTable[0].ObjectId,
473 		      mir->MntTable[0].FileSystemName,
474 		      mir->MntTable[0].Capacity, mir->MntTable[0].VolType);
476 		if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
477 			device_printf(sc->aac_dev, "device_add_child failed\n");
479 			device_set_ivars(child, co);
480 		device_set_desc(child, aac_describe_code(aac_container_types,
481 				mir->MntTable[0].VolType));
484 		bcopy(&mir->MntTable[0], &co->co_mntobj,
485 		      sizeof(struct aac_mntobj));
486 		AAC_LOCK_ACQUIRE(&sc->aac_container_lock);
487 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
488 		AAC_LOCK_RELEASE(&sc->aac_container_lock);
493 * Free all of the resources associated with (sc)
495 * Should not be called if the controller is active.
/*
 * aac_free() -- tear down all resources held by (sc): control device,
 * FIBs and their DMA tag, the common area, the interrupt, the data and
 * parent DMA tags, the register window and the dev_ops minor.  Must not
 * be called while the controller is active (per upstream comment above).
 * NOTE(review): partial listing; teardown ordering gaps not visible here.
 */
498 aac_free(struct aac_softc *sc)
503 	/* remove the control device */
504 	if (sc->aac_dev_t != NULL)
505 		destroy_dev(sc->aac_dev_t);
507 	/* throw away any FIB buffers, discard the FIB DMA tag */
508 	aac_free_commands(sc);
509 	if (sc->aac_fib_dmat)
510 		bus_dma_tag_destroy(sc->aac_fib_dmat);
512 	kfree(sc->aac_commands, M_AACBUF);
514 	/* destroy the common area */
515 	if (sc->aac_common) {
516 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
517 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
518 				sc->aac_common_dmamap);
520 	if (sc->aac_common_dmat)
521 		bus_dma_tag_destroy(sc->aac_common_dmat);
523 	/* disconnect the interrupt handler */
525 		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
526 	if (sc->aac_irq != NULL)
527 		bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid,
530 	/* destroy data-transfer DMA tag */
531 	if (sc->aac_buffer_dmat)
532 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
534 	/* destroy the parent DMA tag */
535 	if (sc->aac_parent_dmat)
536 		bus_dma_tag_destroy(sc->aac_parent_dmat);
538 	/* release the register window mapping */
539 	if (sc->aac_regs_resource != NULL) {
540 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
541 				     sc->aac_regs_rid, sc->aac_regs_resource);
543 	dev_ops_remove_minor(&aac_ops, device_get_unit(sc->aac_dev));
547 * Disconnect from the controller completely, in preparation for unload.
/*
 * aac_detach() -- newbus detach: stop the watchdog, refuse detach while
 * the control device is open, delete container and CAM SIM children,
 * stop the AIF thread (30s timeout, then panic if it is still running),
 * shut the controller down, and release locks/eventhandler.
 * NOTE(review): partial listing -- error-propagation lines are missing.
 */
550 aac_detach(device_t dev)
552 	struct aac_softc *sc;
553 	struct aac_container *co;
559 	sc = device_get_softc(dev);
561 	callout_stop(&sc->aac_watchdog);
563 	if (sc->aac_state & AAC_STATE_OPEN)
566 	/* Remove the child containers */
567 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
568 		error = device_delete_child(dev, co->co_disk);
571 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
575 	/* Remove the CAM SIMs */
576 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
577 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
578 		error = device_delete_child(dev, sim->sim_dev);
581 		kfree(sim, M_AACBUF);
	/* Ask the AIF thread to exit and wait (bounded) for it to do so. */
584 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
585 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
586 		wakeup(sc->aifthread);
587 		tsleep(sc->aac_dev, PCATCH, "aacdch", 30 * hz);
590 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
591 		panic("Cannot shutdown AIF thread\n");
593 	if ((error = aac_shutdown(dev)))
596 	EVENTHANDLER_DEREGISTER(shutdown_post_sync, sc->eh);
600 	lockuninit(&sc->aac_aifq_lock);
601 	lockuninit(&sc->aac_io_lock);
602 	lockuninit(&sc->aac_container_lock);
608 * Bring the controller down to a dormant state and detach all child devices.
610 * This function is called before detach or system shutdown.
612 * Note that we can assume that the bioq on the controller is empty, as we won't
613 * allow shutdown if any device is open.
/*
 * aac_shutdown() -- quiesce the controller: mark it suspended, send a
 * VM_CloseAll ContainerCommand followed by an FsaHostShutdown FIB, then
 * mask interrupts and release the sync FIB.  Per the upstream XXX note,
 * FsaHostShutdown leaves the card down until a PCI bus reset.
 * NOTE(review): partial listing.
 */
616 aac_shutdown(device_t dev)
618 	struct aac_softc *sc;
620 	struct aac_close_command *cc;
624 	sc = device_get_softc(dev);
626 	sc->aac_state |= AAC_STATE_SUSPEND;
629 	 * Send a Container shutdown followed by a HostShutdown FIB to the
630 	 * controller to convince it that we don't want to talk to it anymore.
631 	 * We've been closed and all I/O completed already
633 	device_printf(sc->aac_dev, "shutting down controller...");
635 	AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
636 	aac_alloc_sync_fib(sc, &fib);
637 	cc = (struct aac_close_command *)&fib->data[0];
639 	bzero(cc, sizeof(struct aac_close_command));
640 	cc->Command = VM_CloseAll;
641 	cc->ContainerId = 0xffffffff;
642 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
643 	    sizeof(struct aac_close_command)))
644 		kprintf("FAILED.\n");
651 	 * XXX Issuing this command to the controller makes it shut down
652 	 * but also keeps it from coming back up without a reset of the
653 	 * PCI bus.  This is not desirable if you are just unloading the
654 	 * driver module with the intent to reload it later.
656 	if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
658 		kprintf("FAILED.\n");
665 	AAC_MASK_INTERRUPTS(sc);
666 	aac_release_sync_fib(sc);
667 	AAC_LOCK_RELEASE(&sc->aac_io_lock);
673 * Bring the controller to a quiescent state, ready for system suspend.
/*
 * aac_suspend() -- system-suspend hook: set AAC_STATE_SUSPEND (which
 * stalls new I/O submission elsewhere in the driver) and mask the
 * adapter's interrupts.  NOTE(review): partial listing.
 */
676 aac_suspend(device_t dev)
678 	struct aac_softc *sc;
682 	sc = device_get_softc(dev);
684 	sc->aac_state |= AAC_STATE_SUSPEND;
686 	AAC_MASK_INTERRUPTS(sc);
691 * Bring the controller back to a state ready for operation.
/*
 * aac_resume() -- system-resume hook: the inverse of aac_suspend();
 * clear the SUSPEND state bit and unmask adapter interrupts.
 * NOTE(review): partial listing.
 */
694 aac_resume(device_t dev)
696 	struct aac_softc *sc;
700 	sc = device_get_softc(dev);
702 	sc->aac_state &= ~AAC_STATE_SUSPEND;
703 	AAC_UNMASK_INTERRUPTS(sc);
708 * Interrupt handler for NEW_COMM interface.
/*
 * aac_new_intr() -- interrupt handler for NEW_COMM adapters.  Drains the
 * outbound queue: 0xffffffff (read twice) means empty, 0xfffffffe means
 * the controller wants more work (ignored per the XXX note).  AIF entries
 * are copied out of adapter memory word-by-word, handled, and the slot is
 * returned to the adapter; command completions are looked up by index
 * (index >> 2) and completed inline.  NOTE(review): partial listing --
 * the AIF-vs-command discrimination test is not visible here.
 */
711 aac_new_intr(void *arg)
713 	struct aac_softc *sc;
714 	u_int32_t index, fast;
715 	struct aac_command *cm;
721 	sc = (struct aac_softc *)arg;
723 	AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
725 		index = AAC_GET_OUTB_QUEUE(sc);
726 		if (index == 0xffffffff)
727 			index = AAC_GET_OUTB_QUEUE(sc);
728 		if (index == 0xffffffff)
731 		if (index == 0xfffffffe) {
732 			/* XXX This means that the controller wants
733 			 * more work.  Ignore it for now.
	/* AIF path: copy the FIB out of adapter space a word at a time. */
738 			fib = (struct aac_fib *)kmalloc(sizeof *fib, M_AACBUF,
741 				for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
742 					((u_int32_t *)fib)[i] = AAC_GETREG4(sc, index + i*4);
743 				aac_handle_aif(sc, fib);
744 				kfree(fib, M_AACBUF);
747 			 * AIF memory is owned by the adapter, so let it
748 			 * know that we are done with it.
750 			AAC_SET_OUTB_QUEUE(sc, index);
751 			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
	/* Completion path: index encodes the command slot (index >> 2). */
754 			cm = sc->aac_commands + (index >> 2);
757 				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
758 				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
761 			aac_unmap_command(cm);
762 			cm->cm_flags |= AAC_CMD_COMPLETED;
764 			/* is there a completion handler? */
765 			if (cm->cm_complete != NULL) {
768 				/* assume that someone is sleeping on this
773 			sc->flags &= ~AAC_QUEUE_FRZN;
776 	/* see if we can start some more I/O */
777 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
780 	AAC_LOCK_RELEASE(&sc->aac_io_lock);
/*
 * aac_fast_intr() -- INTR_FAST handler for legacy adapters.  Reads and
 * clears the interrupt status register, defers completion processing to
 * the aac_complete taskqueue, and wakes the AIF thread for printf/command
 * notifications.  Deliberately lockless: it only touches the istatus
 * register and the shared printf buffer.  NOTE(review): partial listing.
 */
784 aac_fast_intr(void *arg)
786 	struct aac_softc *sc;
791 	sc = (struct aac_softc *)arg;
794 	 * Read the status register directly.  This is faster than taking the
795 	 * driver lock and reading the queues directly.  It also saves having
796 	 * to turn parts of the driver lock into a spin mutex, which would be
799 	reason = AAC_GET_ISTATUS(sc);
800 	AAC_CLEAR_ISTATUS(sc, reason);
802 	/* handle completion processing */
803 	if (reason & AAC_DB_RESPONSE_READY)
804 		taskqueue_enqueue(taskqueue_swi, &sc->aac_task_complete);
806 	/* controller wants to talk to us */
807 	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
809 		 * XXX Make sure that we don't get fooled by strange messages
810 		 * that start with a NULL.
812 		if ((reason & AAC_DB_PRINTF) &&
813 		    (sc->aac_common->ac_printf[0] == 0))
814 			sc->aac_common->ac_printf[0] = 32;
817 		 * This might miss doing the actual wakeup.  However, the
818 		 * msleep that this is waking up has a timeout, so it will
819 		 * wake up eventually.  AIFs and printfs are low enough
820 		 * priority that they can handle hanging out for a few seconds
823 		wakeup(sc->aifthread);
832 * Start as much queued I/O as possible on the controller
/*
 * aac_startio() -- push as much queued I/O to the controller as possible:
 * drain the ready queue first, then build new commands from the bio queue.
 * A failure from aac_map_command() is treated as fatal because it implies
 * bus_dmamap_load() itself failed.  Bails out immediately while the queue
 * is frozen (AAC_QUEUE_FRZN).  NOTE(review): partial listing -- the loop
 * structure around these statements is not visible.
 */
835 aac_startio(struct aac_softc *sc)
837 	struct aac_command *cm;
841 	if (sc->flags & AAC_QUEUE_FRZN)
846 		 * Try to get a command that's been put off for lack of
849 		cm = aac_dequeue_ready(sc);
852 		 * Try to build a command off the bio queue (ignore error
856 			aac_bio_command(sc, &cm);
863 		 * Try to give the command to the controller.  Any error is
864 		 * catastrophic since it means that bus_dmamap_load() failed.
866 		if (aac_map_command(cm) != 0)
867 			panic("aac: error mapping command %p\n", cm);
872 * Deliver a command to the controller; allocate controller resources at the
873 * last moment when possible.
/*
 * aac_map_command() -- DMA-map a command's data buffer and deliver it via
 * the aac_map_command_sg callback.  EINPROGRESS from bus_dmamap_load()
 * (deferred mapping) freezes the queue; commands with no data call the
 * callback directly with zero segments.  Panics on double-mapping.
 * NOTE(review): partial listing -- the assignment of 'sc' from 'cm' is
 * in a missing line.
 */
876 aac_map_command(struct aac_command *cm)
878 	struct aac_softc *sc;
886 	/* don't map more than once */
887 	if (cm->cm_flags & AAC_CMD_MAPPED)
888 		panic("aac: command %p already mapped", cm);
890 	if (cm->cm_datalen != 0) {
891 		error = bus_dmamap_load(sc->aac_buffer_dmat, cm->cm_datamap,
892 					cm->cm_data, cm->cm_datalen,
893 					aac_map_command_sg, cm, 0);
894 		if (error == EINPROGRESS) {
895 			debug(1, "freezing queue\n");
896 			sc->flags |= AAC_QUEUE_FRZN;
900 		aac_map_command_sg(cm, NULL, 0, 0);
906 * Handle notification of one or more FIBs coming from the controller.
/*
 * aac_command_thread() -- the per-controller AIF kernel thread.  Loops
 * until AAC_AIFFLAGS_EXIT: sleeps (interlocked, with a periodic timeout),
 * allocates additional FIBs outside the driver lock (contigmalloc would
 * grab Giant under the lock -- LOR), runs the command timeout scan on
 * EWOULDBLOCK wakeups, drains the hardware printf buffer, and dequeues
 * adapter-originated FIBs (AIFs), returning each to the adapter on the
 * response queue.  Wakes aac_detach() sleepers on exit.
 * NOTE(review): partial listing -- the dequeue loop head and several
 * switch cases are missing.
 */
909 aac_command_thread(struct aac_softc *sc)
917 	AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
918 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
920 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
922 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) {
923 			tsleep_interlock(sc->aifthread, 0);
924 			AAC_LOCK_RELEASE(&sc->aac_io_lock);
925 			retval = tsleep(sc->aifthread, PINTERLOCKED,
926 					"aifthd", AAC_PERIODIC_INTERVAL * hz);
927 			AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
930 		 * First see if any FIBs need to be allocated.  This needs
931 		 * to be called without the driver lock because contigmalloc
932 		 * will grab Giant, and would result in an LOR.
934 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
935 			AAC_LOCK_RELEASE(&sc->aac_io_lock);
936 			aac_alloc_commands(sc);
937 			AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
938 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
943 		 * While we're here, check to see if any commands are stuck.
944 		 * This is pretty low-priority, so it's ok if it doesn't
947 		if (retval == EWOULDBLOCK)
950 		/* Check the hardware printf message buffer */
951 		if (sc->aac_common->ac_printf[0] != 0)
952 			aac_print_printf(sc);
954 		/* Also check to see if the adapter has a command for us. */
955 		if (sc->flags & AAC_FLAGS_NEW_COMM)
958 			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
962 			AAC_PRINT_FIB(sc, fib);
964 			switch (fib->Header.Command) {
966 				aac_handle_aif(sc, fib);
969 				device_printf(sc->aac_dev, "unknown command "
970 					      "from controller\n");
974 			if ((fib->Header.XferState == 0) ||
975 			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
979 			/* Return the AIF to the controller. */
980 			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
981 				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
982 				*(AAC_FSAStatus*)fib->data = ST_OK;
984 				/* XXX Compute the Size field? */
985 				size = fib->Header.Size;
986 				if (size > sizeof(struct aac_fib)) {
987 					size = sizeof(struct aac_fib);
988 					fib->Header.Size = size;
991 				 * Since we did not generate this command, it
992 				 * cannot go through the normal
993 				 * enqueue->startio chain.
995 				aac_enqueue_response(sc,
996 						     AAC_ADAP_NORM_RESP_QUEUE,
	/* Thread exit: clear RUNNING and wake whoever is waiting in detach. */
1001 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1002 	AAC_LOCK_RELEASE(&sc->aac_io_lock);
1003 	wakeup(sc->aac_dev);
1009 * Process completed commands.
/*
 * aac_complete() -- taskqueue handler that drains the host-normal
 * response queue: for each completed FIB, look up the command by the
 * SenderData index into sc->aac_commands, unmap it, mark it COMPLETED and
 * either run its completion callback or wake a synchronous waiter; then
 * unfreeze the queue and restart I/O.  'pending' is the taskqueue count
 * argument and is unused here.  NOTE(review): partial listing.
 */
1012 aac_complete(void *context, int pending)
1014 	struct aac_softc *sc;
1015 	struct aac_command *cm;
1016 	struct aac_fib *fib;
1021 	sc = (struct aac_softc *)context;
1023 	AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
1025 	/* pull completed commands off the queue */
1027 		/* look for completed FIBs on our queue */
1028 		if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
1030 			break;	/* nothing to do */
1032 		/* get the command, unmap and queue for later processing */
1033 		cm = sc->aac_commands + fib->Header.SenderData;
1035 			AAC_PRINT_FIB(sc, fib);
1038 		aac_remove_busy(cm);
1039 		aac_unmap_command(cm);		/* XXX defer? */
1040 		cm->cm_flags |= AAC_CMD_COMPLETED;
1042 		/* is there a completion handler? */
1043 		if (cm->cm_complete != NULL) {
1044 			cm->cm_complete(cm);
1046 			/* assume that someone is sleeping on this command */
1051 	/* see if we can start some more I/O */
1052 	sc->flags &= ~AAC_QUEUE_FRZN;
1055 	AAC_LOCK_RELEASE(&sc->aac_io_lock);
1059 * Handle a bio submitted from a disk device.
/*
 * aac_submit_bio() -- entry point from the disk layer: stash the owning
 * aac_disk in the bio, queue the bio on the controller's bio queue, and
 * (in the missing tail) kick aac_startio().  NOTE(review): partial listing.
 */
1062 aac_submit_bio(struct aac_disk *ad, struct bio *bio)
1064 	struct aac_softc *sc;
1068 	bio->bio_driver_info = ad;
1069 	sc = ad->ad_controller;
1071 	/* queue the BIO and try to get some work done */
1072 	aac_enqueue_bio(sc, bio);
1077 * Get a bio and build a command to go with it.
/*
 * aac_bio_command() -- dequeue a bio and build a controller command for
 * it, choosing among three FIB formats: RawIo (AAC_FLAGS_RAW_IO),
 * 32-bit VM_CtBlockRead/Write (no SG_64BIT), or 64-bit
 * VM_CtHostRead64/Write64.  On success *cmp receives the command; on
 * failure the bio is re-queued and the command released.
 * NOTE(review): partial listing -- 'bp' is derived from 'bio' in a
 * missing line, as are the success/fail epilogues.
 */
1080 aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
1082 	struct aac_command *cm;
1083 	struct aac_fib *fib;
1084 	struct aac_disk *ad;
1090 	/* get the resources we will need */
1093 	if (aac_alloc_command(sc, &cm))	/* get a command */
1095 	if ((bio = aac_dequeue_bio(sc)) == NULL)
1098 	/* fill out the command */
1100 	cm->cm_data = (void *)bp->b_data;
1101 	cm->cm_datalen = bp->b_bcount;
1102 	cm->cm_complete = aac_bio_complete;
1103 	cm->cm_private = bio;
1104 	cm->cm_timestamp = time_second;
1105 	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
	/* Common FIB header; Size grows below by the request-struct size. */
1109 	fib->Header.Size = sizeof(struct aac_fib_header);
1110 	fib->Header.XferState =
1111 		AAC_FIBSTATE_HOSTOWNED   |
1112 		AAC_FIBSTATE_INITIALISED |
1113 		AAC_FIBSTATE_EMPTY	 |
1114 		AAC_FIBSTATE_FROMHOST	 |
1115 		AAC_FIBSTATE_REXPECTED   |
1117 		AAC_FIBSTATE_ASYNC	 |
1118 		AAC_FIBSTATE_FAST_RESPONSE;
1120 	/* build the read/write request */
1121 	ad = (struct aac_disk *)bio->bio_driver_info;
1123 	if (sc->flags & AAC_FLAGS_RAW_IO) {
1124 		struct aac_raw_io *raw;
1125 		raw = (struct aac_raw_io *)&fib->data[0];
1126 		fib->Header.Command = RawIo;
1127 		raw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1128 		raw->ByteCount = bp->b_bcount;
1129 		raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1131 		raw->BpComplete = 0;
1132 		fib->Header.Size += sizeof(struct aac_raw_io);
1133 		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
1134 		if (bp->b_cmd == BUF_CMD_READ) {
1136 			cm->cm_flags |= AAC_CMD_DATAIN;
1139 			cm->cm_flags |= AAC_CMD_DATAOUT;
1141 	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1142 		fib->Header.Command = ContainerCommand;
1143 		if (bp->b_cmd == BUF_CMD_READ) {
1144 			struct aac_blockread *br;
1145 			br = (struct aac_blockread *)&fib->data[0];
1146 			br->Command = VM_CtBlockRead;
1147 			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1148 			br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1149 			br->ByteCount = bp->b_bcount;
1150 			fib->Header.Size += sizeof(struct aac_blockread);
1151 			cm->cm_sgtable = &br->SgMap;
1152 			cm->cm_flags |= AAC_CMD_DATAIN;
1154 			struct aac_blockwrite *bw;
1155 			bw = (struct aac_blockwrite *)&fib->data[0];
1156 			bw->Command = VM_CtBlockWrite;
1157 			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1158 			bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1159 			bw->ByteCount = bp->b_bcount;
1160 			bw->Stable = CUNSTABLE;
1161 			fib->Header.Size += sizeof(struct aac_blockwrite);
1162 			cm->cm_flags |= AAC_CMD_DATAOUT;
1163 			cm->cm_sgtable = &bw->SgMap;
1166 		fib->Header.Command = ContainerCommand64;
1167 		if (bp->b_cmd == BUF_CMD_READ) {
1168 			struct aac_blockread64 *br;
1169 			br = (struct aac_blockread64 *)&fib->data[0];
1170 			br->Command = VM_CtHostRead64;
1171 			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1172 			br->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE;
1173 			br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1176 			fib->Header.Size += sizeof(struct aac_blockread64);
	/*
	 * NOTE(review): in this 64-bit branch the read path sets DATAOUT and
	 * the write path sets DATAIN -- the inverse of the RawIo and 32-bit
	 * branches above.  Looks like a latent bug; verify against upstream
	 * FreeBSD/DragonFly history before changing.
	 */
1177 			cm->cm_flags |= AAC_CMD_DATAOUT;
1178 			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
1180 			struct aac_blockwrite64 *bw;
1181 			bw = (struct aac_blockwrite64 *)&fib->data[0];
1182 			bw->Command = VM_CtHostWrite64;
1183 			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1184 			bw->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE;
1185 			bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
1188 			fib->Header.Size += sizeof(struct aac_blockwrite64);
1189 			cm->cm_flags |= AAC_CMD_DATAIN;
1190 			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
	/* Failure path: put the bio back and release the command. */
1199 	aac_enqueue_bio(sc, bio);
1201 	aac_release_command(cm);
1206 * Handle a bio-instigated command that has been completed.
/*
 * aac_bio_complete() -- completion callback for bio-originated commands:
 * extract the FSA status from the read or write response in the FIB,
 * release the command, translate non-ST_OK status into B_ERROR plus a
 * human-readable code string, and finish the bio.
 * NOTE(review): partial listing -- 'bp' derivation and the success-path
 * b_resid handling are in missing lines.
 */
1209 aac_bio_complete(struct aac_command *cm)
1211 	struct aac_blockread_response *brr;
1212 	struct aac_blockwrite_response *bwr;
1216 	AAC_FSAStatus status;
1218 	/* fetch relevant status and then release the command */
1219 	bio = (struct bio *)cm->cm_private;
1221 	if (bp->b_cmd == BUF_CMD_READ) {
1222 		brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
1223 		status = brr->Status;
1225 		bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
1226 		status = bwr->Status;
1228 	aac_release_command(cm);
1230 	/* fix up the bio based on status */
1231 	if (status == ST_OK) {
1236 		bp->b_flags |= B_ERROR;
1237 		/* pass an error string out to the disk layer */
1238 		code = aac_describe_code(aac_command_status_table, status);
1240 	aac_biodone(bio, code);
1244 * Dump a block of data to the controller. If the queue is full, tell the
1245 * caller to hold off and wait for the queue to drain.
/*
 * aac_dump_enqueue() -- crash-dump path: build a VM_CtBlockWrite command
 * for (dumppages) pages at (lba) and hand it straight to aac_map_command().
 * No completion callback is set; aac_dump_complete() polls for the result.
 * The KKASSERT documents that this path only supports 32-bit LBAs.
 * NOTE(review): partial listing -- cm_data assignment is in a missing line.
 */
1248 aac_dump_enqueue(struct aac_disk *ad, u_int64_t lba, void *data, int dumppages)
1250 	struct aac_softc *sc;
1251 	struct aac_command *cm;
1252 	struct aac_fib *fib;
1253 	struct aac_blockwrite *bw;
1255 	sc = ad->ad_controller;
1258 	KKASSERT(lba <= 0x100000000ULL);
1260 	if (aac_alloc_command(sc, &cm))
1263 	/* fill out the command */
1265 	cm->cm_datalen = dumppages * PAGE_SIZE;
1266 	cm->cm_complete = NULL;
1267 	cm->cm_private = NULL;
1268 	cm->cm_timestamp = time_second;
1269 	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
1273 	fib->Header.XferState =
1274 		AAC_FIBSTATE_HOSTOWNED   |
1275 		AAC_FIBSTATE_INITIALISED |
1276 		AAC_FIBSTATE_FROMHOST	 |
1277 		AAC_FIBSTATE_REXPECTED   |
1279 	fib->Header.Command = ContainerCommand;
1280 	fib->Header.Size = sizeof(struct aac_fib_header);
1282 	bw = (struct aac_blockwrite *)&fib->data[0];
1283 	bw->Command = VM_CtBlockWrite;
1284 	bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1285 	bw->BlockNumber = lba;
1286 	bw->ByteCount = dumppages * PAGE_SIZE;
1287 	bw->Stable = CUNSTABLE;		/* XXX what's appropriate here? */
1288 	fib->Header.Size += sizeof(struct aac_blockwrite);
1289 	cm->cm_flags |= AAC_CMD_DATAOUT;
1290 	cm->cm_sgtable = &bw->SgMap;
1292 	return (aac_map_command(cm));
1296 * Wait for the card's queue to drain when dumping. Also check for monitor
/*
 * aac_dump_complete() -- polled companion to aac_dump_enqueue(): reap
 * completed response FIBs, service adapter printf requests, and (in the
 * missing tail) compare producer/consumer indices of the adapter command
 * queue to decide whether it has drained.
 * NOTE(review): here SenderData is cast to a command *pointer*, while
 * aac_complete() treats it as an *index* into sc->aac_commands -- confirm
 * which encoding the send path actually stores; partial listing.
 */
1300 aac_dump_complete(struct aac_softc *sc)
1302 	struct aac_fib *fib;
1303 	struct aac_command *cm;
1305 	u_int32_t pi, ci, fib_size;
1308 	reason = AAC_GET_ISTATUS(sc);
1309 	if (reason & AAC_DB_RESPONSE_READY) {
1310 		AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
1312 			if (aac_dequeue_fib(sc,
1313 					    AAC_HOST_NORM_RESP_QUEUE,
1316 			cm = (struct aac_command *)
1317 				fib->Header.SenderData;
1319 				AAC_PRINT_FIB(sc, fib);
1321 			aac_remove_busy(cm);
1322 			aac_unmap_command(cm);
1323 			aac_enqueue_complete(cm);
1324 			aac_release_command(cm);
1328 	if (reason & AAC_DB_PRINTF) {
1329 		AAC_CLEAR_ISTATUS(sc, AAC_DB_PRINTF);
1330 		aac_print_printf(sc);
1332 	pi = sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][
1333 		AAC_PRODUCER_INDEX];
1334 	ci = sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][
1335 		AAC_CONSUMER_INDEX];
1342 * Submit a command to the controller, return when it completes.
1343 * XXX This is very dangerous! If the card has gone out to lunch, we could
1344 * be stuck here forever. At the same time, signals are not caught
1345 * because there is a risk that a signal could wakeup the sleep before
1346 * the card has a chance to complete the command. Since there is no way
1347 * to cancel a command that is in progress, we can't protect against the
1348 * card completing a command late and spamming the command and data
1349 * memory. So, we are held hostage until the command completes.
/*
 * Caller must hold sc->aac_io_lock (asserted below); the lock is
 * dropped across the sleep using the tsleep_interlock protocol so the
 * wakeup from the completion path cannot be lost.
 */
1352 aac_wait_command(struct aac_command *cm)
1354 struct aac_softc *sc;
1361 /* Put the command on the ready queue and get things going */
1362 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
1363 aac_enqueue_ready(cm);
1366 KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0);
/* Interlock before releasing the lock; PINTERLOCKED pairs with it. */
1367 tsleep_interlock(cm, 0);
1368 AAC_LOCK_RELEASE(&sc->aac_io_lock);
1369 error = tsleep(cm, PINTERLOCKED, "aacwait", 0);
1370 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
1375 *Command Buffer Management
1379 * Allocate a command.
/*
 * Pop a command off the freelist into *cmp.  If the freelist is empty
 * and the FIB limit has not been reached, poke the AIF thread to
 * allocate more FIBs (allocation must not happen in this context).
 * Return-value handling is elided from this excerpt.
 */
1382 aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1384 struct aac_command *cm;
1388 if ((cm = aac_dequeue_free(sc)) == NULL) {
1389 if (sc->total_fibs < sc->aac_max_fibs) {
1390 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1391 wakeup(sc->aifthread);
1401 * Release a command back to the freelist.
/*
 * Reset the command and its FIB header to a pristine state, return it
 * to the freelist, and fire one pending "command free" event callback
 * if a waiter is queued on aac_ev_cmfree.
 */
1404 aac_release_command(struct aac_command *cm)
1406 struct aac_event *event;
1407 struct aac_softc *sc;
1411 /* (re)initialise the command/FIB */
1412 cm->cm_sgtable = NULL;
1414 cm->cm_complete = NULL;
1415 cm->cm_private = NULL;
1416 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1417 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1418 cm->cm_fib->Header.Flags = 0;
1419 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1422 * These are duplicated in aac_start to cover the case where an
1423 * intermediate stage may have destroyed them. They're left
1424 * initialised here for debugging purposes only.
1426 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1427 cm->cm_fib->Header.SenderData = 0;
1429 aac_enqueue_free(cm);
/* Wake at most one waiter for a free command (FIFO order). */
1432 event = TAILQ_FIRST(&sc->aac_ev_cmfree);
1433 if (event != NULL) {
1434 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1435 event->ev_callback(sc, event, event->ev_arg);
1440 * Map helper for command/FIB allocation.
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the uint64_t pointed to by 'arg' (the FIB block's physical base).
 */
1443 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1447 fibphys = (uint64_t *)arg;
1451 *fibphys = segs[0].ds_addr;
1455 * Allocate and initialise commands/FIBs for this adapter.
/*
 * Allocate one fibmap's worth (aac_max_fibs_alloc) of FIBs in DMAable
 * memory, wire each FIB to a command slot in sc->aac_commands, create a
 * per-command data DMA map, and link the fibmap onto aac_fibmap_tqh.
 * On a dmamap_create failure the tail code unloads/frees the fibmap
 * (cleanup label lines are elided from this excerpt).
 */
1458 aac_alloc_commands(struct aac_softc *sc)
1460 struct aac_command *cm;
1461 struct aac_fibmap *fm;
/* Respect the adapter's total FIB budget. */
1467 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1470 fm = kmalloc(sizeof(struct aac_fibmap), M_AACBUF, M_INTWAIT | M_ZERO);
1472 /* allocate the FIBs in DMAable memory and load them */
1473 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1474 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1475 device_printf(sc->aac_dev,
1476 "Not enough contiguous memory available.\n");
1477 kfree(fm, M_AACBUF);
1481 /* Ignore errors since this doesn't bounce */
1482 bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1483 sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
1484 aac_map_command_helper, &fibphys, 0);
1486 /* initialise constant fields in the command structure */
1487 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
1488 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1489 cm = sc->aac_commands + sc->total_fibs;
/* fm->aac_commands ends up pointing at this fibmap's first command. */
1490 fm->aac_commands = cm;
1492 cm->cm_fib = (struct aac_fib *)
1493 ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
1494 cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
1495 cm->cm_index = sc->total_fibs;
1497 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1498 &cm->cm_datamap)) != 0)
/* aac_release_command() puts the new command on the freelist. */
1500 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
1501 aac_release_command(cm);
1503 AAC_LOCK_RELEASE(&sc->aac_io_lock);
1507 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
1508 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1509 debug(1, "total_fibs= %d\n", sc->total_fibs);
1510 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/* Error-path cleanup: undo DMA load/alloc and free the fibmap. */
1514 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1515 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1516 kfree(fm, M_AACBUF);
1521 * Free FIBs owned by this adapter.
/*
 * Tear down every fibmap: destroy each command's data DMA map, then
 * unload and free the fibmap's FIB memory and the fibmap itself.
 */
1524 aac_free_commands(struct aac_softc *sc)
1526 struct aac_fibmap *fm;
1527 struct aac_command *cm;
1532 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1534 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1536 * We check against total_fibs to handle partially
1539 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1540 cm = fm->aac_commands + i;
1541 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1543 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1544 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1545 kfree(fm, M_AACBUF);
1550 * Command-mapping helper function - populate this command's s/g table.
/*
 * bus_dmamap_load() callback for a command's data buffer: fill the
 * command's scatter/gather table (raw, 32-bit, or 64-bit entry format
 * depending on the FIB command and adapter flags), fix up the FIB
 * addressing fields, sync the data map, and dispatch the command --
 * either directly via AAC_SEND_COMMAND (new-comm interface) or by
 * enqueueing the FIB on the outbound queue.  On a busy adapter the
 * command is unmapped and requeued with the queue frozen.
 */
1553 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1555 struct aac_softc *sc;
1556 struct aac_command *cm;
1557 struct aac_fib *fib;
1562 cm = (struct aac_command *)arg;
1566 /* copy into the FIB */
1567 if (cm->cm_sgtable != NULL) {
/* RawIo FIBs use the extended "raw" s/g entry layout. */
1568 if (fib->Header.Command == RawIo) {
1569 struct aac_sg_tableraw *sg;
1570 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1572 for (i = 0; i < nseg; i++) {
1573 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1574 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1575 sg->SgEntryRaw[i].Next = 0;
1576 sg->SgEntryRaw[i].Prev = 0;
1577 sg->SgEntryRaw[i].Flags = 0;
1579 /* update the FIB size for the s/g count */
1580 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1581 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1582 struct aac_sg_table *sg;
1583 sg = cm->cm_sgtable;
1585 for (i = 0; i < nseg; i++) {
1586 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1587 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1589 /* update the FIB size for the s/g count */
1590 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
/* 64-bit s/g addressing variant. */
1592 struct aac_sg_table64 *sg;
1593 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1595 for (i = 0; i < nseg; i++) {
1596 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1597 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1599 /* update the FIB size for the s/g count */
1600 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1604 /* Fix up the address values in the FIB. Use the command array index
1605 * instead of a pointer since these fields are only 32 bits. Shift
1606 * the SenderFibAddress over to make room for the fast response bit
1607 * and for the AIF bit
1609 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1610 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1612 /* save a pointer to the command for speedy reverse-lookup */
1613 cm->cm_fib->Header.SenderData = cm->cm_index;
1615 if (cm->cm_flags & AAC_CMD_DATAIN)
1616 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1617 BUS_DMASYNC_PREREAD);
1618 if (cm->cm_flags & AAC_CMD_DATAOUT)
1619 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1620 BUS_DMASYNC_PREWRITE);
1621 cm->cm_flags |= AAC_CMD_MAPPED;
/* New-comm adapters take the command directly; retry with a bounded
 * spin if the adapter reports busy. */
1623 if (sc->flags & AAC_FLAGS_NEW_COMM) {
1624 int count = 10000000L;
1625 while (AAC_SEND_COMMAND(sc, cm) != 0) {
1627 aac_unmap_command(cm);
1628 sc->flags |= AAC_QUEUE_FRZN;
1629 aac_requeue_ready(cm);
1632 DELAY(5); /* wait 5 usec. */
1635 /* Put the FIB on the outbound queue */
1636 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
1637 aac_unmap_command(cm);
1638 sc->flags |= AAC_QUEUE_FRZN;
1639 aac_requeue_ready(cm);
1645 * Unmap a command from controller-visible space.
/*
 * Reverse aac_map_command_sg(): post-sync and unload the data DMA map
 * (only if data was attached), then clear AAC_CMD_MAPPED.  No-op when
 * the command was never mapped.
 */
1648 aac_unmap_command(struct aac_command *cm)
1650 struct aac_softc *sc;
1656 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1659 if (cm->cm_datalen != 0) {
1660 if (cm->cm_flags & AAC_CMD_DATAIN)
1661 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1662 BUS_DMASYNC_POSTREAD);
1663 if (cm->cm_flags & AAC_CMD_DATAOUT)
1664 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1665 BUS_DMASYNC_POSTWRITE);
1667 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1669 cm->cm_flags &= ~AAC_CMD_MAPPED;
1673 * Hardware Interface
1677 * Initialise the adapter.
/*
 * bus_dmamap_load() callback for the common structure: record its bus
 * address in the softc.
 */
1680 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1682 struct aac_softc *sc;
1686 sc = (struct aac_softc *)arg;
1688 sc->aac_common_busaddr = segs[0].ds_addr;
/*
 * Interrogate the adapter firmware and configure driver capabilities:
 *  - reject PERC2/QC cards running unsupported 1.x firmware,
 *  - read the supported-options word and set feature flags
 *    (4GB window, CAM pass-through, 64-bit s/g, new-comm, 64-bit arrays),
 *  - re-map the register window if new-comm needs a larger ATU,
 *  - read preferred FIB size / max sectors / s/g table size / max FIBs
 *    via GETCOMMPREF, clamping the FIB size to one page.
 */
1692 aac_check_firmware(struct aac_softc *sc)
1694 u_int32_t major, minor, options = 0, atu_size = 0;
1700 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1701 * firmware version 1.x are not compatible with this driver.
1703 if (sc->flags & AAC_FLAGS_PERC2QC) {
1704 if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1706 device_printf(sc->aac_dev,
1707 "Error reading firmware version\n");
1711 /* These numbers are stored as ASCII! */
1712 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1713 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1715 device_printf(sc->aac_dev,
1716 "Firmware version %d.%d is not supported.\n",
1723 * Retrieve the capabilities/supported options word so we know what
1724 * work-arounds to enable. Some firmware revs don't support this
1727 if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
/* INVALID_REQUEST just means old firmware; anything else is fatal. */
1728 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1729 device_printf(sc->aac_dev,
1730 "RequestAdapterInfo failed\n");
1734 options = AAC_GET_MAILBOX(sc, 1);
1735 atu_size = AAC_GET_MAILBOX(sc, 2);
1736 sc->supported_options = options;
1738 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1739 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1740 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1741 if (options & AAC_SUPPORTED_NONDASD)
1742 sc->flags |= AAC_FLAGS_ENABLE_CAM;
/* 64-bit s/g only makes sense when bus addresses exceed 32 bits. */
1743 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1744 && (sizeof(bus_addr_t) > 4)) {
1745 device_printf(sc->aac_dev,
1746 "Enabling 64-bit address support\n");
1747 sc->flags |= AAC_FLAGS_SG_64BIT;
1749 if ((options & AAC_SUPPORTED_NEW_COMM)
1750 && sc->aac_if.aif_send_command)
1751 sc->flags |= AAC_FLAGS_NEW_COMM;
1752 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1753 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1756 /* Check for broken hardware that does a lower number of commands */
1757 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1759 /* Remap mem. resource, if required */
1760 if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
1761 atu_size > rman_get_size(sc->aac_regs_resource)) {
1762 bus_release_resource(
1763 sc->aac_dev, SYS_RES_MEMORY,
1764 sc->aac_regs_rid, sc->aac_regs_resource);
1765 sc->aac_regs_resource = bus_alloc_resource(
1766 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid,
1767 0ul, ~0ul, atu_size, RF_ACTIVE);
/* Fall back to the original window and disable new-comm if the
 * larger mapping cannot be obtained. */
1768 if (sc->aac_regs_resource == NULL) {
1769 sc->aac_regs_resource = bus_alloc_resource_any(
1770 sc->aac_dev, SYS_RES_MEMORY,
1771 &sc->aac_regs_rid, RF_ACTIVE);
1772 if (sc->aac_regs_resource == NULL) {
1773 device_printf(sc->aac_dev,
1774 "couldn't allocate register window\n");
1777 sc->flags &= ~AAC_FLAGS_NEW_COMM;
1779 sc->aac_btag = rman_get_bustag(sc->aac_regs_resource);
1780 sc->aac_bhandle = rman_get_bushandle(sc->aac_regs_resource);
1783 /* Read preferred settings */
1784 sc->aac_max_fib_size = sizeof(struct aac_fib);
1785 sc->aac_max_sectors = 128; /* 64KB */
1786 if (sc->flags & AAC_FLAGS_SG_64BIT)
1787 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1788 - sizeof(struct aac_blockwrite64)
1789 + sizeof(struct aac_sg_table64))
1790 / sizeof(struct aac_sg_table64);
1792 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1793 - sizeof(struct aac_blockwrite)
1794 + sizeof(struct aac_sg_table))
1795 / sizeof(struct aac_sg_table);
/* Newer firmware reports its preferred limits; use them if present. */
1797 if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
1798 options = AAC_GET_MAILBOX(sc, 1);
1799 sc->aac_max_fib_size = (options & 0xFFFF);
1800 sc->aac_max_sectors = (options >> 16) << 1;
1801 options = AAC_GET_MAILBOX(sc, 2);
1802 sc->aac_sg_tablesize = (options >> 16);
1803 options = AAC_GET_MAILBOX(sc, 3);
1804 sc->aac_max_fibs = (options & 0xFFFF);
/* FIBs are allocated a page at a time; cap the FIB size accordingly. */
1806 if (sc->aac_max_fib_size > PAGE_SIZE)
1807 sc->aac_max_fib_size = PAGE_SIZE;
1808 sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;
/*
 * Bring the adapter up:
 *  1. spin (with AAC_BOOT_TIMEOUT) until firmware reports UP_AND_RUNNING,
 *  2. create the buffer, FIB and common-structure DMA tags,
 *  3. allocate/load the common structure, working around 2120/2200
 *     hardware that cannot DMA below physical address 8192,
 *  4. preallocate FIBs/commands,
 *  5. fill in the adapter init structure (FIB areas, printf buffer,
 *     host memory size, new-comm flags, I/O limits),
 *  6. lay out and initialise the eight host/adapter FIB queues,
 *  7. clear the chipset doorbell and send AAC_MONKER_INITSTRUCT.
 */
1814 aac_init(struct aac_softc *sc)
1816 struct aac_adapter_init *ip;
1818 u_int32_t code, qoffset;
1824 * First wait for the adapter to come ready.
1828 code = AAC_GET_FWSTATUS(sc);
1829 if (code & AAC_SELF_TEST_FAILED) {
1830 device_printf(sc->aac_dev, "FATAL: selftest failed\n");
1833 if (code & AAC_KERNEL_PANIC) {
1834 device_printf(sc->aac_dev,
1835 "FATAL: controller kernel panic\n");
1838 if (time_second > (then + AAC_BOOT_TIMEOUT)) {
1839 device_printf(sc->aac_dev,
1840 "FATAL: controller not coming ready, "
1841 "status %x\n", code);
1844 } while (!(code & AAC_UP_AND_RUNNING));
1848 * Create DMA tag for mapping buffers into controller-addressable space.
1850 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1851 1, 0, /* algnmnt, boundary */
1852 (sc->flags & AAC_FLAGS_SG_64BIT) ?
1854 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1855 BUS_SPACE_MAXADDR, /* highaddr */
1856 NULL, NULL, /* filter, filterarg */
1857 MAXBSIZE, /* maxsize */
1858 sc->aac_sg_tablesize, /* nsegments */
1859 MAXBSIZE, /* maxsegsize */
1860 BUS_DMA_ALLOCNOW, /* flags */
1861 &sc->aac_buffer_dmat)) {
1862 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
1867 * Create DMA tag for mapping FIBs into controller-addressable space..
1869 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1870 1, 0, /* algnmnt, boundary */
1871 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
1872 BUS_SPACE_MAXADDR_32BIT :
1873 0x7fffffff, /* lowaddr */
1874 BUS_SPACE_MAXADDR, /* highaddr */
1875 NULL, NULL, /* filter, filterarg */
1876 sc->aac_max_fibs_alloc *
1877 sc->aac_max_fib_size, /* maxsize */
1879 sc->aac_max_fibs_alloc *
1880 sc->aac_max_fib_size, /* maxsegsize */
1882 &sc->aac_fib_dmat)) {
1883 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
1888 * Create DMA tag for the common structure and allocate it.
1890 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1891 1, 0, /* algnmnt, boundary */
1892 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
1893 BUS_SPACE_MAXADDR_32BIT :
1894 0x7fffffff, /* lowaddr */
1895 BUS_SPACE_MAXADDR, /* highaddr */
1896 NULL, NULL, /* filter, filterarg */
1897 8192 + sizeof(struct aac_common), /* maxsize */
1899 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1901 &sc->aac_common_dmat)) {
1902 device_printf(sc->aac_dev,
1903 "can't allocate common structure DMA tag\n");
1906 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
1907 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
1908 device_printf(sc->aac_dev, "can't allocate common structure\n");
1912 * Work around a bug in the 2120 and 2200 that cannot DMA commands
1913 * below address 8192 in physical memory.
1914 * XXX If the padding is not needed, can it be put to use instead
1917 bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
1918 sc->aac_common, 8192 + sizeof(*sc->aac_common),
1919 aac_common_map, sc, 0);
/* Skip past the first 8K if the allocation landed below it. */
1921 if (sc->aac_common_busaddr < 8192) {
1923 (struct aac_common *)((uint8_t *)sc->aac_common + 8192);
1924 sc->aac_common_busaddr += 8192;
1926 bzero(sc->aac_common, sizeof(*sc->aac_common));
1928 /* Allocate some FIBs and associated command structs */
1929 TAILQ_INIT(&sc->aac_fibmap_tqh);
1930 sc->aac_commands = kmalloc(sc->aac_max_fibs * sizeof(struct aac_command),
1931 M_AACBUF, M_INTWAIT | M_ZERO);
1932 while (sc->total_fibs < AAC_PREALLOCATE_FIBS) {
1933 if (aac_alloc_commands(sc) != 0)
1936 if (sc->total_fibs == 0)
1940 * Fill in the init structure. This tells the adapter about the
1941 * physical location of various important shared data structures.
1943 ip = &sc->aac_common->ac_init;
1944 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
/* Large FIBs imply the raw-I/O capable init structure revision. */
1945 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1946 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1947 sc->flags |= AAC_FLAGS_RAW_IO;
1949 ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;
1951 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1952 offsetof(struct aac_common, ac_fibs);
1953 ip->AdapterFibsVirtualAddress = 0;
1954 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1955 ip->AdapterFibAlign = sizeof(struct aac_fib);
1957 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1958 offsetof(struct aac_common, ac_printf);
1959 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1962 * The adapter assumes that pages are 4K in size, except on some
1963 * broken firmware versions that do the page->byte conversion twice,
1964 * therefore 'assuming' that this value is in 16MB units (2^24).
1965 * Round up since the granularity is so high.
1967 /* XXX why should the adapter care? */
1968 ip->HostPhysMemPages = ctob((int)Maxmem) / AAC_PAGE_SIZE;
1969 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1970 ip->HostPhysMemPages =
1971 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1973 ip->HostElapsedSeconds = time_second; /* reset later if invalid */
1976 if (sc->flags & AAC_FLAGS_NEW_COMM) {
1977 ip->InitFlags = INITFLAGS_NEW_COMM_SUPPORTED;
1978 device_printf(sc->aac_dev, "New comm. interface enabled\n");
1981 ip->MaxIoCommands = sc->aac_max_fibs;
1982 ip->MaxIoSize = sc->aac_max_sectors << 9;
1983 ip->MaxFibSize = sc->aac_max_fib_size;
1986 * Initialise FIB queues. Note that it appears that the layout of the
1987 * indexes and the segmentation of the entries may be mandated by the
1988 * adapter, which is only told about the base of the queue index fields.
1990 * The initial values of the indices are assumed to inform the adapter
1991 * of the sizes of the respective queues, and theoretically it could
1992 * work out the entire layout of the queue structures from this. We
1993 * take the easy route and just lay this area out like everyone else
1996 * The Linux driver uses a much more complex scheme whereby several
1997 * header records are kept for each queue. We use a couple of generic
1998 * list manipulation functions which 'know' the size of each list by
1999 * virtue of a table.
2001 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
2002 qoffset &= ~(AAC_QUEUE_ALIGN - 1);
2004 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset);
2005 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset;
/* Seed each queue's producer/consumer indices with its entry count. */
2007 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
2008 AAC_HOST_NORM_CMD_ENTRIES;
2009 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
2010 AAC_HOST_NORM_CMD_ENTRIES;
2011 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
2012 AAC_HOST_HIGH_CMD_ENTRIES;
2013 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
2014 AAC_HOST_HIGH_CMD_ENTRIES;
2015 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
2016 AAC_ADAP_NORM_CMD_ENTRIES;
2017 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
2018 AAC_ADAP_NORM_CMD_ENTRIES;
2019 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
2020 AAC_ADAP_HIGH_CMD_ENTRIES;
2021 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
2022 AAC_ADAP_HIGH_CMD_ENTRIES;
2023 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
2024 AAC_HOST_NORM_RESP_ENTRIES;
2025 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
2026 AAC_HOST_NORM_RESP_ENTRIES;
2027 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
2028 AAC_HOST_HIGH_RESP_ENTRIES;
2029 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
2030 AAC_HOST_HIGH_RESP_ENTRIES;
2031 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
2032 AAC_ADAP_NORM_RESP_ENTRIES;
2033 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
2034 AAC_ADAP_NORM_RESP_ENTRIES;
2035 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
2036 AAC_ADAP_HIGH_RESP_ENTRIES;
2037 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
2038 AAC_ADAP_HIGH_RESP_ENTRIES;
/* Cache pointers to each queue's entry array for the enqueue/dequeue
 * helpers. */
2039 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] =
2040 &sc->aac_queues->qt_HostNormCmdQueue[0];
2041 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] =
2042 &sc->aac_queues->qt_HostHighCmdQueue[0];
2043 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] =
2044 &sc->aac_queues->qt_AdapNormCmdQueue[0];
2045 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] =
2046 &sc->aac_queues->qt_AdapHighCmdQueue[0];
2047 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] =
2048 &sc->aac_queues->qt_HostNormRespQueue[0];
2049 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] =
2050 &sc->aac_queues->qt_HostHighRespQueue[0];
2051 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] =
2052 &sc->aac_queues->qt_AdapNormRespQueue[0];
2053 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] =
2054 &sc->aac_queues->qt_AdapHighRespQueue[0];
2057 * Do controller-type-specific initialisation
2059 switch (sc->aac_hwif) {
2060 case AAC_HWIF_I960RX:
/* Clear the outbound doorbell before handing over the init struct. */
2061 AAC_SETREG4(sc, AAC_RX_ODBR, ~0);
2064 AAC_SETREG4(sc, AAC_RKT_ODBR, ~0);
2071 * Give the init structure to the controller.
2073 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT,
2074 sc->aac_common_busaddr +
2075 offsetof(struct aac_common, ac_init), 0, 0, 0,
2077 device_printf(sc->aac_dev,
2078 "error establishing init structure\n");
2089 * Send a synchronous command to the controller and wait for a result.
2090 * Indicate if the controller completed the command with an error status.
/*
 * Mailbox-based synchronous command: write the command and four args,
 * ring the sync-command doorbell, spin (with AAC_IMMEDIATE_TIMEOUT)
 * until the adapter raises the completion doorbell, then read the
 * status word from mailbox 0.
 */
2093 aac_sync_command(struct aac_softc *sc, u_int32_t command,
2094 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2102 /* populate the mailbox */
2103 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2105 /* ensure the sync command doorbell flag is cleared */
2106 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2108 /* then set it to signal the adapter */
2109 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2111 /* spin waiting for the command to complete */
2114 if (time_second > (then + AAC_IMMEDIATE_TIMEOUT)) {
2115 debug(1, "timed out");
2118 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2120 /* clear the completion flag */
2121 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2123 /* get the command status */
2124 status = AAC_GET_MAILBOX(sc, 0);
2128 if (status != AAC_SRB_STS_SUCCESS)
/*
 * Send a FIB synchronously via the AAC_MONKER_SYNCFIB mailbox command.
 * 'fib' is the shared sync-FIB in the common structure (its bus address
 * is computed below); 'datasize' bytes of payload follow the header.
 * Caller must hold aac_io_lock (asserted).
 */
2134 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2135 struct aac_fib *fib, u_int16_t datasize)
2138 KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0);
/* Payload must fit in the FIB data area. */
2140 if (datasize > AAC_FIB_DATASIZE)
2144 * Set up the sync FIB
2146 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2147 AAC_FIBSTATE_INITIALISED |
2149 fib->Header.XferState |= xferstate;
2150 fib->Header.Command = command;
2151 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2152 fib->Header.Size = sizeof(struct aac_fib) + datasize;
2153 fib->Header.SenderSize = sizeof(struct aac_fib);
2154 fib->Header.SenderFibAddress = 0; /* Not needed */
2155 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
2156 offsetof(struct aac_common,
2160 * Give the FIB to the controller, wait for a response.
2162 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
2163 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
2164 debug(2, "IO error");
2172 * Adapter-space FIB queue manipulation
2174 * Note that the queue implementation here is a little funky; neither the PI or
2175 * CI will ever be zero. This behaviour is a controller feature.
/*
 * Per-queue size and doorbell-notify bit, indexed by the
 * AAC_*_{CMD,RESP}_QUEUE constants.  A zero notify bit means the
 * adapter is not signalled for that (high-priority, unused) queue.
 */
2181 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
2182 {AAC_HOST_HIGH_CMD_ENTRIES, 0},
2183 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
2184 {AAC_ADAP_HIGH_CMD_ENTRIES, 0},
2185 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
2186 {AAC_HOST_HIGH_RESP_ENTRIES, 0},
2187 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
2188 {AAC_ADAP_HIGH_RESP_ENTRIES, 0}
2192 * Atomically insert an entry into the nominated queue, returns 0 on success or
2193 * EBUSY if the queue is full.
2195 * Note: it would be more efficient to defer notifying the controller in
2196 * the case where we may be inserting several entries in rapid succession,
2197 * but implementing this usefully may be difficult (it would involve a
2198 * separate queue/notify interface).
2201 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
/* FIB size and bus address are advertised through the queue entry. */
2210 fib_size = cm->cm_fib->Header.Size;
2211 fib_addr = cm->cm_fib->Header.ReceiverFibAddress;
2213 /* get the producer/consumer indices */
2214 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2215 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2217 /* wrap the queue? */
2218 if (pi >= aac_qinfo[queue].size)
2221 /* check for queue full */
2222 if ((pi + 1) == ci) {
2227 * To avoid a race with its completion interrupt, place this command on
2228 * the busy queue prior to advertising it to the controller.
2230 aac_enqueue_busy(cm);
2234 /* populate queue entry */
2235 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2236 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2238 /* update producer index */
2239 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2241 /* notify the adapter if we know how */
2242 if (aac_qinfo[queue].notify != 0)
2243 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2252 * Atomically remove one entry from the nominated queue, returns 0 on
2253 * success or ENOENT if the queue is empty.
/*
 * Consume one entry: the 32-bit aq_fib_addr is an index, not a
 * pointer -- into the adapter-FIB array for host command queues, or
 * (shifted by 2 to strip the fast-response/AIF bits) into the command
 * array for host response queues.  Fast responses also patch up the
 * local FIB copy since the adapter does not DMA it back.
 */
2256 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
2257 struct aac_fib **fib_addr)
2260 u_int32_t fib_index;
2266 /* get the producer/consumer indices */
2267 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2268 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2270 /* check for queue empty */
2276 /* wrap the pi so the following test works */
2277 if (pi >= aac_qinfo[queue].size)
2284 /* wrap the queue? */
2285 if (ci >= aac_qinfo[queue].size)
2288 /* fetch the entry */
2289 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;
2292 case AAC_HOST_NORM_CMD_QUEUE:
2293 case AAC_HOST_HIGH_CMD_QUEUE:
2295 * The aq_fib_addr is only 32 bits wide so it can't be counted
2296 * on to hold an address. For AIF's, the adapter assumes
2297 * that it's giving us an address into the array of AIF fibs.
2298 * Therefore, we have to convert it to an index.
2300 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
2301 sizeof(struct aac_fib);
2302 *fib_addr = &sc->aac_common->ac_fibs[fib_index];
2305 case AAC_HOST_NORM_RESP_QUEUE:
2306 case AAC_HOST_HIGH_RESP_QUEUE:
2308 struct aac_command *cm;
2311 * As above, an index is used instead of an actual address.
2312 * Gotta shift the index to account for the fast response
2313 * bit. No other correction is needed since this value was
2314 * originally provided by the driver via the SenderFibAddress
2317 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
2318 cm = sc->aac_commands + (fib_index >> 2);
2319 *fib_addr = cm->cm_fib;
2322 * Is this a fast response? If it is, update the fib fields in
2323 * local memory since the whole fib isn't DMA'd back up.
2325 if (fib_index & 0x01) {
2326 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
2327 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
2332 panic("Invalid queue in aac_dequeue_fib()");
2336 /* update consumer index */
2337 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;
2339 /* if we have made the queue un-full, notify the adapter */
2340 if (notify && (aac_qinfo[queue].notify != 0))
2341 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2349 * Put our response to an Adapter Initialed Fib on the response queue
/*
 * Mirror of aac_enqueue_fib() for responses to adapter-initiated FIBs
 * (AIFs): echo the adapter's SenderFibAddress back as the
 * ReceiverFibAddress and post the entry on the given response queue.
 */
2352 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
2361 /* Tell the adapter where the FIB is */
2362 fib_size = fib->Header.Size;
2363 fib_addr = fib->Header.SenderFibAddress;
2364 fib->Header.ReceiverFibAddress = fib_addr;
2366 /* get the producer/consumer indices */
2367 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2368 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2370 /* wrap the queue? */
2371 if (pi >= aac_qinfo[queue].size)
2374 /* check for queue full */
2375 if ((pi + 1) == ci) {
2380 /* populate queue entry */
2381 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2382 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2384 /* update producer index */
2385 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2387 /* notify the adapter if we know how */
2388 if (aac_qinfo[queue].notify != 0)
2389 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2398 * Check for commands that have been outstanding for a suspiciously long time,
2399 * and complain about them.
/*
 * Periodic watchdog: flag and report any busy command older than
 * AAC_CMD_TIMEOUT seconds, then verify the firmware still reports
 * UP_AND_RUNNING.  Commands are only reported, never aborted (the
 * hardware offers no cancel).
 */
2402 aac_timeout(void *xsc)
2404 struct aac_softc *sc = xsc;
2405 struct aac_command *cm;
2409 * Traverse the busy command list, bitch about late commands once
2413 deadline = time_second - AAC_CMD_TIMEOUT;
2414 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2415 if ((cm->cm_timestamp < deadline)
2416 /* && !(cm->cm_flags & AAC_CMD_TIMEDOUT) */) {
2417 cm->cm_flags |= AAC_CMD_TIMEDOUT;
2418 device_printf(sc->aac_dev,
2419 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2420 cm, (int)(time_second-cm->cm_timestamp));
2421 AAC_PRINT_FIB(sc, cm->cm_fib);
2426 code = AAC_GET_FWSTATUS(sc);
2427 if (code != AAC_UP_AND_RUNNING) {
2428 device_printf(sc->aac_dev, "WARNING! Controller is no "
2429 "longer running! code= 0x%x\n", code);
2436 * Interface Function Vectors
2440 * Read the current firmware status word.
/* Per-chipset (StrongARM, i960Rx, Falcon, Rocket) register reads. */
2443 aac_sa_get_fwstatus(struct aac_softc *sc)
2447 return(AAC_GETREG4(sc, AAC_SA_FWSTATUS));
2451 aac_rx_get_fwstatus(struct aac_softc *sc)
2455 return(AAC_GETREG4(sc, AAC_RX_FWSTATUS));
2459 aac_fa_get_fwstatus(struct aac_softc *sc)
2465 val = AAC_GETREG4(sc, AAC_FA_FWSTATUS);
2470 aac_rkt_get_fwstatus(struct aac_softc *sc)
2474 return(AAC_GETREG4(sc, AAC_RKT_FWSTATUS));
2478 * Notify the controller of a change in a given queue
/* Ring the appropriate inbound doorbell register per chipset. */
2482 aac_sa_qnotify(struct aac_softc *sc, int qbit)
2486 AAC_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit);
2490 aac_rx_qnotify(struct aac_softc *sc, int qbit)
2494 AAC_SETREG4(sc, AAC_RX_IDBR, qbit);
2498 aac_fa_qnotify(struct aac_softc *sc, int qbit)
2502 AAC_SETREG2(sc, AAC_FA_DOORBELL1, qbit);
2507 aac_rkt_qnotify(struct aac_softc *sc, int qbit)
2511 AAC_SETREG4(sc, AAC_RKT_IDBR, qbit);
2515 * Get the interrupt reason bits
/* Read the outbound doorbell register per chipset. */
2518 aac_sa_get_istatus(struct aac_softc *sc)
2522 return(AAC_GETREG2(sc, AAC_SA_DOORBELL0));
2526 aac_rx_get_istatus(struct aac_softc *sc)
2530 return(AAC_GETREG4(sc, AAC_RX_ODBR));
2534 aac_fa_get_istatus(struct aac_softc *sc)
2540 val = AAC_GETREG2(sc, AAC_FA_DOORBELL0);
2545 aac_rkt_get_istatus(struct aac_softc *sc)
2549 return(AAC_GETREG4(sc, AAC_RKT_ODBR));
2553 * Clear some interrupt reason bits
/* Acknowledge outbound doorbell bits per chipset. */
2556 aac_sa_clear_istatus(struct aac_softc *sc, int mask)
2560 AAC_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
2564 aac_rx_clear_istatus(struct aac_softc *sc, int mask)
2568 AAC_SETREG4(sc, AAC_RX_ODBR, mask);
2572 aac_fa_clear_istatus(struct aac_softc *sc, int mask)
2576 AAC_SETREG2(sc, AAC_FA_DOORBELL0_CLEAR, mask);
2581 aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
2585 AAC_SETREG4(sc, AAC_RKT_ODBR, mask);
2589 * Populate the mailbox and set the command word
/* Write command word plus four arguments into the mailbox registers
 * (command at offset 0, args at +4..+16) per chipset. */
2592 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
2593 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2597 AAC_SETREG4(sc, AAC_SA_MAILBOX, command);
2598 AAC_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
2599 AAC_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
2600 AAC_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
2601 AAC_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
2605 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
2606 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2610 AAC_SETREG4(sc, AAC_RX_MAILBOX, command);
2611 AAC_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
2612 AAC_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
2613 AAC_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
2614 AAC_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
2618 aac_fa_set_mailbox(struct aac_softc *sc, u_int32_t command,
2619 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2623 AAC_SETREG4(sc, AAC_FA_MAILBOX, command);
2625 AAC_SETREG4(sc, AAC_FA_MAILBOX + 4, arg0);
2627 AAC_SETREG4(sc, AAC_FA_MAILBOX + 8, arg1);
2629 AAC_SETREG4(sc, AAC_FA_MAILBOX + 12, arg2);
2631 AAC_SETREG4(sc, AAC_FA_MAILBOX + 16, arg3);
2636 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2637 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2641 AAC_SETREG4(sc, AAC_RKT_MAILBOX, command);
2642 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
2643 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
2644 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
2645 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
/*
 * Read back 32-bit mailbox word 'mb' (word 0 is the immediate command
 * status).  Same word layout across all four interface variants.
 */
2649 * Fetch the immediate command status word
2652 aac_sa_get_mailbox(struct aac_softc *sc, int mb)
2656 return(AAC_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
2660 aac_rx_get_mailbox(struct aac_softc *sc, int mb)
2664 return(AAC_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
2668 aac_fa_get_mailbox(struct aac_softc *sc, int mb)
/* FA variant reads into a local; the return of 'val' is elided in this view */
2674 val = AAC_GETREG4(sc, AAC_FA_MAILBOX + (mb * 4));
2679 aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
2683 return(AAC_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
/*
 * Enable or disable adapter interrupts.  The if/else keywords around the
 * register writes are elided in this view: the first write in each pair is
 * the enable path (unmask AAC_DB_INTERRUPTS, or only the new-comm doorbell
 * bit when AAC_FLAGS_NEW_COMM is set on Rx/Rocket), the last is the
 * disable path (mask everything with ~0).
 */
2687 * Set/clear interrupt masks
2690 aac_sa_set_interrupts(struct aac_softc *sc, int enable)
2692 debug(2, "%sable interrupts", enable ? "en" : "dis");
2695 AAC_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
2697 AAC_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
2702 aac_rx_set_interrupts(struct aac_softc *sc, int enable)
2704 debug(2, "%sable interrupts", enable ? "en" : "dis");
2707 if (sc->flags & AAC_FLAGS_NEW_COMM)
2708 AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
2710 AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
2712 AAC_SETREG4(sc, AAC_RX_OIMR, ~0);
2717 aac_fa_set_interrupts(struct aac_softc *sc, int enable)
2719 debug(2, "%sable interrupts", enable ? "en" : "dis");
2722 AAC_SETREG2((sc), AAC_FA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
2725 AAC_SETREG2((sc), AAC_FA_MASK0, ~0);
2731 aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
2733 debug(2, "%sable interrupts", enable ? "en" : "dis");
2736 if (sc->flags & AAC_FLAGS_NEW_COMM)
2737 AAC_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
2739 AAC_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
2741 AAC_SETREG4(sc, AAC_RKT_OIMR, ~0);
/*
 * New-comm submission on i960Rx: pop a slot index from the inbound queue
 * register; 0xffffffff means no slot free, so re-read once before giving
 * up (the failure return is elided in this view).  The command is then
 * placed on the busy list and its FIB physical address (split into
 * low/high 32-bit halves) and size are written to the slot; writing the
 * index back to IQUE submits it.
 * NOTE(review): the line initialising 'device' from 'index' is elided —
 * presumably device = a register offset derived from index; confirm
 * against the full source.
 */
2746 * New comm. interface: Send command functions
2749 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
2751 u_int32_t index, device;
2753 debug(2, "send command (new comm.)");
2755 index = AAC_GETREG4(sc, AAC_RX_IQUE);
2756 if (index == 0xffffffffL)
2757 index = AAC_GETREG4(sc, AAC_RX_IQUE);
2758 if (index == 0xffffffffL)
2760 aac_enqueue_busy(cm);
2762 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2764 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2766 AAC_SETREG4(sc, device, cm->cm_fib->Header.Size);
2767 AAC_SETREG4(sc, AAC_RX_IQUE, index);
/*
 * New-comm submission on Rocket — same protocol as aac_rx_send_command:
 * pop a slot index (retry once on 0xffffffff), enqueue the command busy,
 * write the FIB address halves and size, then post the index to IQUE.
 * NOTE(review): 'device' initialisation and the queue-full return are
 * elided in this view.
 */
2772 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
2774 u_int32_t index, device;
2776 debug(2, "send command (new comm.)");
2778 index = AAC_GETREG4(sc, AAC_RKT_IQUE);
2779 if (index == 0xffffffffL)
2780 index = AAC_GETREG4(sc, AAC_RKT_IQUE);
2781 if (index == 0xffffffffL)
2783 aac_enqueue_busy(cm);
2785 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2787 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2789 AAC_SETREG4(sc, device, cm->cm_fib->Header.Size);
2790 AAC_SETREG4(sc, AAC_RKT_IQUE, index);
/*
 * New-comm outbound (response) queue index accessors for the Rx and
 * Rocket interfaces: straight reads/writes of the OQUE register.
 */
2795 * New comm. interface: get, set outbound queue index
2798 aac_rx_get_outb_queue(struct aac_softc *sc)
2802 return(AAC_GETREG4(sc, AAC_RX_OQUE));
2806 aac_rkt_get_outb_queue(struct aac_softc *sc)
2810 return(AAC_GETREG4(sc, AAC_RKT_OQUE));
2814 aac_rx_set_outb_queue(struct aac_softc *sc, int index)
2818 AAC_SETREG4(sc, AAC_RX_OQUE, index);
2822 aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
2826 AAC_SETREG4(sc, AAC_RKT_OQUE, index);
/*
 * Query the adapter with a synchronous RequestAdapterInfo FIB and print a
 * summary at attach time: driver version, CPU/clock/memory figures,
 * battery platform, firmware kernel revision and supported options.  Also
 * caches the firmware revision in sc->aac_revision for later rev checks
 * (aac_rev_check).  Holds aac_io_lock around the sync FIB; bails out
 * early (FIB released, lock dropped) if the request fails.
 */
2830 * Debugging and Diagnostics
2834 * Print some information about the controller.
2837 aac_describe_controller(struct aac_softc *sc)
2839 struct aac_fib *fib;
2840 struct aac_adapter_info *info;
2844 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
2845 aac_alloc_sync_fib(sc, &fib);
2848 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2849 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2850 aac_release_sync_fib(sc);
2851 AAC_LOCK_RELEASE(&sc->aac_io_lock);
2855 /* save the kernel revision structure for later use */
2856 info = (struct aac_adapter_info *)&fib->data[0];
2857 sc->aac_revision = info->KernelRevision;
/* Driver version is packed as major<<24 | minor<<16 | release<<8(+build) */
2859 device_printf(sc->aac_dev, "Adaptec Raid Controller %d.%d.%d-%d\n",
2860 AAC_DRIVER_VERSION >> 24,
2861 (AAC_DRIVER_VERSION >> 16) & 0xFF,
2862 AAC_DRIVER_VERSION & 0xFF,
2866 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2867 "(%dMB cache, %dMB execution), %s\n",
2868 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2869 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2870 info->BufferMem / (1024 * 1024),
2871 info->ExecutionMem / (1024 * 1024),
2872 aac_describe_code(aac_battery_platform,
2873 info->batteryPlatform),
2875 device_printf(sc->aac_dev,
2876 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2877 info->KernelRevision.external.comp.major,
2878 info->KernelRevision.external.comp.minor,
2879 info->KernelRevision.external.comp.dash,
2880 info->KernelRevision.buildNumber,
2881 (u_int32_t)(info->SerialNumber & 0xffffff));
/* %b expands supported_options against a bit-name string (elided here) */
2883 device_printf(sc->aac_dev, "Supported Options=%b\n",
2884 sc->supported_options,
2906 aac_release_sync_fib(sc);
2907 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/*
 * Linear-search 'table' for 'code' and return its description.  The
 * lookup tables in this driver end with a NULL-string sentinel entry
 * followed by one more entry holding the default "unknown" description,
 * so the post-loop 'table[i + 1].string' is the intended fallback, not an
 * out-of-bounds access.
 */
2911 * Look up a text description of a numeric error code and return a pointer to
2915 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2919 for (i = 0; table[i].string != NULL; i++)
2920 if (table[i].code == code)
2921 return(table[i].string);
2922 return(table[i + 1].string);
/*
 * Character-device open for the management interface.  Only a single
 * opener is supported: if AAC_STATE_OPEN is already set the open is
 * rejected (the error return is elided in this view), otherwise the flag
 * is set.
 */
2926 * Management Interface
2930 aac_open(struct dev_open_args *ap)
2932 cdev_t dev = ap->a_head.a_dev;
2933 struct aac_softc *sc;
2939 /* Check to make sure the device isn't already open */
2940 if (sc->aac_state & AAC_STATE_OPEN) {
2943 sc->aac_state |= AAC_STATE_OPEN;
/*
 * Character-device close: clear the single-opener flag so the management
 * device can be opened again.
 */
2949 aac_close(struct dev_close_args *ap)
2951 cdev_t dev = ap->a_head.a_dev;
2952 struct aac_softc *sc;
2958 /* Mark this unit as no longer open */
2959 sc->aac_state &= ~AAC_STATE_OPEN;
/*
 * Management-interface ioctl dispatcher.  AACIO_STATS is handled first
 * (copies per-queue statistics out of sc->aac_qstat); every other command
 * is an Adaptec FSACTL_* request whose argument is a user pointer that is
 * first dereferenced out of the kernel copy (the 'arg = *(caddr_t *)arg'
 * dance) and then handed to the matching helper.  Case 'break' statements
 * and the final return are elided in this view.
 */
2965 aac_ioctl(struct dev_ioctl_args *ap)
2967 cdev_t dev = ap->a_head.a_dev;
2968 caddr_t arg = ap->a_data;
2969 struct aac_softc *sc = dev->si_drv1;
2975 if (ap->a_cmd == AACIO_STATS) {
2976 union aac_statrequest *as = (union aac_statrequest *)arg;
2978 switch (as->as_item) {
/* valid queue index: hand back a snapshot of that queue's statistics */
2984 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2985 sizeof(struct aac_qstat));
/* FSACTL arguments arrive as a pointer-to-pointer; unwrap one level */
2994 arg = *(caddr_t *)arg;
2996 switch (ap->a_cmd) {
2997 /* AACIO_STATS already handled above */
2998 case FSACTL_SENDFIB:
2999 debug(1, "FSACTL_SENDFIB");
3000 error = aac_ioctl_sendfib(sc, arg);
3002 case FSACTL_AIF_THREAD:
3003 debug(1, "FSACTL_AIF_THREAD");
3006 case FSACTL_OPEN_GET_ADAPTER_FIB:
3007 debug(1, "FSACTL_OPEN_GET_ADAPTER_FIB");
3009 * Pass the caller out an AdapterFibContext.
3011 * Note that because we only support one opener, we
3012 * basically ignore this. Set the caller's context to a magic
3013 * number just in case.
3015 * The Linux code hands the driver a pointer into kernel space,
3016 * and then trusts it when the caller hands it back. Aiee!
3017 * Here, we give it the proc pointer of the per-adapter aif
3018 * thread. It's only used as a sanity check in other calls.
3020 cookie = (uint32_t)(uintptr_t)sc->aifthread;
3021 error = copyout(&cookie, arg, sizeof(cookie));
3023 case FSACTL_GET_NEXT_ADAPTER_FIB:
3024 debug(1, "FSACTL_GET_NEXT_ADAPTER_FIB");
3025 error = aac_getnext_aif(sc, arg);
3027 case FSACTL_CLOSE_GET_ADAPTER_FIB:
3028 debug(1, "FSACTL_CLOSE_GET_ADAPTER_FIB");
3029 /* don't do anything here */
3031 case FSACTL_MINIPORT_REV_CHECK:
3032 debug(1, "FSACTL_MINIPORT_REV_CHECK");
3033 error = aac_rev_check(sc, arg);
3035 case FSACTL_QUERY_DISK:
3036 debug(1, "FSACTL_QUERY_DISK");
3037 error = aac_query_disk(sc, arg);
3039 case FSACTL_DELETE_DISK:
3041 * We don't trust the underland to tell us when to delete a
3042 * container, rather we rely on an AIF coming from the
3043 * controller
3047 case FSACTL_GET_PCI_INFO:
/* deliberate fallthrough: unwrap the extra indirection, then share the
 * FSACTL_LNX_GET_PCI_INFO handler */
3048 arg = *(caddr_t*)arg;
3049 case FSACTL_LNX_GET_PCI_INFO:
3050 debug(1, "FSACTL_GET_PCI_INFO");
3051 error = aac_get_pci_info(sc, arg);
3054 debug(1, "unsupported cmd 0x%lx\n", ap->a_cmd);
/*
 * poll(2) entry point for the management device: readable when the AIF
 * ring has pending entries (head != tail).  If nothing is ready, the
 * caller is registered on rcv_select so aac_handle_aif's selwakeup can
 * notify it later.
 */
3062 aac_poll(struct dev_poll_args *ap)
3064 cdev_t dev = ap->a_head.a_dev;
3065 struct aac_softc *sc;
3071 AAC_LOCK_ACQUIRE(&sc->aac_aifq_lock);
3072 if ((ap->a_events & (POLLRDNORM | POLLIN)) != 0) {
3073 if (sc->aac_aifq_tail != sc->aac_aifq_head)
3074 revents |= ap->a_events & (POLLIN | POLLRDNORM);
3076 AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
/* no data ready (revents == 0, test elided): arm select/poll wakeup */
3079 if (ap->a_events & (POLLIN | POLLRDNORM))
3080 selrecord(curthread, &sc->rcv_select);
3082 ap->a_events = revents;
/*
 * Event callback used by aac_ioctl_sendfib while waiting for a free
 * command slot.  On AAC_EVENT_CMFREE it retries the allocation into the
 * caller's 'arg' (a struct aac_command **); if commands are still
 * exhausted the event is re-queued to try again, otherwise the event is
 * freed (and the sleeping caller is presumably woken — the wakeup line is
 * elided in this view).
 */
3087 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
3090 switch (event->ev_type) {
3091 case AAC_EVENT_CMFREE:
3092 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3093 if (aac_alloc_command(sc, (struct aac_command **)arg)) {
3094 aac_add_event(sc, event);	/* still none free: re-arm */
3095 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3098 kfree(event, M_AACBUF);
3100 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/*
 * FSACTL_SENDFIB: copy a FIB in from userspace, run it on the adapter
 * synchronously, and copy the (possibly modified) FIB back out.
 *
 * Command allocation may block: if no command slot is free, an
 * AAC_EVENT_CMFREE event is registered (see aac_ioctl_event) and the
 * thread tsleeps on &cm using the DragonFly interlock protocol so the
 * wakeup cannot be lost between dropping the lock and sleeping.
 *
 * Both the inbound and outbound FIB sizes are clamped to
 * sizeof(struct aac_fib) so a hostile Size field cannot overrun the
 * kernel buffer.  Error-path gotos/labels are elided in this view.
 */
3108 * Send a FIB supplied from userspace
3111 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
3113 struct aac_command *cm;
3123 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3124 if (aac_alloc_command(sc, &cm)) {
3125 struct aac_event *event;
3127 event = kmalloc(sizeof(struct aac_event), M_AACBUF,
3128 M_INTWAIT | M_ZERO);
3129 event->ev_type = AAC_EVENT_CMFREE;
3130 event->ev_callback = aac_ioctl_event;
3131 event->ev_arg = &cm;
3132 aac_add_event(sc, event);
/* interlocked sleep: avoids losing the wakeup after releasing the lock */
3133 tsleep_interlock(&cm, 0);
3134 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3135 tsleep(&cm, PINTERLOCKED, "sendfib", 0);
3136 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3138 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3141 * Fetch the FIB header, then re-copy to get data as well.
3143 if ((error = copyin(ufib, cm->cm_fib,
3144 sizeof(struct aac_fib_header))) != 0)
3146 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
3147 if (size > sizeof(struct aac_fib)) {
3148 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %zd)\n",
3149 size, sizeof(struct aac_fib));
3150 size = sizeof(struct aac_fib);	/* clamp untrusted size */
3152 if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
3154 cm->cm_fib->Header.Size = size;
3155 cm->cm_timestamp = time_second;
3158 * Pass the FIB to the controller, wait for it to complete.
3160 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3161 if ((error = aac_wait_command(cm)) != 0) {
3162 device_printf(sc->aac_dev,
3163 "aac_wait_command return %d\n", error);
3166 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3169 * Copy the FIB and data back out to the caller.
3171 size = cm->cm_fib->Header.Size;
3172 if (size > sizeof(struct aac_fib)) {
3173 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %zd)\n",
3174 size, sizeof(struct aac_fib));
3175 size = sizeof(struct aac_fib);	/* clamp firmware-reported size too */
3177 error = copyout(cm->cm_fib, ufib, size);
3178 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3182 aac_release_command(cm);
3185 AAC_LOCK_RELEASE(&sc->aac_io_lock);
/*
 * Process an Adapter-Initiated FIB (AIF) from the controller.  For
 * container add/delete events it re-enumerates all containers with
 * synchronous VM_NameServe queries, reconciles the result against
 * sc->aac_container_tqh (attaching new children, deleting ones the
 * controller no longer reports), then unconditionally copies the raw AIF
 * into the aifq ring for userspace retrieval and wakes sleepers/pollers.
 * Loop/brace/label lines are elided in this view; do-while and if bodies
 * must be read from the surviving statements.
 */
3190 * Handle an AIF sent to us by the controller; queue it for later reference.
3191 * If the queue fills up, then drop the older entries.
3194 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3196 struct aac_aif_command *aif;
3197 struct aac_container *co, *co_next;
3198 struct aac_mntinfo *mi;
3199 struct aac_mntinforesp *mir = NULL;
3202 int count = 0, added = 0, i = 0;
3206 aif = (struct aac_aif_command*)&fib->data[0];
3207 aac_print_aif(sc, aif);
3209 /* Is it an event that we should care about? */
3210 switch (aif->command) {
3211 case AifCmdEventNotify:
3212 switch (aif->data.EN.type) {
3213 case AifEnAddContainer:
3214 case AifEnDeleteContainer:
3216 * A container was added or deleted, but the message
3217 * doesn't tell us anything else! Re-enumerate the
3218 * containers and sort things out.
/* NOTE(review): 'fib' is reused here for the sync FIB, shadowing the
 * incoming AIF fib — 'aif' still points into the old buffer */
3220 aac_alloc_sync_fib(sc, &fib);
3221 mi = (struct aac_mntinfo *)&fib->data[0];
3224 * Ask the controller for its containers one at
3226 * XXX What if the controller's list changes
3227 * midway through this enumaration?
3228 * XXX This should be done async.
3230 bzero(mi, sizeof(struct aac_mntinfo));
3231 mi->Command = VM_NameServe;
3232 mi->MntType = FT_FILESYS;
/* NOTE(review): sizeof(mir) is the size of the POINTER, not the response
 * struct — looks suspicious but matches the historical driver; confirm
 * whether 'rsize' is actually used downstream */
3234 rsize = sizeof(mir);
3235 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
3236 sizeof(struct aac_mntinfo))) {
3237 device_printf(sc->aac_dev,
3238 "Error probing container %d\n", i);
3242 mir = (struct aac_mntinforesp *)&fib->data[0];
3243 /* XXX Need to check if count changed */
3244 count = mir->MntRespCount;
3247 * Check the container against our list.
3248 * co->co_found was already set to 0 in a
3251 if ((mir->Status == ST_OK) &&
3252 (mir->MntTable[0].VolType != CT_NONE)) {
3255 &sc->aac_container_tqh,
3257 if (co->co_mntobj.ObjectId ==
3258 mir->MntTable[0].ObjectId) {
3265 * If the container matched, continue
3274 * This is a new container. Do all the
3275 * appropriate things to set it up.
3277 aac_add_container(sc, mir, 1);
3281 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3282 aac_release_sync_fib(sc);
3285 * Go through our list of containers and see which ones
3286 * were not marked 'found'. Since the controller didn't
3287 * list them they must have been deleted. Do the
3288 * appropriate steps to destroy the device. Also reset
3289 * the co->co_found field.
3291 co = TAILQ_FIRST(&sc->aac_container_tqh);
3292 while (co != NULL) {
3293 if (co->co_found == 0) {
/* drop the io lock across device_delete_child (it may sleep) */
3294 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3296 device_delete_child(sc->aac_dev,
3299 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3300 co_next = TAILQ_NEXT(co, co_link);
3301 AAC_LOCK_ACQUIRE(&sc->
3302 aac_container_lock);
3303 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3305 AAC_LOCK_RELEASE(&sc->
3306 aac_container_lock);
3307 kfree(co, M_AACBUF);
3311 co = TAILQ_NEXT(co, co_link);
3315 /* Attach the newly created containers */
3317 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3319 bus_generic_attach(sc->aac_dev);
3321 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3334 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3335 AAC_LOCK_ACQUIRE(&sc->aac_aifq_lock);
3336 next = (sc->aac_aifq_head + 1) % AAC_AIFQ_LENGTH;
3337 if (next != sc->aac_aifq_tail) {
3338 bcopy(aif, &sc->aac_aifq[next], sizeof(struct aac_aif_command));
3339 sc->aac_aifq_head = next;
3341 /* On the off chance that someone is sleeping for an aif... */
3342 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3343 wakeup(sc->aac_aifq);
3344 /* token may have been lost */
3345 /* Wakeup any poll()ers */
3346 selwakeup(&sc->rcv_select);
3347 /* token may have been lost */
3349 AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
/*
 * FSACTL_MINIPORT_REV_CHECK handler: copy in the caller's revision
 * structure, then answer with the firmware revision cached by
 * aac_describe_controller (sc->aac_revision), always claiming
 * compatibility — as the original comment admits, the driver does not
 * follow Adaptec's versioning scheme.
 */
3355 * Return the Revision of the driver to userspace and check to see if the
3356 * userspace app is possibly compatible. This is extremely bogus since
3357 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3358 * returning what the card reported.
3361 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3363 struct aac_rev_check rev_check;
3364 struct aac_rev_check_resp rev_check_resp;
3370 * Copyin the revision struct from userspace
3372 if ((error = copyin(udata, (caddr_t)&rev_check,
3373 sizeof(struct aac_rev_check))) != 0) {
3377 debug(2, "Userland revision= %d\n",
3378 rev_check.callingRevision.buildNumber);
3381 * Doctor up the response struct.
3383 rev_check_resp.possiblyCompatible = 1;
3384 rev_check_resp.adapterSWRevision.external.ul =
3385 sc->aac_revision.external.ul;
3386 rev_check_resp.adapterSWRevision.buildNumber =
3387 sc->aac_revision.buildNumber;
3389 return(copyout((caddr_t)&rev_check_resp, udata,
3390 sizeof(struct aac_rev_check_resp)));
/*
 * FSACTL_GET_NEXT_ADAPTER_FIB handler: validate the caller's context
 * cookie (must match the aif-thread pointer handed out at open time),
 * then pop the next AIF via aac_return_aif.  If the queue is empty and
 * the caller asked to Wait, sleep on aac_aifq (interruptibly, PCATCH)
 * until aac_handle_aif queues one and wakes us.
 */
3394 * Pass the caller the next AIF in their queue
3397 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3399 struct get_adapter_fib_ioctl agf;
3404 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3407 * Check the magic number that we gave the caller.
3409 if (agf.AdapterFibContext != (int)(uintptr_t)sc->aifthread) {
3413 error = aac_return_aif(sc, agf.AifFib);
3415 if ((error == EAGAIN) && (agf.Wait)) {
3416 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3417 while (error == EAGAIN) {
3418 error = tsleep(sc->aac_aifq,
3419 PCATCH, "aacaif", 0);
3421 error = aac_return_aif(sc,
3424 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
/*
 * Dequeue the oldest AIF from the ring and copy it to userspace.
 * Returns EAGAIN when the ring is empty (tail == head); on successful
 * copyout the tail is advanced.  Note the copyout happens while holding
 * aac_aifq_lock — the YYY comment below records the known token-loss
 * caveat.
 */
3432 * Hand the next AIF off the top of the queue out to userspace.
3434 * YYY token could be lost during copyout
3437 aac_return_aif(struct aac_softc *sc, caddr_t uptr)
3443 AAC_LOCK_ACQUIRE(&sc->aac_aifq_lock);
3444 if (sc->aac_aifq_tail == sc->aac_aifq_head) {
3445 AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
3449 next = (sc->aac_aifq_tail + 1) % AAC_AIFQ_LENGTH;
3450 error = copyout(&sc->aac_aifq[next], uptr,
3451 sizeof(struct aac_aif_command));
3453 device_printf(sc->aac_dev,
3454 "aac_return_aif: copyout returned %d\n", error);
3456 sc->aac_aifq_tail = next;	/* consume only on successful copyout */
3458 AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
/*
 * FSACTL_GET_PCI_INFO handler: report the adapter's PCI bus and slot to
 * userspace via a small local struct (its member declarations are elided
 * in this view).
 */
3463 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3465 struct aac_pci_info {
3473 pciinf.bus = pci_get_bus(sc->aac_dev);
3474 pciinf.slot = pci_get_slot(sc->aac_dev);
3476 error = copyout((caddr_t)&pciinf, uptr,
3477 sizeof(struct aac_pci_info));
/*
 * FSACTL_QUERY_DISK handler: userspace identifies a container by number;
 * we look it up in aac_container_tqh and fake the SCSI bus:target:lun
 * triple the Adaptec management tools expect.  A container that is not
 * found is reported as invalid/deleted rather than failing the ioctl.
 */
3483 * Give the userland some information about the container. The AAC arch
3484 * expects the driver to be a SCSI passthrough type driver, so it expects
3485 * the containers to have b:t:l numbers. Fake it.
3488 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3490 struct aac_query_disk query_disk;
3491 struct aac_container *co;
3492 struct aac_disk *disk;
3499 error = copyin(uptr, (caddr_t)&query_disk,
3500 sizeof(struct aac_query_disk));
3504 id = query_disk.ContainerNumber;
3508 AAC_LOCK_ACQUIRE(&sc->aac_container_lock);
3509 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3510 if (co->co_mntobj.ObjectId == id)
/* not found (co == NULL, test elided): report it as gone */
3515 query_disk.Valid = 0;
3516 query_disk.Locked = 0;
3517 query_disk.Deleted = 1; /* XXX is this right? */
3519 disk = device_get_softc(co->co_disk);
3520 query_disk.Valid = 1;
3522 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0;
3523 query_disk.Deleted = 0;
/* fake b:t:l — bus = controller unit, target = disk unit */
3524 query_disk.Bus = device_get_unit(sc->aac_dev);
3525 query_disk.Target = disk->unit;
3527 query_disk.UnMapped = 0;
3528 bcopy(disk->ad_dev_t->si_name,
3529 &query_disk.diskDeviceName[0], 10);
3531 AAC_LOCK_RELEASE(&sc->aac_container_lock);
3533 error = copyout((caddr_t)&query_disk, uptr,
3534 sizeof(struct aac_query_disk));
3540 aac_get_bus_info(struct aac_softc *sc)
3542 struct aac_fib *fib;
3543 struct aac_ctcfg *c_cmd;
3544 struct aac_ctcfg_resp *c_resp;
3545 struct aac_vmioctl *vmi;
3546 struct aac_vmi_businf_resp *vmi_resp;
3547 struct aac_getbusinf businfo;
3548 struct aac_sim *caminf;
3550 int i, found, error;
3552 AAC_LOCK_ACQUIRE(&sc->aac_io_lock);
3553 aac_alloc_sync_fib(sc, &fib);
3554 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3555 bzero(c_cmd, sizeof(struct aac_ctcfg));
3557 c_cmd->Command = VM_ContainerConfig;
3558 c_cmd->cmd = CT_GET_SCSI_METHOD;
3561 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3562 sizeof(struct aac_ctcfg));
3564 device_printf(sc->aac_dev, "Error %d sending "
3565 "VM_ContainerConfig command\n", error);
3566 aac_release_sync_fib(sc);
3567 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3571 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3572 if (c_resp->Status != ST_OK) {
3573 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3575 aac_release_sync_fib(sc);
3576 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3580 sc->scsi_method_id = c_resp->param;
3582 vmi = (struct aac_vmioctl *)&fib->data[0];
3583 bzero(vmi, sizeof(struct aac_vmioctl));
3585 vmi->Command = VM_Ioctl;
3586 vmi->ObjType = FT_DRIVE;
3587 vmi->MethId = sc->scsi_method_id;
3589 vmi->IoctlCmd = GetBusInfo;
3591 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3592 sizeof(struct aac_vmioctl));
3594 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3596 aac_release_sync_fib(sc);
3597 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3601 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3602 if (vmi_resp->Status != ST_OK) {
3603 debug(1, "VM_Ioctl returned %d\n", vmi_resp->Status);
3604 aac_release_sync_fib(sc);
3605 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3609 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3610 aac_release_sync_fib(sc);
3611 AAC_LOCK_RELEASE(&sc->aac_io_lock);
3614 for (i = 0; i < businfo.BusCount; i++) {
3615 if (businfo.BusValid[i] != AAC_BUS_VALID)
3618 caminf = (struct aac_sim *)kmalloc(sizeof(struct aac_sim),
3619 M_AACBUF, M_INTWAIT | M_ZERO);
3621 child = device_add_child(sc->aac_dev, "aacp", -1);
3622 if (child == NULL) {
3623 device_printf(sc->aac_dev,
3624 "device_add_child failed for passthrough bus %d\n",
3626 kfree(caminf, M_AACBUF);
3630 caminf->TargetsPerBus = businfo.TargetsPerBus;
3631 caminf->BusNumber = i;
3632 caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3633 caminf->aac_sc = sc;
3634 caminf->sim_dev = child;
3636 device_set_ivars(child, caminf);
3637 device_set_desc(child, "SCSI Passthrough Bus");
3638 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3644 bus_generic_attach(sc->aac_dev);