2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 * agrees to the disclaimer below and the terms and conditions set forth
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * $FreeBSD: src/sys/dev/amr/amr.c,v 1.97 2012/04/20 20:27:31 jhb Exp $
61 * Driver for the AMI MegaRaid family of controllers.
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
67 #include <sys/kernel.h>
69 #include <sys/sysctl.h>
70 #include <sys/sysmsg.h>
77 #include <machine/cpu.h>
80 #include <bus/pci/pcireg.h>
81 #include <bus/pci/pcivar.h>
83 #include <dev/raid/amr/amrio.h>
84 #include <dev/raid/amr/amrreg.h>
85 #include <dev/raid/amr/amrvar.h>
86 #define AMR_DEFINE_TABLES
87 #include <dev/raid/amr/amr_tables.h>
/*
 * File-scope declarations for the amr(4) driver.
 *
 * NOTE(review): this excerpt elides interior lines (the embedded original
 * line numbers are non-contiguous), so several declarations and the body of
 * amr_ops are only partially visible here.
 */
89 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
/* Character-device entry points for the /dev/amrN control device. */
91 static d_open_t amr_open;
92 static d_close_t amr_close;
93 static d_ioctl_t amr_ioctl;
95 static struct dev_ops amr_ops = {
/* Unit number handed back to Linux-emulation ioctl callers (see below). */
102 int linux_no_adapter = 0;
104 * Initialisation, bus interface.
106 static void amr_startup(void *arg);
111 static int amr_query_controller(struct amr_softc *sc);
112 static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
113 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
114 static void amr_completeio(struct amr_command *ac);
115 static int amr_support_ext_cdb(struct amr_softc *sc);
118 * Command buffer allocation.
120 static void amr_alloccmd_cluster(struct amr_softc *sc);
121 static void amr_freecmd_cluster(struct amr_command_cluster *acc);
124 * Command processing.
126 static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
127 static int amr_wait_command(struct amr_command *ac);
128 static int amr_mapcmd(struct amr_command *ac);
129 static void amr_unmapcmd(struct amr_command *ac);
130 static int amr_start(struct amr_command *ac);
131 static void amr_complete(void *context, ac_qhead_t *head);
/* busdma callbacks used when mapping command data for DMA. */
132 static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
133 static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
134 static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
135 static void amr_abort_load(struct amr_command *ac);
141 static void amr_periodic(void *data);
145 * Interface-specific shims
/* "Quartz" (PCI memory-mapped) flavour of the hardware interface. */
147 static int amr_quartz_submit_command(struct amr_command *ac);
148 static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
149 static int amr_quartz_poll_command(struct amr_command *ac);
150 static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
/* "Standard" (I/O-port) flavour of the hardware interface. */
152 static int amr_std_submit_command(struct amr_command *ac);
153 static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
154 static int amr_std_poll_command(struct amr_command *ac);
155 static void amr_std_attach_mailbox(struct amr_softc *sc);
157 #ifdef AMR_BOARD_INIT
158 static int amr_quartz_init(struct amr_softc *sc);
159 static int amr_std_init(struct amr_softc *sc);
165 static void amr_describe_controller(struct amr_softc *sc);
168 static void amr_printcommand(struct amr_command *ac);
172 static void amr_init_sysctl(struct amr_softc *sc);
173 static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
174 int32_t flag, struct sysmsg *sm);
176 static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
178 /********************************************************************************
179 ********************************************************************************
181 ********************************************************************************
182 ********************************************************************************/
184 /********************************************************************************
185 ********************************************************************************
187 ********************************************************************************
188 ********************************************************************************/
190 /********************************************************************************
191 * Initialise the controller and softc.
/*
 * amr_attach: initialise one controller instance.
 *
 * Sets up per-controller queues, selects the Quartz or Standard hardware
 * shims, allocates command buffers, queries the controller for limits,
 * creates the sysctl tree, attaches the CAM passthrough child, creates the
 * /dev control node and schedules amr_startup() via an intrhook so the rest
 * of bring-up runs once interrupts work.
 *
 * NOTE(review): error-return paths are elided in this excerpt; the visible
 * lines show only the success path.
 */
194 amr_attach(struct amr_softc *sc)
201  * Initialise per-controller queues.
203 amr_init_qhead(&sc->amr_freecmds);
204 amr_init_qhead(&sc->amr_ready);
205 TAILQ_INIT(&sc->amr_cmd_clusters);
206 bioq_init(&sc->amr_bioq);
208 debug(2, "queue init done");
211  * Configure for this controller type.
213 if (AMR_IS_QUARTZ(sc)) {
214 sc->amr_submit_command = amr_quartz_submit_command;
215 sc->amr_get_work = amr_quartz_get_work;
216 sc->amr_poll_command = amr_quartz_poll_command;
217 sc->amr_poll_command1 = amr_quartz_poll_command1;
/* Standard (I/O-port) interface; also needs its mailbox mapped. */
219 sc->amr_submit_command = amr_std_submit_command;
220 sc->amr_get_work = amr_std_get_work;
221 sc->amr_poll_command = amr_std_poll_command;
222 amr_std_attach_mailbox(sc);
225 #ifdef AMR_BOARD_INIT
226 if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
231  * Allocate initial commands.
233 amr_alloccmd_cluster(sc);
236  * Quiz controller for features and limits.
238 if (amr_query_controller(sc))
241 debug(2, "controller query complete");
244  * preallocate the remaining commands.
/* amr_maxio was just learned from the controller query above. */
246 while (sc->amr_nextslot < sc->amr_maxio)
247 amr_alloccmd_cluster(sc);
252 sysctl_ctx_init(&sc->amr_sysctl_ctx);
253 sc->amr_sysctl_tree = SYSCTL_ADD_NODE(&sc->amr_sysctl_ctx,
254 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
255 device_get_nameunit(sc->amr_dev), CTLFLAG_RD, 0, "");
256 if (sc->amr_sysctl_tree == NULL) {
257 device_printf(sc->amr_dev, "can't add sysctl node\n");
263  * Attach our 'real' SCSI channels to CAM.
265 child = device_add_child(sc->amr_dev, "amrp", -1);
266 sc->amr_pass = child;
268 device_set_softc(child, sc);
269 device_set_desc(child, "SCSI Passthrough Bus");
270 bus_generic_attach(sc->amr_dev);
274  * Create the control device.
276 sc->amr_dev_t = make_dev(&amr_ops, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
277 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
278 sc->amr_dev_t->si_drv1 = sc;
/* Linux megaraid tools expect a /dev/megadev0 alias on the first unit. */
280 if (device_get_unit(sc->amr_dev) == 0)
281 make_dev_alias(sc->amr_dev_t, "megadev0");
284  * Schedule ourselves to bring the controller up once interrupts are
287 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
288 sc->amr_ich.ich_func = amr_startup;
289 sc->amr_ich.ich_arg = sc;
290 sc->amr_ich.ich_desc = "amr";
291 if (config_intrhook_establish(&sc->amr_ich) != 0) {
292 device_printf(sc->amr_dev, "can't establish configuration hook\n");
297  * Print a little information about the controller.
299 amr_describe_controller(sc);
301 debug(2, "attach complete");
305 /********************************************************************************
306 * Locate disk resources and attach children to them.
/*
 * Runs from the intrhook established in amr_attach(), i.e. after interrupts
 * are live.  Re-queries the controller, adds a child device per logical
 * drive, clears the SHUTDOWN state and arms the periodic timeout.
 */
309 amr_startup(void *arg)
311 struct amr_softc *sc = (struct amr_softc *)arg;
312 struct amr_logdrive *dr;
316 callout_init(&sc->amr_timeout);
318 /* pull ourselves off the intrhook chain */
319 if (sc->amr_ich.ich_func)
320 config_intrhook_disestablish(&sc->amr_ich);
321 sc->amr_ich.ich_func = NULL;
323 /* get up-to-date drive information */
324 if (amr_query_controller(sc)) {
325 device_printf(sc->amr_dev, "can't scan controller for drives\n");
329 /* iterate over available drives */
/* al_size == 0xffffffff marks an unused slot (see amr_query_controller). */
330 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
331 /* are we already attached to this drive? */
332 if (dr->al_disk == 0) {
333 /* generate geometry information */
334 if (dr->al_size > 0x200000) { /* extended translation? */
341 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
343 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
344 if (dr->al_disk == 0)
345 device_printf(sc->amr_dev, "device_add_child failed\n");
346 device_set_ivars(dr->al_disk, dr);
350 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
351 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
353 /* mark controller back up */
354 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
356 /* interrupts will be enabled before we do anything more */
357 sc->amr_state |= AMR_STATE_INTEN;
361  * Start the timeout routine.
/* NOTE(review): mixes legacy timeout() with the callout initialised above;
 * amr_periodic() itself uses callout_reset() — confirm intended API. */
363 sc->amr_timeout = timeout(amr_periodic, sc, hz);
/*
 * amr_init_sysctl: publish per-controller tunables/statistics under the
 * sysctl node created in amr_attach().  Only allow_volume_configure is
 * writable; the remaining OIDs are read-only counters/limits.
 */
370 amr_init_sysctl(struct amr_softc *sc)
373 SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
374 SYSCTL_CHILDREN(sc->amr_sysctl_tree),
375 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
377 SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
378 SYSCTL_CHILDREN(sc->amr_sysctl_tree),
379 OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
381 SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
382 SYSCTL_CHILDREN(sc->amr_sysctl_tree),
383 OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
385 SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
386 SYSCTL_CHILDREN(sc->amr_sysctl_tree),
387 OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
392 /*******************************************************************************
393 * Free resources associated with a controller instance
/*
 * Tear-down counterpart of amr_attach()/amr_startup(): detach the CAM child,
 * stop the periodic callout, release all command clusters, destroy the
 * control device, free the sysctl tree and uninit the locks.
 */
396 amr_free(struct amr_softc *sc)
398 struct amr_command_cluster *acc;
400 /* detach from CAM */
401 if (sc->amr_pass != NULL)
402 device_delete_child(sc->amr_dev, sc->amr_pass);
404 /* cancel status timeout */
405 callout_stop(&sc->amr_timeout);
407 /* throw away any command buffers */
408 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
409 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
410 amr_freecmd_cluster(acc);
413 /* destroy control device */
414 if(sc->amr_dev_t != NULL)
415 destroy_dev(sc->amr_dev_t);
416 dev_ops_remove_minor(&amr_ops, device_get_unit(sc->amr_dev));
/* Mutex destruction from the FreeBSD original, disabled in this port. */
418 #if 0 /* XXX swildner */
419 if (mtx_initialized(&sc->amr_hw_lock))
420 mtx_destroy(&sc->amr_hw_lock);
422 if (mtx_initialized(&sc->amr_list_lock))
423 mtx_destroy(&sc->amr_list_lock);
426 if (sc->amr_sysctl_tree != NULL)
427 sysctl_ctx_free(&sc->amr_sysctl_ctx);
429 lockuninit(&sc->amr_hw_lock);
430 lockuninit(&sc->amr_list_lock);
433 /*******************************************************************************
434 * Receive a bio structure from a child device and queue it on a particular
435 * disk resource, then poke the disk resource to start as much work as it can.
/* Called by the amrd disk children; enqueues under amr_list_lock. */
438 amr_submit_bio(struct amr_softc *sc, struct bio *bio)
442 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
443 amr_enqueue_bio(sc, bio);
445 lockmgr(&sc->amr_list_lock, LK_RELEASE);
449 /********************************************************************************
450 * Accept an open operation on the control device.
/* d_open handler for /dev/amrN: looks up the softc by unit and marks the
 * device open.  NOTE(review): permission/validity checks are not visible in
 * this excerpt — confirm they exist in the elided lines. */
453 amr_open(struct dev_open_args *ap)
455 cdev_t dev = ap->a_head.a_dev;
456 int unit = minor(dev);
457 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
461 sc->amr_state |= AMR_STATE_OPEN;
465 /********************************************************************************
466 * Accept the last close on the control device.
/* d_close handler: clears the OPEN state flag set in amr_open(). */
469 amr_close(struct dev_close_args *ap)
471 cdev_t dev = ap->a_head.a_dev;
472 int unit = minor(dev);
473 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
477 sc->amr_state &= ~AMR_STATE_OPEN;
481 /********************************************************************************
482 * Handle controller-specific control operations.
/*
 * amr_rescan_drives: re-enumerate logical drives after a management ioctl
 * created or deleted a volume.  Waits for in-flight commands to drain,
 * flushes the cache, deletes existing child devices, then (in elided code)
 * re-runs discovery.
 */
485 amr_rescan_drives(struct cdev *dev)
487 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
490 sc->amr_state |= AMR_STATE_REMAP_LD;
/* Drain: loop until no command slots are busy. */
491 while (sc->amr_busyslots) {
492 device_printf(sc->amr_dev, "idle controller\n");
496 /* mark ourselves as in-shutdown */
497 sc->amr_state |= AMR_STATE_SHUTDOWN;
499 /* flush controller */
500 device_printf(sc->amr_dev, "flushing cache...");
501 kprintf("%s\n", amr_flush(sc) ? "failed" : "done");
503 /* delete all our child devices */
504 for(i = 0 ; i < AMR_MAXLD; i++) {
505 if(sc->amr_drive[i].al_disk != 0) {
506 if((error = device_delete_child(sc->amr_dev,
507 sc->amr_drive[i].al_disk)) != 0)
510 sc->amr_drive[i].al_disk = 0;
519  * Bug-for-bug compatibility with Linux!
520  * Some apps will send commands with inlen and outlen set to 0,
521  * even though they expect data to be transfered to them from the
522  * card. Linux accidentally allows this by allocating a 4KB
523  * buffer for the transfer anyways, but it then throws it away
524  * without copying it back to the app.
526  * The amr(4) firmware relies on this feature. In fact, it assumes
527  * the buffer is always a power of 2 up to a max of 64k. There is
528  * also at least one case where it assumes a buffer less than 16k is
529  * greater than 16k. Force a minimum buffer size of 32k and round
530  * sizes between 32k and 64k up to 64k as a workaround.
/* Maps a requested ioctl transfer length to the buffer size actually
 * allocated (32k / 64k buckets per the comment above; return statements
 * are elided in this excerpt). */
533 amr_ioctl_buffer_length(unsigned long len)
536 if (len <= 32 * 1024)
538 if (len <= 64 * 1024)
/*
 * amr_linux_ioctl_int: service a Linux megaraid-ioctl request.
 *
 * Copies in the struct amr_linux_ioctl, dispatches on opcode:
 *  - driver-info subopcodes return the emulated driver version / adapter
 *    count via copyout;
 *  - mailbox commands are either SCSI passthrough (AMR_CMD_PASS, bounced
 *    through a kernel amr_passthrough + data buffer) or generic mailbox
 *    commands (bounced through a kernel buffer sized by
 *    amr_ioctl_buffer_length()).
 * Volume create/delete commands are refused unless the
 * allow_volume_configure sysctl is set; on success they trigger
 * amr_rescan_drives().
 *
 * NOTE(review): many error-handling and cleanup lines are elided in this
 * excerpt; the lock-held cleanup tail at the bottom is shared by several
 * exit paths.
 */
544 amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
547 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
548 struct amr_command *ac;
549 struct amr_mailbox *mb;
550 struct amr_linux_ioctl ali;
553 int adapter, len, ac_flags = 0;
554 int logical_drives_changed = 0;
/* Version number reported to Linux management tools (2.10.00). */
555 u_int32_t linux_version = 0x02100000;
557 struct amr_passthrough *ap; /* 60 bytes */
564 if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
566 switch (ali.ui.fcs.opcode) {
568 switch(ali.ui.fcs.subopcode) {
570 copyout(&linux_version, (void *)(uintptr_t)ali.data,
571 sizeof(linux_version));
576 copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
577 sizeof(linux_no_adapter));
578 sm->sm_result.iresult = linux_no_adapter;
583 kprintf("Unknown subopcode\n");
/* Opcode 0x80: transfer length comes from in/out lengths, else explicit. */
591 if (ali.ui.fcs.opcode == 0x80)
592 len = max(ali.outlen, ali.inlen);
594 len = ali.ui.fcs.length;
/* Decode adapter number (Linux tools XOR it with 'm' << 8). */
596 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
598 mb = (void *)&ali.mbox[0];
600 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
601 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
602 if (sc->amr_allow_vol_config == 0) {
606 logical_drives_changed = 1;
/* --- SCSI passthrough path --- */
609 if (ali.mbox[0] == AMR_CMD_PASS) {
610 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
611 while ((ac = amr_alloccmd(sc)) == NULL)
612 lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
613 lockmgr(&sc->amr_list_lock, LK_RELEASE);
614 ap = &ac->ac_ccb->ccb_pthru;
/* User's mailbox "physaddr" is actually a user pointer to the pthru. */
616 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
617 sizeof(struct amr_passthrough));
621 if (ap->ap_data_transfer_length)
622 dp = kmalloc(ap->ap_data_transfer_length, M_AMR,
626 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
627 dp, ap->ap_data_transfer_length);
632 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
633 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
634 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
635 ac->ac_flags = ac_flags;
638 ac->ac_length = ap->ap_data_transfer_length;
/* Remember the user data pointer for the copyout below. */
639 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
641 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
642 error = amr_wait_command(ac);
643 lockmgr(&sc->amr_list_lock, LK_RELEASE);
/* Copy SCSI status, data and sense back to the user's structures. */
647 status = ac->ac_status;
648 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
653 error = copyout(dp, temp, ap->ap_data_transfer_length);
657 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
663 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
664 kprintf("No AMR_CMD_PASS_64\n");
667 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
668 kprintf("No AMR_CMD_EXTPASS\n");
/* --- generic mailbox command path --- */
672 len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));
674 dp = kmalloc(len, M_AMR, M_WAITOK | M_ZERO);
677 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
682 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
683 while ((ac = amr_alloccmd(sc)) == NULL)
684 lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
686 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
687 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
688 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
692 ac->ac_flags = ac_flags;
694 error = amr_wait_command(ac);
695 lockmgr(&sc->amr_list_lock, LK_RELEASE);
699 status = ac->ac_status;
700 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
702 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
708 if (logical_drives_changed)
709 amr_rescan_drives(dev);
715 debug(1, "unknown linux ioctl 0x%lx", cmd);
716 kprintf("unknown linux ioctl 0x%lx\n", cmd);
722  * At this point, we know that there is a lock held and that these
723  * objects have been allocated.
725 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
728 lockmgr(&sc->amr_list_lock, LK_RELEASE);
/*
 * amr_ioctl: d_ioctl handler for the control device.
 *
 * Supported requests:
 *  - AMR_IO_VERSION: return the driver ioctl interface version;
 *  - AMR_IO_COMMAND (and AMR_IO_COMMAND32 for 32-bit callers): run a
 *    management command, either SCSI passthrough or a direct mailbox
 *    command, bouncing user data through a kernel buffer;
 *  - 0xc06e6d00: Linux emulation, forwarded to amr_linux_ioctl_int().
 *
 * NOTE(review): several declarations, case labels, error paths and the
 * cleanup tail are elided in this excerpt.
 */
735 amr_ioctl(struct dev_ioctl_args *ap)
737 cdev_t dev = ap->a_head.a_dev;
738 caddr_t addr = ap->a_data;
739 u_long cmd = ap->a_cmd;
740 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
743 struct amr_user_ioctl *au;
744 #ifdef AMR_IO_COMMAND32
745 struct amr_user_ioctl32 *au32;
749 struct amr_command *ac;
750 struct amr_mailbox_ioctl *mbi;
751 void *dp, *au_buffer;
752 unsigned long au_length, real_length;
753 unsigned char *au_cmd;
754 int *au_statusp, au_direction;
/* '_ap' to avoid shadowing the 'ap' dev_ioctl_args parameter. */
756 struct amr_passthrough *_ap; /* 60 bytes */
757 int logical_drives_changed = 0;
761 arg._p = (void *)addr;
771 debug(1, "AMR_IO_VERSION");
772 *arg.result = AMR_IO_VERSION_NUMBER;
775 #ifdef AMR_IO_COMMAND32
777  * Accept ioctl-s from 32-bit binaries on non-32-bit
778  * platforms, such as AMD. LSI's MEGAMGR utility is
779  * the only example known today... -mi
781 case AMR_IO_COMMAND32:
782 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
783 au_cmd = arg.au32->au_cmd;
784 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
785 au_length = arg.au32->au_length;
786 au_direction = arg.au32->au_direction;
787 au_statusp = &arg.au32->au_status;
/* Native AMR_IO_COMMAND: same fields, native-width layout. */
792 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
793 au_cmd = arg.au->au_cmd;
794 au_buffer = (void *)arg.au->au_buffer;
795 au_length = arg.au->au_length;
796 au_direction = arg.au->au_direction;
797 au_statusp = &arg.au->au_status;
801 case 0xc06e6d00: /* Linux emulation */
804 struct amr_linux_ioctl ali;
807 devclass = devclass_find("amr");
808 if (devclass == NULL)
811 error = copyin(addr, &ali, sizeof(ali));
814 if (ali.ui.fcs.opcode == 0x82)
/* Decode adapter number the same way amr_linux_ioctl_int() does. */
817 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
819 sc = devclass_get_softc(devclass, adapter);
823 return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, ap->a_sysmsg));
826 debug(1, "unknown ioctl 0x%lx", cmd);
/* Refuse volume create/delete unless explicitly enabled via sysctl. */
830 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
831 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
832 if (sc->amr_allow_vol_config == 0) {
836 logical_drives_changed = 1;
839 /* handle inbound data buffer */
840 real_length = amr_ioctl_buffer_length(au_length);
841 if (au_length != 0 && au_cmd[0] != 0x06) {
842 if ((dp = kmalloc(real_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
846 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
850 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
853 /* Allocate this now before the mutex gets held */
855 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
856 while ((ac = amr_alloccmd(sc)) == NULL)
857 lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
859 /* handle SCSI passthrough command */
860 if (au_cmd[0] == AMR_CMD_PASS) {
863 _ap = &ac->ac_ccb->ccb_pthru;
864 bzero(_ap, sizeof(struct amr_passthrough));
/* CDB follows a 3-byte header in au_cmd; len computed in elided code. */
868 _ap->ap_cdb_length = len;
869 bcopy(au_cmd + 3, _ap->ap_cdb, len);
871 /* build passthrough */
872 _ap->ap_timeout = au_cmd[len + 3] & 0x07;
873 _ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
874 _ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
875 _ap->ap_logical_drive_no = au_cmd[len + 4];
876 _ap->ap_channel = au_cmd[len + 5];
877 _ap->ap_scsi_id = au_cmd[len + 6];
878 _ap->ap_request_sense_length = 14;
879 _ap->ap_data_transfer_length = au_length;
880 /* XXX what about the request-sense area? does the caller want it? */
883 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
884 ac->ac_flags = AMR_CMD_CCB;
887 /* direct command to controller */
888 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
890 /* copy pertinent mailbox items */
891 mbi->mb_command = au_cmd[0];
892 mbi->mb_channel = au_cmd[1];
893 mbi->mb_param = au_cmd[2];
894 mbi->mb_pad[0] = au_cmd[3];
895 mbi->mb_drive = au_cmd[4];
899 /* build the command */
901 ac->ac_length = real_length;
902 ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
904 /* run the command */
905 error = amr_wait_command(ac);
906 lockmgr(&sc->amr_list_lock, LK_RELEASE);
910 /* copy out data and set status */
911 if (au_length != 0) {
912 error = copyout(dp, au_buffer, au_length);
914 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
916 debug(2, "%p status 0x%x", dp, ac->ac_status);
917 *au_statusp = ac->ac_status;
921  * At this point, we know that there is a lock held and that these
922  * objects have been allocated.
924 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
927 lockmgr(&sc->amr_list_lock, LK_RELEASE);
931 if (logical_drives_changed)
932 amr_rescan_drives(dev);
938 /********************************************************************************
939 ********************************************************************************
941 ********************************************************************************
942 ********************************************************************************/
944 /********************************************************************************
945 * Perform a periodic check of the controller status
/* Callout handler: placeholder status checks, then re-arms itself at 1 Hz. */
948 amr_periodic(void *data)
950 struct amr_softc *sc = (struct amr_softc *)data;
954 /* XXX perform periodic status checks here */
956 /* compensate for missed interrupts */
960 callout_reset(&sc->amr_timeout, hz, amr_periodic, sc);
964 /********************************************************************************
965 ********************************************************************************
967 ********************************************************************************
968 ********************************************************************************/
970 /********************************************************************************
971 * Interrogate the controller for the operational parameters we require.
/*
 * Probes extended-CDB support, then tries the 40LD ENQUIRY3 interface;
 * if that fails, falls back to the 8LD ENQUIRY commands.  Fills in
 * sc->amr_drive[], amr_maxdrives, amr_maxchan and amr_maxio, marks unused
 * drive slots with 0xffffffff, and caps amr_maxio at AMR_LIMITCMD.
 * Called both at attach time and for rescans.
 */
974 amr_query_controller(struct amr_softc *sc)
976 struct amr_enquiry3 *aex;
977 struct amr_prodinfo *ap;
978 struct amr_enquiry *ae;
983  * Greater than 10 byte cdb support
985 sc->support_ext_cdb = amr_support_ext_cdb(sc);
987 if(sc->support_ext_cdb) {
988 debug(2,"supports extended CDBs.");
992  * Try to issue an ENQUIRY3 command
994 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
995 AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
998  * Fetch current state of logical drives.
1000 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
1001 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
1002 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
1003 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
1004 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1005 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1010  * Get product info for channel count.
1012 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
1013 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
/* ENQUIRY3 worked: this is a 40-logical-drive class controller. */
1016 sc->amr_maxdrives = 40;
1017 sc->amr_maxchan = ap->ap_nschan;
1018 sc->amr_maxio = ap->ap_maxio;
1019 sc->amr_type |= AMR_TYPE_40LD;
/* Probe whether the firmware supports logical-drive deletion. */
1022 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1026 sc->amr_ld_del_supported = 1;
1027 device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1031 /* failed, try the 8LD ENQUIRY commands */
1032 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1033 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1034 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1037 ae->ae_signature = 0;
1041  * Fetch current state of logical drives.
1043 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1044 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
1045 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
1046 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1047 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1048 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1051 sc->amr_maxdrives = 8;
1052 sc->amr_maxchan = ae->ae_adapter.aa_channels;
1053 sc->amr_maxio = ae->ae_adapter.aa_maxio;
1058  * Mark remaining drives as unused.
/* 0xffffffff in al_size is the "unused slot" sentinel tested elsewhere. */
1060 for (; ldrv < AMR_MAXLD; ldrv++)
1061 sc->amr_drive[ldrv].al_size = 0xffffffff;
1064  * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
1065  * the controller's reported value, and lockups have been seen when we do.
1067 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1072 /********************************************************************************
1073 * Run a generic enquiry-style command.
/*
 * Allocates a command and a bufsize-byte response buffer, builds the
 * mailbox from cmd/cmdsub/cmdqual, and polls the controller (interrupts may
 * not be usable yet).  Returns the response buffer on success (caller frees
 * with kfree(.., M_AMR)) or NULL on failure; *status receives the
 * controller status either way.
 */
1076 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1078 struct amr_command *ac;
1088 /* get ourselves a command buffer */
1089 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1090 ac = amr_alloccmd(sc);
1091 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1094 /* allocate the response structure */
1095 if ((result = kmalloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1097 /* set command flags */
1099 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1101 /* point the command at our data */
1102 ac->ac_data = result;
1103 ac->ac_length = bufsize;
1105 /* build the command proper */
1106 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1112 /* can't assume that interrupts are going to work here, so play it safe */
1113 if (sc->amr_poll_command(ac))
1115 error = ac->ac_status;
1116 *status = ac->ac_status;
/* Release the command under the list lock; free the buffer on error. */
1119 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1122 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1123 if ((error != 0) && (result != NULL)) {
1124 kfree(result, M_AMR);
1130 /********************************************************************************
1131 * Flush the controller's internal cache, return status.
/* Polled (not interrupt-driven) so it is safe during shutdown/panic.
 * Returns the controller status (0 on success). */
1134 amr_flush(struct amr_softc *sc)
1136 struct amr_command *ac;
1139 /* get ourselves a command buffer */
1141 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1142 ac = amr_alloccmd(sc);
1143 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1146 /* set command flags */
1147 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1149 /* build the command proper */
1150 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1152 /* we have to poll, as the system may be going down or otherwise damaged */
1153 if (sc->amr_poll_command(ac))
1155 error = ac->ac_status;
/* Release the command under the list lock. */
1158 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1161 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1165 /********************************************************************************
1166 * Detect extented cdb >> greater than 10 byte cdb support
1167 * returns '1' means this support exist
1168 * returns '0' means this support doesn't exist
/* Issues a polled probe mailbox command; success status => extended CDBs. */
1171 amr_support_ext_cdb(struct amr_softc *sc)
1173 struct amr_command *ac;
1177 /* get ourselves a command buffer */
1179 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1180 ac = amr_alloccmd(sc);
1181 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1184 /* set command flags */
1185 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1187 /* build the command proper */
1188 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1193 /* we have to poll, as the system may be going down or otherwise damaged */
1194 if (sc->amr_poll_command(ac))
1196 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
/* Release the command under the list lock. */
1201 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1204 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1208 /********************************************************************************
1209 * Try to find I/O work for the controller from one or more of the work queues.
1211 * We make the assumption that if the controller is not ready to take a command
1212 * at some given time, it will generate an interrupt at some later time when
/*
 * Feed commands to the controller: ready queue first, then the bio queue,
 * then the CAM hook, until nothing is pending or the hardware pushes back.
 */
1216 amr_startio(struct amr_softc *sc)
1218 struct amr_command *ac;
1220 /* spin until something prevents us from doing any work */
1223 /* Don't bother to queue commands no bounce buffers are available. */
1224 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1227 /* try to get a ready command */
1228 ac = amr_dequeue_ready(sc);
1230 /* if that failed, build a command from a bio */
1232 (void)amr_bio_command(sc, &ac);
1234 /* if that failed, build a command from a ccb */
1235 if ((ac == NULL) && (sc->amr_cam_command != NULL))
1236 sc->amr_cam_command(sc, &ac);
1238 /* if we don't have anything to do, give up */
1242 /* try to give the command to the controller; if this fails save it for later and give up */
1243 if (amr_start(ac)) {
1244 debug(2, "controller busy, command deferred");
1245 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
1251 /********************************************************************************
1252 * Handle completion of an I/O command.
/*
 * ac_complete callback for bio-originated commands: flags the buf with
 * B_ERROR on failure (rate-limiting the console message), then hands the
 * bio back to the amrd disk layer and releases the command.
 */
1255 amr_completeio(struct amr_command *ac)
1257 struct amr_softc *sc = ac->ac_sc;
/* Shared rate-limit state for the error printf below. */
1258 static struct timeval lastfail;
1260 struct buf *bp = ac->ac_bio->bio_buf;
1262 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
1264 bp->b_flags |= B_ERROR;
1266 if (ppsratecheck(&lastfail, &curfail, 1))
1267 device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status);
1268 /* amr_printcommand(ac);*/
1270 amrd_intr(ac->ac_bio);
1271 lockmgr(&ac->ac_sc->amr_list_lock, LK_EXCLUSIVE);
1273 lockmgr(&ac->ac_sc->amr_list_lock, LK_RELEASE);
1276 /********************************************************************************
1277 ********************************************************************************
1279 ********************************************************************************
1280 ********************************************************************************/
1282 /********************************************************************************
1283 * Convert a bio off the top of the bio queue into a command.
1286 amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1288 struct amr_command *ac;
1289 struct amrd_softc *amrd;
1301 if ((ac = amr_alloccmd(sc)) == NULL)
1304 /* get a bio to work on */
1305 if ((bio = amr_dequeue_bio(sc)) == NULL) {
1310 /* connect the bio to the command */
1312 ac->ac_complete = amr_completeio;
1314 ac->ac_data = bp->b_data;
1315 ac->ac_length = bp->b_bcount;
	/* choose opcode and data direction from the buf command;
	 * SG64-capable controllers get the 64-bit read/write variants */
1317 switch (bp->b_cmd) {
1319 ac->ac_flags |= AMR_CMD_DATAIN;
1320 if (AMR_IS_SG64(sc)) {
1321 cmd = AMR_CMD_LREAD64;
1322 ac->ac_flags |= AMR_CMD_SG64;
1324 cmd = AMR_CMD_LREAD;
1327 ac->ac_flags |= AMR_CMD_DATAOUT;
1328 if (AMR_IS_SG64(sc)) {
1329 cmd = AMR_CMD_LWRITE64;
1330 ac->ac_flags |= AMR_CMD_SG64;
1332 cmd = AMR_CMD_LWRITE;
1335 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1336 cmd = AMR_CMD_FLUSH;
1339 panic("Invalid bio command");
	/* translate the amrd unit into a controller logical-drive number */
1341 amrd = (struct amrd_softc *)bio->bio_driver_info;
1342 driveno = amrd->amrd_drive - sc->amr_drive;
1343 blkcount = (bp->b_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1345 ac->ac_mailbox.mb_command = cmd;
1346 if (bp->b_cmd & (BUF_CMD_READ|BUF_CMD_WRITE)) {
1347 ac->ac_mailbox.mb_blkcount = blkcount;
1348 ac->ac_mailbox.mb_lba = bio->bio_offset / AMR_BLKSIZE;
	/* sanity-check the transfer against the logical drive size */
1349 if (((bio->bio_offset / AMR_BLKSIZE) + blkcount) > sc->amr_drive[driveno].al_size) {
1350 device_printf(sc->amr_dev,
1351 "I/O beyond end of unit (%lld,%d > %lu)\n",
1352 (long long)(bio->bio_offset / AMR_BLKSIZE), blkcount,
1353 (u_long)sc->amr_drive[driveno].al_size);
1356 ac->ac_mailbox.mb_drive = driveno;
1357 if (sc->amr_state & AMR_STATE_REMAP_LD)
1358 ac->ac_mailbox.mb_drive |= 0x80;
1360 /* we fill in the s/g related data when the command is mapped */
1367 /********************************************************************************
1368 * Take a command, submit it to the controller and sleep until it completes
1369 * or fails. Interrupts must be enabled, returns nonzero on error.
1372 amr_wait_command(struct amr_command *ac)
1375 struct amr_softc *sc = ac->ac_sc;
1379 ac->ac_complete = NULL;
1380 ac->ac_flags |= AMR_CMD_SLEEP;
1381 if ((error = amr_start(ac)) != 0) {
	/* sleep on the command; amr_complete() wakes us when BUSY clears */
1385 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1386 error = lksleep(ac,&sc->amr_list_lock, 0, "amrwcmd", 0);
1392 /********************************************************************************
1393 * Take a command, submit it to the controller and busy-wait for it to return.
1394 * Returns nonzero on error. Can be safely called with interrupts enabled.
1397 amr_std_poll_command(struct amr_command *ac)
1399 struct amr_softc *sc = ac->ac_sc;
1404 ac->ac_complete = NULL;
1405 if ((error = amr_start(ac)) != 0)
1411 * Poll for completion, although the interrupt handler may beat us to it.
1412 * Note that the timeout here is somewhat arbitrary.
1416 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1417 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1420 /* XXX the slot is now marked permanently busy */
1422 device_printf(sc->amr_dev, "polled command timeout\n");
/*
 * bus_dma callback for the polled-command path: build the s/g list,
 * patch the mailbox, then hand the command to the poll routine.
 */
1428 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1430 struct amr_command *ac = arg;
1431 struct amr_softc *sc = ac->ac_sc;
1435 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1436 ac->ac_status = AMR_STATUS_ABORTED;
1440 amr_setup_sg(arg, segs, nsegs, err);
1442 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1443 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1444 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1445 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1446 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1447 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1449 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1450 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1451 if (AC_IS_SG64(ac)) {
1453 ac->ac_sg64_lo = ac->ac_sgbusaddr;
	/* submit and busy-wait via the controller-specific poll routine */
1456 sc->amr_poll_command1(sc, ac);
1459 /********************************************************************************
1460 * Take a command, submit it to the controller and busy-wait for it to return.
1461 * Returns nonzero on error. Can be safely called with interrupts enabled.
1464 amr_quartz_poll_command(struct amr_command *ac)
1466 struct amr_softc *sc = ac->ac_sc;
	/* select the 32- or 64-bit DMA tag/map to match the command */
1473 if (AC_IS_SG64(ac)) {
1474 ac->ac_tag = sc->amr_buffer64_dmat;
1475 ac->ac_datamap = ac->ac_dma64map;
1477 ac->ac_tag = sc->amr_buffer_dmat;
1478 ac->ac_datamap = ac->ac_dmamap;
1481 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1482 if (ac->ac_data != NULL && ac->ac_length != 0) {
1483 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1484 ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
	/* no data buffer: poll directly without a DMA mapping */
1488 error = amr_quartz_poll_command1(sc, ac);
/*
 * Core of the quartz polled path: wait for the adapter to go idle,
 * write the mailbox by hand and spin on the handshake fields.
 */
1495 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1499 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
1500 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
	/* interrupts are off: sleep-wait for outstanding slots to drain */
1502 while (sc->amr_busyslots) {
1503 lksleep(sc, &sc->amr_hw_lock, PCATCH, "amrpoll", hz);
	/* still busy after waiting: give up and undo the DMA mapping */
1509 if(sc->amr_busyslots) {
1510 device_printf(sc->amr_dev, "adapter is busy\n");
1511 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
1512 if (ac->ac_data != NULL) {
1513 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
	/* copy the command into the shared mailbox */
1520 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1522 /* clear the poll/ack fields in the mailbox */
1523 sc->amr_mailbox->mb_ident = 0xFE;
1524 sc->amr_mailbox->mb_nstatus = 0xFF;
1525 sc->amr_mailbox->mb_status = 0xFF;
1526 sc->amr_mailbox->mb_poll = 0;
1527 sc->amr_mailbox->mb_ack = 0;
1528 sc->amr_mailbox->mb_busy = 1;
1530 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
	/* busy-wait for the firmware to post completion count and status */
1532 while(sc->amr_mailbox->mb_nstatus == 0xFF)
1534 while(sc->amr_mailbox->mb_status == 0xFF)
1536 ac->ac_status=sc->amr_mailbox->mb_status;
1537 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
	/* complete the poll/ack handshake (0x77 is the firmware's marker) */
1538 while(sc->amr_mailbox->mb_poll != 0x77)
1540 sc->amr_mailbox->mb_poll = 0;
1541 sc->amr_mailbox->mb_ack = 0x77;
1543 /* acknowledge that we have the commands */
1544 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1545 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1547 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
1549 /* unmap the command's data buffer */
1550 if (ac->ac_flags & AMR_CMD_DATAIN) {
1551 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1553 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1554 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1556 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
/*
 * Release the busy-command slot held by (ac) and decrement the
 * outstanding-command count.
 */
1562 amr_freeslot(struct amr_command *ac)
1564 struct amr_softc *sc = ac->ac_sc;
1570 if (sc->amr_busycmd[slot] == NULL)
1571 panic("amr: slot %d not busy?", slot);
1573 sc->amr_busycmd[slot] = NULL;
1574 atomic_subtract_int(&sc->amr_busyslots, 1);
1579 /********************************************************************************
1580 * Map/unmap (ac)'s data in the controller's addressable space as required.
1582 * These functions may be safely called multiple times on a given command.
1585 amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1587 struct amr_command *ac = (struct amr_command *)arg;
1588 struct amr_sgentry *sg;
1589 struct amr_sg64entry *sg64;
1594 /* get base address of s/g table */
1595 sg = ac->ac_sg.sg32;
1596 sg64 = ac->ac_sg.sg64;
1598 if (AC_IS_SG64(ac)) {
	/* 64-bit path always uses the table; mb_physaddr is a sentinel */
1599 ac->ac_nsegments = nsegments;
1600 ac->ac_mb_physaddr = 0xffffffff;
1601 for (i = 0; i < nsegments; i++, sg64++) {
1602 sg64->sg_addr = segs[i].ds_addr;
1603 sg64->sg_count = segs[i].ds_len;
1606 /* decide whether we need to populate the s/g table */
1607 if (nsegments < 2) {
	/* single segment: pass its address directly in the mailbox */
1608 ac->ac_nsegments = 0;
1609 ac->ac_mb_physaddr = segs[0].ds_addr;
1611 ac->ac_nsegments = nsegments;
1612 ac->ac_mb_physaddr = ac->ac_sgbusaddr;
1613 for (i = 0; i < nsegments; i++, sg++) {
1614 sg->sg_addr = segs[i].ds_addr;
1615 sg->sg_count = segs[i].ds_len;
	/* sync the buffer for the direction(s) the command will use */
1621 if (ac->ac_flags & AMR_CMD_DATAIN)
1622 flags |= BUS_DMASYNC_PREREAD;
1623 if (ac->ac_flags & AMR_CMD_DATAOUT)
1624 flags |= BUS_DMASYNC_PREWRITE;
1625 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1626 ac->ac_flags |= AMR_CMD_MAPPED;
/*
 * bus_dma callback for the interrupt-driven path: build the s/g list,
 * patch the mailbox and submit the command to the controller.
 */
1630 amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1632 struct amr_command *ac = arg;
1633 struct amr_softc *sc = ac->ac_sc;
1637 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1642 amr_setup_sg(arg, segs, nsegs, err);
1644 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1645 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1646 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1647 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1648 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1649 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1651 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1652 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1653 if (AC_IS_SG64(ac)) {
1655 ac->ac_sg64_lo = ac->ac_sgbusaddr;
	/* controller busy: freeze the queue and retry the command later */
1658 if (sc->amr_submit_command(ac) == EBUSY) {
1660 amr_requeue_ready(ac);
/*
 * bus_dma callback for passthrough (CCB) commands: point the mailbox at
 * the ccb, fill in the passthrough s/g fields and submit.
 */
1665 amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1667 struct amr_command *ac = arg;
1668 struct amr_softc *sc = ac->ac_sc;
1669 struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1670 struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1673 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1678 /* Set up the mailbox portion of the command to point at the ccb */
1679 ac->ac_mailbox.mb_nsgelem = 0;
1680 ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1682 amr_setup_sg(arg, segs, nsegs, err);
	/* the s/g info lives inside the (extended) passthrough structure */
1684 switch (ac->ac_mailbox.mb_command) {
1685 case AMR_CMD_EXTPASS:
1686 aep->ap_no_sg_elements = ac->ac_nsegments;
1687 aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1690 ap->ap_no_sg_elements = ac->ac_nsegments;
1691 ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1694 panic("Unknown ccb command");
	/* controller busy: freeze the queue and retry the command later */
1697 if (sc->amr_submit_command(ac) == EBUSY) {
1699 amr_requeue_ready(ac);
/*
 * Map (ac)'s data into bus space (choosing the ccb or plain-data
 * callback) and submit it; data-less commands are submitted directly.
 */
1704 amr_mapcmd(struct amr_command *ac)
1706 bus_dmamap_callback_t *cb;
1707 struct amr_softc *sc = ac->ac_sc;
	/* select the 32- or 64-bit DMA tag/map to match the command */
1711 if (AC_IS_SG64(ac)) {
1712 ac->ac_tag = sc->amr_buffer64_dmat;
1713 ac->ac_datamap = ac->ac_dma64map;
1715 ac->ac_tag = sc->amr_buffer_dmat;
1716 ac->ac_datamap = ac->ac_dmamap;
1719 if (ac->ac_flags & AMR_CMD_CCB)
1722 cb = amr_setup_data;
1724 /* if the command involves data at all, and hasn't been mapped */
1725 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1726 /* map the data buffers into bus space and build the s/g list */
1727 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1728 ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1729 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
	/* no data: submit immediately; requeue if the controller is busy */
1732 if (sc->amr_submit_command(ac) == EBUSY) {
1734 amr_requeue_ready(ac);
/*
 * Undo the DMA mapping created by amr_mapcmd()/amr_setup_sg():
 * post-sync in the direction(s) used, then unload the map.
 */
1742 amr_unmapcmd(struct amr_command *ac)
1748 /* if the command involved data at all and was mapped */
1749 if (ac->ac_flags & AMR_CMD_MAPPED) {
1751 if (ac->ac_data != NULL) {
1754 if (ac->ac_flags & AMR_CMD_DATAIN)
1755 flag |= BUS_DMASYNC_POSTREAD;
1756 if (ac->ac_flags & AMR_CMD_DATAOUT)
1757 flag |= BUS_DMASYNC_POSTWRITE;
1759 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1760 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1763 ac->ac_flags &= ~AMR_CMD_MAPPED;
/*
 * Abort a command whose DMA load failed: mark it aborted and run it
 * through the normal completion path.  Caller must hold amr_list_lock.
 */
1768 amr_abort_load(struct amr_command *ac)
1771 struct amr_softc *sc = ac->ac_sc;
1773 KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0);
1775 ac->ac_status = AMR_STATUS_ABORTED;
1776 amr_init_qhead(&head);
1777 amr_enqueue_completed(ac, &head);
	/* amr_complete() must run without the list lock; drop and retake it */
1779 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1780 amr_complete(sc, &head);
1781 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1784 /********************************************************************************
1785 * Take a command and give it to the controller, returns 0 if successful, or
1786 * EBUSY if the command should be retried later.
1789 amr_start(struct amr_command *ac)
1791 struct amr_softc *sc;
1797 /* mark command as busy so that polling consumer can tell */
1799 ac->ac_flags |= AMR_CMD_BUSY;
1801 /* get a command slot (freed in amr_done) */
1803 if (sc->amr_busycmd[slot] != NULL)
1804 panic("amr: slot %d busy?", slot);
1805 sc->amr_busycmd[slot] = ac;
1806 atomic_add_int(&sc->amr_busyslots, 1);
1808 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1809 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1811 * Memory resources are short, so free the slot and let this be tried
1820 /********************************************************************************
1821 * Extract one or more completed commands from the controller (sc)
1823 * Returns nonzero if any commands on the work queue were marked as completed.
1827 amr_done(struct amr_softc *sc)
1830 struct amr_command *ac;
1831 struct amr_mailbox mbox;
1836 /* See if there's anything for us to do */
1838 amr_init_qhead(&head);
1840 /* loop collecting completed commands */
1842 /* poll for a completed command's identifier and status */
1843 if (sc->amr_get_work(sc, &mbox)) {
1846 /* iterate over completed commands in this result */
1847 for (i = 0; i < mbox.mb_nstatus; i++) {
1848 /* get pointer to busy command */
	/* ident = slot + 1 (0 is reserved), so convert back to a slot index */
1849 idx = mbox.mb_completed[i] - 1;
1850 ac = sc->amr_busycmd[idx];
1852 /* really a busy command? */
1855 /* pull the command from the busy index */
1858 /* save status for later use */
1859 ac->ac_status = mbox.mb_status;
1860 amr_enqueue_completed(ac, &head);
1861 debug(3, "completed command with status %x", mbox.mb_status);
1863 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1867 break; /* no work */
1870 /* handle completion and timeouts */
1871 amr_complete(sc, &head);
1876 /********************************************************************************
1877 * Do completion processing on done commands on (sc)
1881 amr_complete(void *context, ac_qhead_t *head)
1883 struct amr_softc *sc = (struct amr_softc *)context;
1884 struct amr_command *ac;
1888 /* pull completed commands off the queue */
1890 ac = amr_dequeue_completed(sc, head);
1894 /* unmap the command's data buffer */
1898 * Is there a completion handler?
1900 if (ac->ac_complete != NULL) {
1901 /* unbusy the command */
1902 ac->ac_flags &= ~AMR_CMD_BUSY;
1903 ac->ac_complete(ac);
1906 * Is someone sleeping on this one?
1909 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1910 ac->ac_flags &= ~AMR_CMD_BUSY;
1911 if (ac->ac_flags & AMR_CMD_SLEEP) {
1912 /* unbusy the command */
1915 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1918 if(!sc->amr_busyslots) {
	/* unfreeze the queue now that resources have been released */
1923 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1924 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1926 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1929 /********************************************************************************
1930 ********************************************************************************
1931 Command Buffer Management
1932 ********************************************************************************
1933 ********************************************************************************/
1935 /********************************************************************************
1936 * Get a new command buffer.
1938 * This may return NULL in low-memory cases.
1940 * If possible, we recycle a command buffer that's been used before.
1942 struct amr_command *
1943 amr_alloccmd(struct amr_softc *sc)
1945 struct amr_command *ac;
1949 ac = amr_dequeue_free(sc);
	/* none free: freeze the queue until a command is released */
1951 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1955 /* clear out significant fields */
1957 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1961 ac->ac_complete = NULL;
1964 ac->ac_datamap = NULL;
1968 /********************************************************************************
1969 * Release a command buffer for recycling.
1972 amr_releasecmd(struct amr_command *ac)
1976 amr_enqueue_free(ac);
1979 /********************************************************************************
1980 * Allocate a new command cluster and initialise it.
1983 amr_alloccmd_cluster(struct amr_softc *sc)
1985 struct amr_command_cluster *acc;
1986 struct amr_command *ac;
1990 * If we haven't found the real limit yet, let us have a couple of
1991 * commands in order to be able to probe.
1993 if (sc->amr_maxio == 0)
1996 if (sc->amr_nextslot > sc->amr_maxio)
1998 acc = kmalloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
2000 nextslot = sc->amr_nextslot;
2001 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
2002 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
2003 lockmgr(&sc->amr_list_lock, LK_RELEASE);
	/* initialise every command in the cluster and give it a slot */
2004 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2005 ac = &acc->acc_command[i];
2007 ac->ac_slot = nextslot;
2010 * The SG table for each slot is a fixed size and is assumed to
2011 * to hold 64-bit s/g objects when the driver is configured to do
2012 * 64-bit DMA. 32-bit DMA commands still use the same table, but
2013 * cast down to 32-bit objects.
2015 if (AMR_IS_SG64(sc)) {
2016 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2017 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
2018 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
2020 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2021 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
2022 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
	/* each slot also owns a fixed ccb in the shared ccb area */
2025 ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
2026 ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
2027 (ac->ac_slot * sizeof(union amr_ccb));
2029 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
2031 if (AMR_IS_SG64(sc) &&
2032 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
2035 if (++nextslot > sc->amr_maxio)
2038 sc->amr_nextslot = nextslot;
2042 /********************************************************************************
2043 * Free a command cluster
2046 amr_freecmd_cluster(struct amr_command_cluster *acc)
2048 struct amr_softc *sc = acc->acc_command[0].ac_sc;
2051 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
	/* a NULL ac_sc marks a command that was never initialised */
2052 if (acc->acc_command[i].ac_sc == NULL)
2054 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2055 if (AMR_IS_SG64(sc))
2056 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2061 /********************************************************************************
2062 ********************************************************************************
2063 Interface-specific Shims
2064 ********************************************************************************
2065 ********************************************************************************/
2067 /********************************************************************************
2068 * Tell the controller that the mailbox contains a valid command
2071 amr_quartz_submit_command(struct amr_command *ac)
2073 struct amr_softc *sc = ac->ac_sc;
2074 static struct timeval lastfail;
	/* briefly wait for the mailbox busy flag to clear */
2078 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2079 while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
2081 /* This is a no-op read that flushes pending mailbox updates */
2084 if (sc->amr_mailbox->mb_busy) {
2085 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
	/* rate-limited warning; after ~1000 retries assume the card is dead */
2086 if (ac->ac_retries++ > 1000) {
2087 if (ppsratecheck(&lastfail, &curfail, 1))
2088 device_printf(sc->amr_dev, "Too many retries on command %p. "
2089 "Controller is likely dead\n", ac);
2096 * Save the slot number so that we can locate this command when complete.
2097 * Note that ident = 0 seems to be special, so we don't use it.
2099 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2100 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2101 sc->amr_mailbox->mb_busy = 1;
2102 sc->amr_mailbox->mb_poll = 0;
2103 sc->amr_mailbox->mb_ack = 0;
2104 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2105 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
	/* ring the doorbell to tell the firmware the mailbox is valid */
2107 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2108 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
/*
 * Standard-interface variant of command submission: same mailbox
 * handshake as the quartz path, using the S-series doorbell registers.
 */
2113 amr_std_submit_command(struct amr_command *ac)
2115 struct amr_softc *sc = ac->ac_sc;
2116 static struct timeval lastfail;
2119 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2120 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2121 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
	/* rate-limited warning; after ~1000 retries assume the card is dead */
2122 if (ac->ac_retries++ > 1000) {
2123 if (ppsratecheck(&lastfail, &curfail, 1))
2124 device_printf(sc->amr_dev, "Too many retries on command %p. "
2125 "Controller is likely dead\n", ac);
2132 * Save the slot number so that we can locate this command when complete.
2133 * Note that ident = 0 seems to be special, so we don't use it.
2135 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2136 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2137 sc->amr_mailbox->mb_busy = 1;
2138 sc->amr_mailbox->mb_poll = 0;
2139 sc->amr_mailbox->mb_ack = 0;
2141 AMR_SPOST_COMMAND(sc);
2142 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2146 /********************************************************************************
2147 * Claim any work that the controller has completed; acknowledge completion,
2148 * save details of the completion in (mbsave)
2151 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2156 u_int8_t completed[46];
2162 /* work waiting for us? */
2163 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2165 /* acknowledge interrupt */
2166 AMR_QPUT_ODB(sc, AMR_QODB_READY);
	/* 0xff means the firmware hasn't written the field yet; spin */
2168 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2170 sc->amr_mailbox->mb_nstatus = 0xff;
2172 /* wait until fw wrote out all completions */
2173 for (i = 0; i < nstatus; i++) {
2174 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2176 sc->amr_mailbox->mb_completed[i] = 0xff;
2179 /* Save information for later processing */
2180 mbsave->mb_nstatus = nstatus;
2181 mbsave->mb_status = sc->amr_mailbox->mb_status;
2182 sc->amr_mailbox->mb_status = 0xff;
2184 for (i = 0; i < nstatus; i++)
2185 mbsave->mb_completed[i] = completed[i];
2187 /* acknowledge that we have the commands */
2188 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2191 #ifndef AMR_QUARTZ_GOFASTER
2193 * This waits for the controller to notice that we've taken the
2194 * command from it. It's very inefficient, and we shouldn't do it,
2195 * but if we remove this code, we stop completing commands under
2198 * Peter J says we shouldn't do this. The documentation says we
2199 * should. Who is right?
2201 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2202 ; /* XXX aiee! what if it dies? */
2206 worked = 1; /* got some work */
/*
 * Standard-interface variant: on a valid interrupt, snapshot the whole
 * mailbox (which holds the completed-command list) into (mbsave).
 */
2213 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2222 /* check for valid interrupt status */
2223 istat = AMR_SGET_ISTAT(sc);
2224 if ((istat & AMR_SINTR_VALID) != 0) {
2225 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2227 /* save mailbox, which contains a list of completed commands */
2228 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2230 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2237 /********************************************************************************
2238 * Notify the controller of the mailbox location.
2241 amr_std_attach_mailbox(struct amr_softc *sc)
2244 /* program the mailbox physical address */
2245 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2246 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2247 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2248 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2249 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2251 /* clear any outstanding interrupt and enable interrupts proper */
2252 AMR_SACK_INTERRUPT(sc);
2253 AMR_SENABLE_INTR(sc);
2256 #ifdef AMR_BOARD_INIT
2257 /********************************************************************************
2258 * Initialise the controller
2261 amr_quartz_init(struct amr_softc *sc)
2263 int status, ostatus;
2265 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
	/* report each init-status transition until the firmware says done */
2270 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2271 if (status != ostatus) {
2272 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2276 case AMR_QINIT_NOMEM:
2279 case AMR_QINIT_SCAN:
2280 /* XXX we could print channel/target here */
/*
 * Standard-interface variant of controller initialisation: poll the
 * init-status register, reporting each state change until done.
 */
2288 amr_std_init(struct amr_softc *sc)
2290 int status, ostatus;
2292 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2297 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2298 if (status != ostatus) {
2299 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2303 case AMR_SINIT_NOMEM:
2306 case AMR_SINIT_INPROG:
2307 /* XXX we could print channel/target here? */
2315 /********************************************************************************
2316 ********************************************************************************
2318 ********************************************************************************
2319 ********************************************************************************/
2321 /********************************************************************************
2322 * Identify the controller and print some information about it.
2325 amr_describe_controller(struct amr_softc *sc)
2327 struct amr_prodinfo *ap;
2328 struct amr_enquiry *ae;
2333 * Try to get 40LD product info, which tells us what the card is labelled as.
2335 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2336 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2337 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2345 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2347 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2348 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2350 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2353 * Try to work it out based on the PCI signatures.
2355 switch (pci_get_device(sc->amr_dev)) {
2357 prod = "Series 428";
2360 prod = "Series 434";
2363 prod = "unknown controller";
2367 device_printf(sc->amr_dev, "<unsupported controller>\n");
2372 * HP NetRaid controllers have a special encoding of the firmware and
2373 * BIOS versions. The AMI version seems to have it as strings whereas
2374 * the HP version does it with a leading uppercase character and two
2378 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2379 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2380 ae->ae_adapter.aa_firmware[1] < ' ' &&
2381 ae->ae_adapter.aa_firmware[0] < ' ' &&
2382 ae->ae_adapter.aa_bios[2] >= 'A' &&
2383 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2384 ae->ae_adapter.aa_bios[1] < ' ' &&
2385 ae->ae_adapter.aa_bios[0] < ' ') {
2387 /* this looks like we have an HP NetRaid version of the MegaRaid */
2389 if(ae->ae_signature == AMR_SIG_438) {
2390 /* the AMI 438 is a NetRaid 3si in HP-land */
2391 prod = "HP NetRaid 3si";
	/* HP encoding: letter plus two binary minor-version bytes */
2394 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2395 prod, ae->ae_adapter.aa_firmware[2],
2396 ae->ae_adapter.aa_firmware[1],
2397 ae->ae_adapter.aa_firmware[0],
2398 ae->ae_adapter.aa_bios[2],
2399 ae->ae_adapter.aa_bios[1],
2400 ae->ae_adapter.aa_bios[0],
2401 ae->ae_adapter.aa_memorysize);
	/* AMI encoding: plain 4-character version strings */
2403 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2404 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2405 ae->ae_adapter.aa_memorysize);
/*
 * Write (blks) blocks from (data) to logical drive (unit) at (lba),
 * using the polled path — used for crash dumps, so no interrupts assumed.
 */
2411 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2413 struct amr_command *ac;
2418 sc->amr_state |= AMR_STATE_INTEN;
2420 /* get ourselves a command buffer */
2421 if ((ac = amr_alloccmd(sc)) == NULL)
2423 /* set command flags */
2424 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2426 /* point the command at our data */
2428 ac->ac_length = blks * AMR_BLKSIZE;
2430 /* build the command proper */
2431 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2432 ac->ac_mailbox.mb_blkcount = blks;
2433 ac->ac_mailbox.mb_lba = lba;
2434 ac->ac_mailbox.mb_drive = unit;
2436 /* can't assume that interrupts are going to work here, so play it safe */
2437 if (sc->amr_poll_command(ac))
2439 error = ac->ac_status;
2445 sc->amr_state &= ~AMR_STATE_INTEN;
2452 /********************************************************************************
2453 * Print the command (ac) in human-readable format
2457 amr_printcommand(struct amr_command *ac)
2459 struct amr_softc *sc = ac->ac_sc;
2460 struct amr_sgentry *sg;
2463 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2464 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2465 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2466 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2467 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2468 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2469 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2470 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2472 /* get base address of s/g table */
2473 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2474 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2475 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);