2 * Copyright (c) 2003-04 3ware, Inc.
3 * Copyright (c) 2000 Michael Smith
4 * Copyright (c) 2000 BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * $DragonFly: src/sys/dev/raid/twa/twa_freebsd.c,v 1.9 2005/10/12 17:35:54 dillon Exp $
33 * 3ware driver for 9000 series storage controllers.
35 * Author: Vinod Kashyap
39 #include "twa_includes.h"
/*
 * Forward declarations for the bus_dmamap_load() callbacks used below:
 * one for per-request data buffers, one for the command-packet pool.
 */
41 static void twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs,
42 int nsegments, int error);
43 static void twa_setup_request_dmamap(void *arg, bus_dma_segment_t *segs,
44 int nsegments, int error);
/* Malloc type used for request packets and alignment (bounce) buffers. */
46 MALLOC_DEFINE(TWA_MALLOC_CLASS, "twa commands", "twa commands");
/* Entry points for the "twa%d" control (character) device. */
49 static d_open_t twa_open;
50 static d_close_t twa_close;
51 static d_ioctl_t twa_ioctl_wrapper;
/* Character-device switch wiring the entry points above to the kernel. */
53 static struct cdevsw twa_cdevsw = {
/* devclass used to look softcs up by unit number (see twa_open/twa_report). */
71 static devclass_t twa_devclass;
75 * Function name: twa_open
76 * Description: Called when the controller is opened.
77 * Simply marks the controller as open.
79 * Input: dev -- control device corresponding to the ctlr
80 * flags -- mode of open
81 * fmt -- device type (character/block etc.)
82 * proc -- current process
84 * Return value: 0 -- success
88 twa_open(dev_t dev, int flags, int fmt, d_thread_t *proc)
/* The control device minor number is the controller unit number. */
90 int unit = minor(dev);
91 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
/* Mark the controller open; twa_detach checks this flag before detaching. */
93 sc->twa_state |= TWA_STATE_OPEN;
100 * Function name: twa_close
101 * Description: Called when the controller is closed.
102 * Simply marks the controller as not open.
104 * Input: dev -- control device corresponding to the ctlr
105 * flags -- mode of corresponding open
106 * fmt -- device type (character/block etc.)
107 * proc -- current process
109 * Return value: 0 -- success
113 twa_close(dev_t dev, int flags, int fmt, d_thread_t *proc)
/* Same unit-to-softc lookup as twa_open. */
115 int unit = minor(dev);
116 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
/* Clear the open flag set in twa_open, re-enabling detach. */
118 sc->twa_state &= ~TWA_STATE_OPEN;
125 * Function name: twa_ioctl_wrapper
126 * Description: Called when an ioctl is posted to the controller.
127 * Simply calls the ioctl handler.
129 * Input: dev -- control device corresponding to the ctlr
131 * buf -- ptr to buffer in kernel memory, which is
132 * a copy of the input buffer in user-space
133 * flags -- mode of corresponding open
134 * proc -- current process
135 * Output: buf -- ptr to buffer in kernel memory, which will
136 * be copied to the output buffer in user-space
137 * Return value: 0 -- success
141 twa_ioctl_wrapper(dev_t dev, u_long cmd, caddr_t buf,
142 int flags, d_thread_t *proc)
/* si_drv1 is presumably set to the softc when the dev is created -- not visible here. */
144 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
/* Delegate all command handling to the common ioctl handler. */
146 return(twa_ioctl(sc, cmd, buf));
/* newbus/PCI device interface entry points for this driver. */
151 static int twa_probe (device_t dev);
152 static int twa_attach (device_t dev);
153 static void twa_free (struct twa_softc *sc);
154 static int twa_detach (device_t dev);
155 static int twa_shutdown (device_t dev);
156 static int twa_suspend (device_t dev);
157 static int twa_resume (device_t dev);
158 static void twa_pci_intr(void *arg);
159 static void twa_intrhook (void *arg);
/* Method table mapping the generic device interface onto the twa_* handlers. */
161 static device_method_t twa_methods[] = {
162 /* Device interface */
163 DEVMETHOD(device_probe, twa_probe),
164 DEVMETHOD(device_attach, twa_attach),
165 DEVMETHOD(device_detach, twa_detach),
166 DEVMETHOD(device_shutdown, twa_shutdown),
167 DEVMETHOD(device_suspend, twa_suspend),
168 DEVMETHOD(device_resume, twa_resume),
170 DEVMETHOD(bus_print_child, bus_generic_print_child),
171 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* Driver description: per-instance state is one struct twa_softc. */
175 static driver_t twa_pci_driver = {
178 sizeof(struct twa_softc)
/* Register the driver on the PCI bus under the "twa" devclass. */
181 DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
186 * Function name: twa_probe
187 * Description: Called at driver load time. Claims 9000 ctlrs.
189 * Input: dev -- bus device corresponding to the ctlr
191 * Return value: <= 0 -- success
195 twa_probe(device_t dev)
/* Static flag so the version banner below is printed only for the first ctlr. */
197 static u_int8_t first_ctlr = 1;
199 twa_dbg_print(3, "entered");
/* Claim only 3ware 9000-series devices by PCI vendor/device ID. */
201 if ((pci_get_vendor(dev) == TWA_VENDOR_ID) &&
202 (pci_get_device(dev) == TWA_DEVICE_ID_9K)) {
203 device_set_desc(dev, TWA_DEVICE_NAME);
204 /* Print the driver version only once. */
206 printf("3ware device driver for 9000 series storage controllers, version: %s\n",
207 TWA_DRIVER_VERSION_STRING);
218 * Function name: twa_attach
219 * Description: Allocates pci resources; updates sc; adds a node to the
220 * sysctl tree to expose the driver version; makes calls
221 * to initialize ctlr, and to attach to CAM.
223 * Input: dev -- bus device corresponding to the ctlr
225 * Return value: 0 -- success
229 twa_attach(device_t dev)
231 struct twa_softc *sc = device_get_softc(dev);
237 twa_dbg_dprint_enter(3, sc);
239 /* Initialize the softc structure. */
240 sc->twa_bus_dev = dev;
/* Expose hw.twaN.driver_version via a per-device sysctl subtree. */
242 sysctl_ctx_init(&sc->twa_sysctl_ctx);
243 sc->twa_sysctl_tree = SYSCTL_ADD_NODE(&sc->twa_sysctl_ctx,
244 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
245 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
246 if (sc->twa_sysctl_tree == NULL) {
247 twa_printf(sc, "Cannot add sysctl tree node.\n");
250 SYSCTL_ADD_STRING(&sc->twa_sysctl_ctx, SYSCTL_CHILDREN(sc->twa_sysctl_tree),
251 OID_AUTO, "driver_version", CTLFLAG_RD,
252 TWA_DRIVER_VERSION_STRING, 0, "TWA driver version");
254 /* Make sure we are going to be able to talk to this board. */
255 command = pci_read_config(dev, PCIR_COMMAND, 2);
/* The board is driven through an I/O port window, so port decoding must be on. */
256 if ((command & PCIM_CMD_PORTEN) == 0) {
257 twa_printf(sc, "Register window not available.\n");
261 /* Force the busmaster enable bit on, in case the BIOS forgot. */
262 command |= PCIM_CMD_BUSMASTEREN;
263 pci_write_config(dev, PCIR_COMMAND, command, 2);
265 /* Allocate the PCI register window. */
266 res_id = TWA_IO_CONFIG_REG;
267 if ((sc->twa_io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &res_id,
268 0, ~0, 1, RF_ACTIVE)) == NULL) {
269 twa_printf(sc, "can't allocate register window.\n");
/* Cache tag/handle so register access doesn't need the resource each time. */
273 sc->twa_bus_tag = rman_get_bustag(sc->twa_io_res);
274 sc->twa_bus_handle = rman_get_bushandle(sc->twa_io_res);
276 /* Allocate and connect our interrupt. */
278 if ((sc->twa_irq_res = bus_alloc_resource(sc->twa_bus_dev, SYS_RES_IRQ,
280 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
281 twa_printf(sc, "Can't allocate interrupt.\n");
285 if (bus_setup_intr(sc->twa_bus_dev, sc->twa_irq_res, 0,
286 twa_pci_intr, sc, &sc->twa_intr_handle, NULL)) {
287 twa_printf(sc, "Can't set up interrupt.\n");
292 /* Initialize the driver for this controller. */
293 if ((error = twa_setup(sc))) {
298 /* Print some information about the controller and configuration. */
299 twa_describe_controller(sc);
301 /* Create the control device. */
302 cdevsw_add(&twa_cdevsw, -1, device_get_unit(sc->twa_bus_dev));
303 xdev = make_dev(&twa_cdevsw, device_get_unit(sc->twa_bus_dev),
304 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
305 "twa%d", device_get_unit(sc->twa_bus_dev));
309 * Schedule ourselves to bring the controller up once interrupts are
310 * available. This isn't strictly necessary, since we disable
311 * interrupts while probing the controller, but it is more in keeping
312 * with common practice for other disk devices.
314 sc->twa_ich.ich_func = twa_intrhook;
315 sc->twa_ich.ich_arg = sc;
316 sc->twa_ich.ich_desc = "twa";
317 if (config_intrhook_establish(&sc->twa_ich) != 0) {
318 twa_printf(sc, "Can't establish configuration hook.\n");
/* Attach to CAM last, once the controller itself is up. */
323 if ((error = twa_cam_setup(sc))) {
333 * Function name: twa_free
334 * Description: Performs clean-up at the time of going down.
336 * Input: sc -- ptr to per ctlr structure
341 twa_free(struct twa_softc *sc)
343 struct twa_request *tr;
345 twa_dbg_dprint_enter(3, sc);
347 /* Detach from CAM */
350 /* Destroy dma handles. */
/* Unload the command-packet pool map, then destroy each request's data map. */
352 bus_dmamap_unload(sc->twa_dma_tag, sc->twa_cmd_map);
353 while ((tr = twa_dequeue_free(sc)) != NULL)
354 bus_dmamap_destroy(sc->twa_dma_tag, tr->tr_dma_map);
356 /* Free all memory allocated so far. */
358 free(sc->twa_req_buf, TWA_MALLOC_CLASS);
359 if (sc->twa_cmd_pkt_buf)
360 bus_dmamem_free(sc->twa_dma_tag, sc->twa_cmd_pkt_buf,
/* AEN queue entries are allocated as one chunk; freeing [0] frees it all. */
362 if (sc->twa_aen_queue[0])
363 free (sc->twa_aen_queue[0], M_DEVBUF);
365 /* Destroy the data-transfer DMA tag. */
367 bus_dma_tag_destroy(sc->twa_dma_tag);
369 /* Disconnect the interrupt handler. */
370 if (sc->twa_intr_handle)
371 bus_teardown_intr(sc->twa_bus_dev, sc->twa_irq_res,
372 sc->twa_intr_handle);
373 if (sc->twa_irq_res != NULL)
374 bus_release_resource(sc->twa_bus_dev, SYS_RES_IRQ,
377 /* Release the register window mapping. */
378 if (sc->twa_io_res != NULL)
379 bus_release_resource(sc->twa_bus_dev, SYS_RES_IOPORT,
380 TWA_IO_CONFIG_REG, sc->twa_io_res);
/* Remove the control device created in twa_attach, then its sysctl tree. */
382 cdevsw_remove(&twa_cdevsw, -1, device_get_unit(sc->twa_bus_dev));
384 sysctl_ctx_free(&sc->twa_sysctl_ctx);
390 * Function name: twa_detach
391 * Description: Called when the controller is being detached from
394 * Input: dev -- bus device corresponding to the ctlr
396 * Return value: 0 -- success
400 twa_detach(device_t dev)
402 struct twa_softc *sc = device_get_softc(dev);
405 twa_dbg_dprint_enter(3, sc);
/* Refuse to detach while the control device is held open (see twa_open). */
409 if (sc->twa_state & TWA_STATE_OPEN)
412 /* Shut the controller down. */
413 if ((error = twa_shutdown(dev)))
416 /* Free all resources associated with this controller. */
428 * Function name: twa_shutdown
429 * Description: Called at unload/shutdown time. Lets the controller
430 * know that we are going down.
432 * Input: dev -- bus device corresponding to the ctlr
434 * Return value: 0 -- success
438 twa_shutdown(device_t dev)
440 struct twa_softc *sc = device_get_softc(dev);
443 twa_dbg_dprint_enter(3, sc);
447 /* Disconnect from the controller. */
448 error = twa_deinit_ctlr(sc);
457 * Function name: twa_suspend
458 * Description: Called to suspend I/O before hot-swapping PCI ctlrs.
459 * Doesn't do much as of now.
461 * Input: dev -- bus device corresponding to the ctlr
463 * Return value: 0 -- success
467 twa_suspend(device_t dev)
469 struct twa_softc *sc = device_get_softc(dev);
471 twa_dbg_dprint_enter(3, sc);
/* Flag suspension and quiesce the board; twa_resume undoes both. */
474 sc->twa_state |= TWA_STATE_SUSPEND;
476 twa_disable_interrupts(sc);
485 * Function name: twa_resume
486 * Description: Called to resume I/O after hot-swapping PCI ctlrs.
487 * Doesn't do much as of now.
489 * Input: dev -- bus device corresponding to the ctlr
491 * Return value: 0 -- success
495 twa_resume(device_t dev)
497 struct twa_softc *sc = device_get_softc(dev);
499 twa_dbg_dprint_enter(3, sc);
/* Mirror image of twa_suspend: clear the flag, re-enable interrupts. */
501 sc->twa_state &= ~TWA_STATE_SUSPEND;
502 twa_enable_interrupts(sc);
510 * Function name: twa_pci_intr
511 * Description: Interrupt handler. Wrapper for twa_interrupt.
513 * Input: arg -- ptr to per ctlr structure
518 twa_pci_intr(void *arg)
/* arg is the softc registered via bus_setup_intr() in twa_attach. */
520 struct twa_softc *sc = (struct twa_softc *)arg;
528 * Function name: twa_intrhook
529 * Description: Callback for us to enable interrupts.
531 * Input: arg -- ptr to per ctlr structure
536 twa_intrhook(void *arg)
538 struct twa_softc *sc = (struct twa_softc *)arg;
540 twa_dbg_dprint(4, sc, "twa_intrhook Entered");
542 /* Pull ourselves off the intrhook chain. */
543 config_intrhook_disestablish(&sc->twa_ich);
545 /* Enable interrupts. */
546 twa_enable_interrupts(sc);
552 * Function name: twa_write_pci_config
553 * Description: Writes to the PCI config space.
555 * Input: sc -- ptr to per ctlr structure
556 * value -- value to be written
557 * size -- # of bytes to be written
562 twa_write_pci_config(struct twa_softc *sc, u_int32_t value, int size)
/*
 * Always writes at the PCIR_STATUS offset; status bits are write-1-to-clear,
 * so this is presumably used to clear PCI error status -- confirm with callers.
 */
564 pci_write_config(sc->twa_bus_dev, PCIR_STATUS, value, size);
570 * Function name: twa_alloc_req_pkts
571 * Description: Allocates memory for, and initializes request pkts,
572 * and queues them in the free queue.
574 * Input: sc -- ptr to per ctlr structure
575 * num_reqs-- # of request pkts to allocate and initialize.
577 * Return value: 0 -- success
581 twa_alloc_req_pkts(struct twa_softc *sc, int num_reqs)
583 struct twa_request *tr;
/* Host-side request tracking structures (not DMA'd to the board). */
586 sc->twa_req_buf = malloc(num_reqs * sizeof(struct twa_request),
587 TWA_MALLOC_CLASS, M_INTWAIT);
589 /* Allocate the bus DMA tag appropriate for PCI. */
590 if (bus_dma_tag_create(NULL, /* parent */
591 TWA_ALIGNMENT, /* alignment */
593 BUS_SPACE_MAXADDR, /* lowaddr */
594 BUS_SPACE_MAXADDR, /* highaddr */
595 NULL, NULL, /* filter, filterarg */
597 (sizeof(struct twa_command_packet)),/* maxsize */
598 TWA_MAX_SG_ELEMENTS, /* nsegments */
599 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
600 BUS_DMA_ALLOCNOW, /* flags */
601 &sc->twa_dma_tag /* tag */)) {
602 twa_printf(sc, "Can't allocate DMA tag.\n");
606 /* Allocate memory for cmd pkts. */
607 if (bus_dmamem_alloc(sc->twa_dma_tag,
608 (void *)(&(sc->twa_cmd_pkt_buf)),
609 BUS_DMA_WAITOK, &(sc->twa_cmd_map)))
/*
 * Load the command-packet pool; the callback records its bus address in
 * sc->twa_cmd_pkt_phys (see twa_setup_request_dmamap).
 */
612 bus_dmamap_load(sc->twa_dma_tag, sc->twa_cmd_map,
614 num_reqs * sizeof(struct twa_command_packet),
615 twa_setup_request_dmamap, sc, 0);
616 bzero(sc->twa_req_buf, num_reqs * sizeof(struct twa_request));
617 bzero(sc->twa_cmd_pkt_buf,
618 num_reqs * sizeof(struct twa_command_packet));
/* Wire each request to its command packet (virtual + bus address) and id. */
620 for (i = 0; i < num_reqs; i++) {
621 tr = &(sc->twa_req_buf[i]);
622 tr->tr_command = &(sc->twa_cmd_pkt_buf[i]);
623 tr->tr_cmd_phys = sc->twa_cmd_pkt_phys +
624 (i * sizeof(struct twa_command_packet));
625 tr->tr_request_id = i;
/* Request id -> request lookup table, used on completion. */
627 sc->twa_lookup[i] = tr;
630 * Create a map for data buffers. maxsize (256 * 1024) used in
631 * bus_dma_tag_create above should suffice the bounce page needs
632 * for data buffers, since the max I/O size we support is 128KB.
633 * If we supported I/O's bigger than 256KB, we would have to
634 * create a second dma_tag, with the appropriate maxsize.
636 if (bus_dmamap_create(sc->twa_dma_tag, 0,
640 /* Insert request into the free queue. */
641 twa_release_request(tr);
649 * Function name: twa_fillin_sgl
650 * Description: Fills in the scatter/gather list.
652 * Input: sgl -- ptr to sg list
653 * segs -- ptr to fill the sg list from
654 * nsegments--# of segments
659 twa_fillin_sgl(struct twa_sg *sgl, bus_dma_segment_t *segs, int nsegments)
/* Straight copy of each DMA segment's bus address/length into the ctlr's format. */
663 for (i = 0; i < nsegments; i++) {
664 sgl[i].address = segs[i].ds_addr;
665 sgl[i].length = segs[i].ds_len;
672 * Function name: twa_setup_data_dmamap
673 * Description: Callback of bus_dmamap_load for the buffer associated
674 * with data. Updates the cmd pkt (size/sgl_entries
675 * fields, as applicable) to reflect the number of sg
678 * Input: arg -- ptr to request pkt
679 * segs -- ptr to a list of segment descriptors
680 * nsegments--# of segments
681 * error -- 0 if no errors encountered before callback,
682 * non-zero if errors were encountered
687 twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs,
688 int nsegments, int error)
690 struct twa_request *tr = (struct twa_request *)arg;
691 struct twa_command_packet *cmdpkt = tr->tr_command;
692 struct twa_command_9k *cmd9k;
693 union twa_command_7k *cmd7k;
696 twa_dbg_dprint_enter(10, tr->tr_sc);
/*
 * If the load was deferred (EINPROGRESS in twa_map_request), new external
 * requests were blocked; unblock them now that the callback has fired.
 */
698 if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) &&
699 (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL))
700 twa_allow_new_requests(tr->tr_sc, (void *)(tr->tr_private));
/* Too many segments for the map: record the error on the request. */
702 if (error == EFBIG) {
703 tr->tr_error = error;
/* Fill the s/g list in whichever command format (9000 vs 7000) is in use. */
707 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
708 cmd9k = &(cmdpkt->command.cmd_pkt_9k);
709 twa_fillin_sgl(&(cmd9k->sg_list[0]), segs, nsegments);
/* sgl_entries was pre-set assuming one segment; adjust by the extra ones. */
710 cmd9k->sgl_entries += nsegments - 1;
712 /* It's a 7000 command packet. */
713 cmd7k = &(cmdpkt->command.cmd_pkt_7k);
714 if ((sgl_offset = cmdpkt->command.cmd_pkt_7k.generic.sgl_offset))
715 twa_fillin_sgl((struct twa_sg *)
716 (((u_int32_t *)cmd7k) + sgl_offset),
718 /* Modify the size field, based on sg address size. */
719 cmd7k->generic.size +=
720 ((TWA_64BIT_ADDRESSES ? 3 : 2) * nsegments);
/* Sync the data buffer for the direction(s) of the transfer before submit. */
723 if (tr->tr_flags & TWA_CMD_DATA_IN)
724 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
725 BUS_DMASYNC_PREREAD);
726 if (tr->tr_flags & TWA_CMD_DATA_OUT) {
728 * If we're using an alignment buffer, and we're
729 * writing data, copy the real data out.
731 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
732 bcopy(tr->tr_real_data, tr->tr_data, tr->tr_real_length);
733 bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
734 BUS_DMASYNC_PREWRITE);
736 error = twa_submit_io(tr);
740 twa_unmap_request(tr);
742 * If the caller had been returned EINPROGRESS, and he has
743 * registered a callback for handling completion, the callback
744 * will never get called because we were unable to submit the
745 * request. So, free up the request right here.
747 if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) && (tr->tr_callback))
748 twa_release_request(tr);
755 * Function name: twa_setup_request_dmamap
756 * Description: Callback of bus_dmamap_load for the buffer associated
759 * Input: arg -- ptr to request pkt
760 * segs -- ptr to a list of segment descriptors
761 * nsegments--# of segments
762 * error -- 0 if no errors encountered before callback,
763 * non-zero if errors were encountered
768 twa_setup_request_dmamap(void *arg, bus_dma_segment_t *segs,
769 int nsegments, int error)
771 struct twa_softc *sc = (struct twa_softc *)arg;
773 twa_dbg_dprint_enter(10, sc);
/*
 * The command-packet pool is one contiguous allocation, so only the first
 * segment's bus address matters; twa_alloc_req_pkts derives per-request
 * physical addresses from it.
 */
775 sc->twa_cmd_pkt_phys = segs[0].ds_addr;
781 * Function name: twa_map_request
782 * Description: Maps a cmd pkt and data associated with it, into
785 * Input: tr -- ptr to request pkt
787 * Return value: 0 -- success
791 twa_map_request(struct twa_request *tr)
793 struct twa_softc *sc = tr->tr_sc;
796 twa_dbg_dprint_enter(10, sc);
798 /* If the command involves data, map that too. */
799 if (tr->tr_data != NULL) {
801 * It's sufficient for the data pointer to be 4-byte aligned
802 * to work with 9000. However, if 4-byte aligned addresses
803 * are passed to bus_dmamap_load, we can get back sg elements
804 * that are not 512-byte multiples in size. So, we will let
805 * only those buffers that are 512-byte aligned to pass
806 * through, and bounce the rest, so as to make sure that we
807 * always get back sg elements that are 512-byte multiples
810 * DragonFly's malloc only guarentees X alignment when X is
811 * a power of 2, otherwise we would have to use contigalloc,
812 * which is nasty. Use malloc.
/* Misaligned or odd-sized buffer: substitute a 512-aligned bounce buffer. */
814 if (((vm_offset_t)tr->tr_data % 512) || (tr->tr_length % 512)) {
815 tr->tr_flags |= TWA_CMD_DATA_COPY_NEEDED;
816 tr->tr_real_data = tr->tr_data; /* save original data pointer */
817 tr->tr_real_length = tr->tr_length; /* save original data length */
/* Round the bounce-buffer length up to cover the real transfer. */
819 while (tr->tr_length < tr->tr_real_length)
821 tr->tr_data = malloc(tr->tr_length, TWA_MALLOC_CLASS, M_INTWAIT);
825 * Map the data buffer into bus space and build the s/g list.
827 if ((error = bus_dmamap_load(sc->twa_dma_tag, tr->tr_dma_map,
828 tr->tr_data, tr->tr_length,
829 twa_setup_data_dmamap, tr,
/*
 * EINPROGRESS: the callback will run later; block further external
 * requests until it does (twa_setup_data_dmamap re-allows them).
 */
831 if (error == EINPROGRESS) {
832 tr->tr_flags |= TWA_CMD_IN_PROGRESS;
833 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL)
834 twa_disallow_new_requests(sc);
837 /* Free alignment buffer if it was used. */
838 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
839 free(tr->tr_data, TWA_MALLOC_CLASS);
840 tr->tr_data = tr->tr_real_data; /* restore 'real' data pointer */
841 tr->tr_length = tr->tr_real_length;/* restore 'real' data length */
/* Pick up any error the dmamap callback recorded (e.g. EFBIG). */
845 error = tr->tr_error;
/* No data to map: submit the command packet directly. */
848 if ((error = twa_submit_io(tr)))
849 twa_unmap_request(tr);
857 * Function name: twa_unmap_request
858 * Description: Undoes the mapping done by twa_map_request.
860 * Input: tr -- ptr to request pkt
865 twa_unmap_request(struct twa_request *tr)
867 struct twa_softc *sc = tr->tr_sc;
870 twa_dbg_dprint_enter(10, sc);
872 /* If the command involved data, unmap that too. */
873 if (tr->tr_data != NULL) {
/* Fetch the controller status from whichever packet format was used. */
874 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K)
875 cmd_status = tr->tr_command->command.cmd_pkt_9k.status;
877 cmd_status = tr->tr_command->command.cmd_pkt_7k.generic.status;
879 if (tr->tr_flags & TWA_CMD_DATA_IN) {
880 bus_dmamap_sync(sc->twa_dma_tag,
881 tr->tr_dma_map, BUS_DMASYNC_POSTREAD);
884 * If we are using a bounce buffer, and we are reading
885 * data, copy the real data in.
887 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
889 bcopy(tr->tr_data, tr->tr_real_data,
892 if (tr->tr_flags & TWA_CMD_DATA_OUT)
893 bus_dmamap_sync(sc->twa_dma_tag, tr->tr_dma_map,
894 BUS_DMASYNC_POSTWRITE);
896 bus_dmamap_unload(sc->twa_dma_tag, tr->tr_dma_map);
899 /* Free alignment buffer if it was used. */
900 if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
901 free(tr->tr_data, TWA_MALLOC_CLASS);
902 tr->tr_data = tr->tr_real_data; /* restore 'real' data pointer */
903 tr->tr_length = tr->tr_real_length;/* restore 'real' data length */
/* Debugger (ddb) helpers: external linkage so they can be invoked from ddb. */
910 void twa_report(void);
911 void twa_reset_stats(void);
912 void twa_print_request(struct twa_request *tr, int req_type);
917 * Function name: twa_report
918 * Description: For being called from ddb. Prints controller stats,
919 * and requests, if any, that are in the wrong queue.
928 struct twa_softc *sc;
929 struct twa_request *tr;
/* Walk every attached controller by unit number. */
933 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
934 twa_print_controller(sc);
/* twa_print_request only prints entries whose status mismatches the queue. */
935 TAILQ_FOREACH(tr, &sc->twa_busy, tr_link)
936 twa_print_request(tr, TWA_CMD_BUSY);
937 TAILQ_FOREACH(tr, &sc->twa_complete, tr_link)
938 twa_print_request(tr, TWA_CMD_COMPLETE);
946 * Function name: twa_reset_stats
947 * Description: For being called from ddb.
948 * Resets some controller stats.
955 twa_reset_stats(void)
957 struct twa_softc *sc;
/* Zero the per-queue high-water marks on every attached controller. */
961 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
962 sc->twa_qstats[TWAQ_FREE].q_max = 0;
963 sc->twa_qstats[TWAQ_BUSY].q_max = 0;
964 sc->twa_qstats[TWAQ_PENDING].q_max = 0;
965 sc->twa_qstats[TWAQ_COMPLETE].q_max = 0;
973 * Function name: twa_print_request
974 * Description: Prints a given request if it's in the wrong queue.
976 * Input: tr -- ptr to request pkt
977 * req_type-- expected status of the given request
982 twa_print_request(struct twa_request *tr, int req_type)
984 struct twa_softc *sc = tr->tr_sc;
985 struct twa_command_packet *cmdpkt = tr->tr_command;
986 struct twa_command_9k *cmd9k;
987 union twa_command_7k *cmd7k;
/* Only complain about requests whose status disagrees with their queue. */
991 if (tr->tr_status != req_type) {
992 twa_printf(sc, "Invalid %s request %p in queue! req_type = %x, queue_type = %x\n",
993 (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_INTERNAL) ? "INTERNAL" : "EXTERNAL",
994 tr, tr->tr_status, req_type);
/* Dump the command packet in the appropriate (9000 vs 7000) layout. */
996 if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
997 cmd9k = &(cmdpkt->command.cmd_pkt_9k);
998 cmd_phys_addr = cmd9k->sg_list[0].address;
999 twa_printf(sc, "9K cmd = %x %x %x %x %x %x %x %x %x\n",
1000 cmd9k->command.opcode,
1001 cmd9k->command.reserved,
1008 cmd9k->sg_list[0].length);
/* 9000-series commands carry a SCSI CDB; dump all 16 bytes. */
1009 cdb = (u_int8_t *)(cmdpkt->command.cmd_pkt_9k.cdb);
1010 twa_printf(sc, "cdb = %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
1011 cdb[0], cdb[1], cdb[2], cdb[3], cdb[4], cdb[5], cdb[6], cdb[7],
1012 cdb[8], cdb[9], cdb[10], cdb[11], cdb[12], cdb[13], cdb[14], cdb[15]);
1014 cmd7k = &(cmdpkt->command.cmd_pkt_7k);
1015 twa_printf(sc, "7K cmd = %x %x %x %x %x %x %x %x %x\n",
1016 cmd7k->generic.opcode,
1017 cmd7k->generic.sgl_offset,
1018 cmd7k->generic.size,
1019 cmd7k->generic.request_id,
1020 cmd7k->generic.unit,
1021 cmd7k->generic.host_id,
1022 cmd7k->generic.status,
1023 cmd7k->generic.flags,
1024 cmd7k->generic.count);
1027 cmd_phys_addr = (int)(tr->tr_cmd_phys);
1028 twa_printf(sc, "cmdphys=0x%x data=%p length=0x%x\n",
1029 cmd_phys_addr, tr->tr_data, tr->tr_length);
1030 twa_printf(sc, "req_id=0x%x flags=0x%x callback=%p private=%p\n",
1031 tr->tr_request_id, tr->tr_flags,
1032 tr->tr_callback, tr->tr_private);