DEVFS - remove dev_ops_add(), dev_ops_get(), and get_dev()
[dragonfly.git] / sys/dev/raid/twa/twa_freebsd.c
/*-
 * Copyright (c) 2003-04 3ware, Inc.
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 * $DragonFly: src/sys/dev/raid/twa/twa_freebsd.c,v 1.14 2006/12/22 23:26:24 swildner Exp $
 */

/*
 * 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 */


#include "twa_includes.h"

static void twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs,
    int nsegments, int error);
static void twa_setup_request_dmamap(void *arg, bus_dma_segment_t *segs,
    int nsegments, int error);

MALLOC_DEFINE(TWA_MALLOC_CLASS, "twa commands", "twa commands");


static d_open_t twa_open;
static d_close_t twa_close;
static d_ioctl_t twa_ioctl_wrapper;

static struct dev_ops twa_ops = {
    { "twa", TWA_CDEV_MAJOR, 0 },
    .d_open = twa_open,
    .d_close = twa_close,
    .d_ioctl = twa_ioctl_wrapper,
};

static devclass_t twa_devclass;


/*
 * Function name:  twa_open
 * Description:    Called when the controller is opened.
 *                 Simply marks the controller as open.
 *
 * Input:          dev   -- control device corresponding to the ctlr
 *                 flags -- mode of open
 *                 fmt   -- device type (character/block etc.)
 *                 proc  -- current process
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
static int
twa_open(struct dev_open_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    int unit = minor(dev);
    struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);

    sc->twa_state |= TWA_STATE_OPEN;
    return(0);
}


/*
 * Function name:  twa_close
 * Description:    Called when the controller is closed.
 *                 Simply marks the controller as not open.
 *
 * Input:          dev   -- control device corresponding to the ctlr
 *                 flags -- mode of corresponding open
 *                 fmt   -- device type (character/block etc.)
 *                 proc  -- current process
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
static int
twa_close(struct dev_close_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    int unit = minor(dev);
    struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);

    sc->twa_state &= ~TWA_STATE_OPEN;
    return(0);
}


/*
 * Function name:  twa_ioctl_wrapper
 * Description:    Called when an ioctl is posted to the controller.
 *                 Simply calls the ioctl handler.
 *
 * Input:          dev   -- control device corresponding to the ctlr
 *                 cmd   -- ioctl cmd
 *                 buf   -- ptr to buffer in kernel memory, which is
 *                          a copy of the input buffer in user-space
 *                 flags -- mode of corresponding open
 *                 proc  -- current process
 * Output:         buf   -- ptr to buffer in kernel memory, which will
 *                          be copied to the output buffer in user-space
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
static int
twa_ioctl_wrapper(struct dev_ioctl_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);

    return(twa_ioctl(sc, ap->a_cmd, ap->a_data));
}


static int twa_probe (device_t dev);
static int twa_attach (device_t dev);
static void twa_free (struct twa_softc *sc);
static int twa_detach (device_t dev);
static int twa_shutdown (device_t dev);
static int twa_suspend (device_t dev);
static int twa_resume (device_t dev);
static void twa_pci_intr(void *arg);
static void twa_intrhook (void *arg);

static device_method_t twa_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, twa_probe),
    DEVMETHOD(device_attach, twa_attach),
    DEVMETHOD(device_detach, twa_detach),
    DEVMETHOD(device_shutdown, twa_shutdown),
    DEVMETHOD(device_suspend, twa_suspend),
    DEVMETHOD(device_resume, twa_resume),

    DEVMETHOD(bus_print_child, bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    {0, 0}
};

static driver_t twa_pci_driver = {
    "twa",
    twa_methods,
    sizeof(struct twa_softc)
};

DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);


/*
 * Function name:  twa_probe
 * Description:    Called at driver load time. Claims 9000 ctlrs.
 *
 * Input:          dev -- bus device corresponding to the ctlr
 * Output:         None
 * Return value:   <= 0 -- success
 *                 >  0 -- failure
 */
static int
twa_probe(device_t dev)
{
    static u_int8_t first_ctlr = 1;

    twa_dbg_print(3, "entered");

    if ((pci_get_vendor(dev) == TWA_VENDOR_ID) &&
        (pci_get_device(dev) == TWA_DEVICE_ID_9K)) {
        device_set_desc(dev, TWA_DEVICE_NAME);
        /* Print the driver version only once. */
        if (first_ctlr) {
            kprintf("3ware device driver for 9000 series storage controllers, version: %s\n",
                TWA_DRIVER_VERSION_STRING);
            first_ctlr = 0;
        }
        return(0);
    }
    return(ENXIO);
}


/*
 * Function name:  twa_attach
 * Description:    Allocates pci resources; updates sc; adds a node to the
 *                 sysctl tree to expose the driver version; makes calls
 *                 to initialize ctlr, and to attach to CAM.
 *
 * Input:          dev -- bus device corresponding to the ctlr
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
static int
twa_attach(device_t dev)
{
    struct twa_softc *sc = device_get_softc(dev);
    u_int32_t command;
    int res_id;
    int error;
    cdev_t xdev;

    twa_dbg_dprint_enter(3, sc);

    /* Initialize the softc structure. */
    sc->twa_bus_dev = dev;

    sysctl_ctx_init(&sc->twa_sysctl_ctx);
    sc->twa_sysctl_tree = SYSCTL_ADD_NODE(&sc->twa_sysctl_ctx,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
        device_get_nameunit(dev), CTLFLAG_RD, 0, "");
    if (sc->twa_sysctl_tree == NULL) {
        twa_printf(sc, "Cannot add sysctl tree node.\n");
        return(ENXIO);
    }
    SYSCTL_ADD_STRING(&sc->twa_sysctl_ctx, SYSCTL_CHILDREN(sc->twa_sysctl_tree),
        OID_AUTO, "driver_version", CTLFLAG_RD,
        TWA_DRIVER_VERSION_STRING, 0, "TWA driver version");

    /* Make sure we are going to be able to talk to this board. */
    command = pci_read_config(dev, PCIR_COMMAND, 2);
    if ((command & PCIM_CMD_PORTEN) == 0) {
        twa_printf(sc, "Register window not available.\n");
        return(ENXIO);
    }

    /* Force the busmaster enable bit on, in case the BIOS forgot. */
    command |= PCIM_CMD_BUSMASTEREN;
    pci_write_config(dev, PCIR_COMMAND, command, 2);

    /* Allocate the PCI register window. */
    res_id = TWA_IO_CONFIG_REG;
    if ((sc->twa_io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &res_id,
            0, ~0, 1, RF_ACTIVE)) == NULL) {
        twa_printf(sc, "can't allocate register window.\n");
        twa_free(sc);
        return(ENXIO);
    }
    sc->twa_bus_tag = rman_get_bustag(sc->twa_io_res);
    sc->twa_bus_handle = rman_get_bushandle(sc->twa_io_res);

    /* Allocate and connect our interrupt. */
    res_id = 0;
    if ((sc->twa_irq_res = bus_alloc_resource(sc->twa_bus_dev, SYS_RES_IRQ,
            &res_id, 0, ~0, 1,
            RF_SHAREABLE | RF_ACTIVE)) == NULL) {
        twa_printf(sc, "Can't allocate interrupt.\n");
        twa_free(sc);
        return(ENXIO);
    }
    if (bus_setup_intr(sc->twa_bus_dev, sc->twa_irq_res, 0,
            twa_pci_intr, sc, &sc->twa_intr_handle, NULL)) {
        twa_printf(sc, "Can't set up interrupt.\n");
        twa_free(sc);
        return(ENXIO);
    }

    /* Initialize the driver for this controller. */
    if ((error = twa_setup(sc))) {
        twa_free(sc);
        return(error);
    }

    /* Print some information about the controller and configuration. */
    twa_describe_controller(sc);

    /* Create the control device. */
    xdev = make_dev(&twa_ops, device_get_unit(sc->twa_bus_dev),
        UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
        "twa%d", device_get_unit(sc->twa_bus_dev));
    xdev->si_drv1 = sc;

    /*
     * Schedule ourselves to bring the controller up once interrupts are
     * available. This isn't strictly necessary, since we disable
     * interrupts while probing the controller, but it is more in keeping
     * with common practice for other disk devices.
     */
    sc->twa_ich.ich_func = twa_intrhook;
    sc->twa_ich.ich_arg = sc;
    sc->twa_ich.ich_desc = "twa";
    if (config_intrhook_establish(&sc->twa_ich) != 0) {
        twa_printf(sc, "Can't establish configuration hook.\n");
        twa_free(sc);
        return(ENXIO);
    }

    if ((error = twa_cam_setup(sc))) {
        twa_free(sc);
        return(error);
    }
    return(0);
}


/*
 * Function name:  twa_free
 * Description:    Performs clean-up at the time of going down.
 *
 * Input:          sc -- ptr to per ctlr structure
 * Output:         None
 * Return value:   None
 */
static void
twa_free(struct twa_softc *sc)
{
    struct twa_request *tr;

    twa_dbg_dprint_enter(3, sc);

    /* Detach from CAM */
    twa_cam_detach(sc);

    /* Destroy dma handles. */
    bus_dmamap_unload(sc->twa_dma_tag, sc->twa_cmd_map);
    while ((tr = twa_dequeue_free(sc)) != NULL)
        bus_dmamap_destroy(sc->twa_dma_tag, tr->tr_dma_map);

    /* Free all memory allocated so far. */
    if (sc->twa_req_buf)
        kfree(sc->twa_req_buf, TWA_MALLOC_CLASS);
    if (sc->twa_cmd_pkt_buf)
        bus_dmamem_free(sc->twa_dma_tag, sc->twa_cmd_pkt_buf,
            sc->twa_cmd_map);
    if (sc->twa_aen_queue[0])
        kfree(sc->twa_aen_queue[0], M_DEVBUF);

    /* Destroy the data-transfer DMA tag. */
    if (sc->twa_dma_tag)
        bus_dma_tag_destroy(sc->twa_dma_tag);

    /* Disconnect the interrupt handler. */
    if (sc->twa_intr_handle)
        bus_teardown_intr(sc->twa_bus_dev, sc->twa_irq_res,
            sc->twa_intr_handle);
    if (sc->twa_irq_res != NULL)
        bus_release_resource(sc->twa_bus_dev, SYS_RES_IRQ,
            0, sc->twa_irq_res);

    /* Release the register window mapping. */
    if (sc->twa_io_res != NULL)
        bus_release_resource(sc->twa_bus_dev, SYS_RES_IOPORT,
            TWA_IO_CONFIG_REG, sc->twa_io_res);

    dev_ops_remove_minor(&twa_ops, device_get_unit(sc->twa_bus_dev));

    sysctl_ctx_free(&sc->twa_sysctl_ctx);
}


/*
 * Function name:  twa_detach
 * Description:    Called when the controller is being detached from
 *                 the pci bus.
 *
 * Input:          dev -- bus device corresponding to the ctlr
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
static int
twa_detach(device_t dev)
{
    struct twa_softc *sc = device_get_softc(dev);
    int error;

    twa_dbg_dprint_enter(3, sc);

    error = EBUSY;
    crit_enter();
    if (sc->twa_state & TWA_STATE_OPEN)
        goto out;

    /* Shut the controller down. */
    if ((error = twa_shutdown(dev)))
        goto out;

    /* Free all resources associated with this controller. */
    twa_free(sc);
    error = 0;

out:
    crit_exit();
    return(error);
}


/*
 * Function name:  twa_shutdown
 * Description:    Called at unload/shutdown time. Lets the controller
 *                 know that we are going down.
 *
 * Input:          dev -- bus device corresponding to the ctlr
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
static int
twa_shutdown(device_t dev)
{
    struct twa_softc *sc = device_get_softc(dev);
    int error = 0;

    twa_dbg_dprint_enter(3, sc);

    crit_enter();

    /* Disconnect from the controller. */
    error = twa_deinit_ctlr(sc);

    crit_exit();
    return(error);
}


/*
 * Function name:  twa_suspend
 * Description:    Called to suspend I/O before hot-swapping PCI ctlrs.
 *                 Doesn't do much as of now.
 *
 * Input:          dev -- bus device corresponding to the ctlr
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
static int
twa_suspend(device_t dev)
{
    struct twa_softc *sc = device_get_softc(dev);

    twa_dbg_dprint_enter(3, sc);

    crit_enter();
    sc->twa_state |= TWA_STATE_SUSPEND;

    twa_disable_interrupts(sc);
    crit_exit();

    return(1);
}


/*
 * Function name:  twa_resume
 * Description:    Called to resume I/O after hot-swapping PCI ctlrs.
 *                 Doesn't do much as of now.
 *
 * Input:          dev -- bus device corresponding to the ctlr
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
static int
twa_resume(device_t dev)
{
    struct twa_softc *sc = device_get_softc(dev);

    twa_dbg_dprint_enter(3, sc);

    sc->twa_state &= ~TWA_STATE_SUSPEND;
    twa_enable_interrupts(sc);

    return(1);
}


/*
 * Function name:  twa_pci_intr
 * Description:    Interrupt handler. Wrapper for twa_interrupt.
 *
 * Input:          arg -- ptr to per ctlr structure
 * Output:         None
 * Return value:   None
 */
static void
twa_pci_intr(void *arg)
{
    struct twa_softc *sc = (struct twa_softc *)arg;

    twa_interrupt(sc);
}


/*
 * Function name:  twa_intrhook
 * Description:    Callback for us to enable interrupts.
 *
 * Input:          arg -- ptr to per ctlr structure
 * Output:         None
 * Return value:   None
 */
static void
twa_intrhook(void *arg)
{
    struct twa_softc *sc = (struct twa_softc *)arg;

    twa_dbg_dprint(4, sc, "twa_intrhook Entered");

    /* Pull ourselves off the intrhook chain. */
    config_intrhook_disestablish(&sc->twa_ich);

    /* Enable interrupts. */
    twa_enable_interrupts(sc);
}


/*
 * Function name:  twa_write_pci_config
 * Description:    Writes to the PCI config space.
 *
 * Input:          sc    -- ptr to per ctlr structure
 *                 value -- value to be written
 *                 size  -- # of bytes to be written
 * Output:         None
 * Return value:   None
 */
void
twa_write_pci_config(struct twa_softc *sc, u_int32_t value, int size)
{
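    /*
     * Note: the write below always targets the PCI status register
     * (PCIR_STATUS), presumably so that callers can clear latched
     * status/error bits.
     */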
    pci_write_config(sc->twa_bus_dev, PCIR_STATUS, value, size);
}


/*
 * Function name:  twa_alloc_req_pkts
 * Description:    Allocates memory for, and initializes request pkts,
 *                 and queues them in the free queue.
 *
 * Input:          sc       -- ptr to per ctlr structure
 *                 num_reqs -- # of request pkts to allocate and initialize.
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
int
twa_alloc_req_pkts(struct twa_softc *sc, int num_reqs)
{
    struct twa_request *tr;
    int i;

    sc->twa_req_buf = kmalloc(num_reqs * sizeof(struct twa_request),
        TWA_MALLOC_CLASS, M_INTWAIT);

    /* Allocate the bus DMA tag appropriate for PCI. */
    if (bus_dma_tag_create(NULL,                /* parent */
            TWA_ALIGNMENT,                      /* alignment */
            0,                                  /* boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            TWA_Q_LENGTH *
            (sizeof(struct twa_command_packet)),/* maxsize */
            TWA_MAX_SG_ELEMENTS,                /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
            BUS_DMA_ALLOCNOW,                   /* flags */
            &sc->twa_dma_tag                    /* tag */)) {
        twa_printf(sc, "Can't allocate DMA tag.\n");
        return(ENOMEM);
    }

    /* Allocate memory for cmd pkts. */
    if (bus_dmamem_alloc(sc->twa_dma_tag,
            (void *)(&(sc->twa_cmd_pkt_buf)),
            BUS_DMA_WAITOK, &(sc->twa_cmd_map)))
        return(ENOMEM);

    bus_dmamap_load(sc->twa_dma_tag, sc->twa_cmd_map,
        sc->twa_cmd_pkt_buf,
        num_reqs * sizeof(struct twa_command_packet),
        twa_setup_request_dmamap, sc, 0);
    bzero(sc->twa_req_buf, num_reqs * sizeof(struct twa_request));
    bzero(sc->twa_cmd_pkt_buf,
        num_reqs * sizeof(struct twa_command_packet));

    for (i = 0; i < num_reqs; i++) {
        tr = &(sc->twa_req_buf[i]);
        tr->tr_command = &(sc->twa_cmd_pkt_buf[i]);
        tr->tr_cmd_phys = sc->twa_cmd_pkt_phys +
            (i * sizeof(struct twa_command_packet));
        tr->tr_request_id = i;
        tr->tr_sc = sc;
        sc->twa_lookup[i] = tr;

        /*
         * Create a map for data buffers. maxsize (256 * 1024) used in
         * bus_dma_tag_create above should suffice for the bounce page
         * needs of data buffers, since the max I/O size we support is
         * 128KB. If we supported I/Os bigger than 256KB, we would have
         * to create a second dma_tag, with the appropriate maxsize.
         */
        if (bus_dmamap_create(sc->twa_dma_tag, 0,
                &tr->tr_dma_map))
            return(ENOMEM);

        /* Insert request into the free queue. */
        twa_release_request(tr);
    }
    return(0);
}


/*
 * Function name:  twa_fillin_sgl
 * Description:    Fills in the scatter/gather list.
 *
 * Input:          sgl       -- ptr to sg list
 *                 segs      -- ptr to fill the sg list from
 *                 nsegments -- # of segments
 * Output:         None
 * Return value:   None
 */
static void
twa_fillin_sgl(struct twa_sg *sgl, bus_dma_segment_t *segs, int nsegments)
{
    int i;

    for (i = 0; i < nsegments; i++) {
        sgl[i].address = segs[i].ds_addr;
        sgl[i].length = segs[i].ds_len;
    }
}


/*
 * Function name:  twa_setup_data_dmamap
 * Description:    Callback of bus_dmamap_load for the buffer associated
 *                 with data. Updates the cmd pkt (size/sgl_entries
 *                 fields, as applicable) to reflect the number of sg
 *                 elements.
 *
 * Input:          arg       -- ptr to request pkt
 *                 segs      -- ptr to a list of segment descriptors
 *                 nsegments -- # of segments
 *                 error     -- 0 if no errors encountered before callback,
 *                              non-zero if errors were encountered
 * Output:         None
 * Return value:   None
 */
static void
twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs,
    int nsegments, int error)
{
    struct twa_request *tr = (struct twa_request *)arg;
    struct twa_command_packet *cmdpkt = tr->tr_command;
    struct twa_command_9k *cmd9k;
    union twa_command_7k *cmd7k;
    u_int8_t sgl_offset;

    twa_dbg_dprint_enter(10, tr->tr_sc);

    if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) &&
        (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL))
        twa_allow_new_requests(tr->tr_sc, (void *)(tr->tr_private));

    if (error == EFBIG) {
        tr->tr_error = error;
        goto out;
    }

    if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
        cmd9k = &(cmdpkt->command.cmd_pkt_9k);
        twa_fillin_sgl(&(cmd9k->sg_list[0]), segs, nsegments);
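        /*
         * The command packet was presumably built with a single SG entry
         * already counted; account for the remaining segments here.
         */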
        cmd9k->sgl_entries += nsegments - 1;
    } else {
        /* It's a 7000 command packet. */
        cmd7k = &(cmdpkt->command.cmd_pkt_7k);
        if ((sgl_offset = cmdpkt->command.cmd_pkt_7k.generic.sgl_offset))
            twa_fillin_sgl((struct twa_sg *)
                (((u_int32_t *)cmd7k) + sgl_offset),
                segs, nsegments);
        /* Modify the size field, based on sg address size. */
        cmd7k->generic.size +=
            ((TWA_64BIT_ADDRESSES ? 3 : 2) * nsegments);
    }

    if (tr->tr_flags & TWA_CMD_DATA_IN)
        bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
            BUS_DMASYNC_PREREAD);
    if (tr->tr_flags & TWA_CMD_DATA_OUT) {
        /*
         * If we're using an alignment buffer, and we're
         * writing data, copy the real data out.
         */
        if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
            bcopy(tr->tr_real_data, tr->tr_data, tr->tr_real_length);
        bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
            BUS_DMASYNC_PREWRITE);
    }
    error = twa_submit_io(tr);

out:
    if (error) {
        twa_unmap_request(tr);
        /*
         * If the caller was returned EINPROGRESS and has registered a
         * callback for handling completion, the callback will never get
         * called because we were unable to submit the request. So, free
         * up the request right here.
         */
        if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) && (tr->tr_callback))
            twa_release_request(tr);
    }
}


/*
 * Function name:  twa_setup_request_dmamap
 * Description:    Callback of bus_dmamap_load for the buffer associated
 *                 with a cmd pkt.
 *
 * Input:          arg       -- ptr to per ctlr structure
 *                 segs      -- ptr to a list of segment descriptors
 *                 nsegments -- # of segments
 *                 error     -- 0 if no errors encountered before callback,
 *                              non-zero if errors were encountered
 * Output:         None
 * Return value:   None
 */
static void
twa_setup_request_dmamap(void *arg, bus_dma_segment_t *segs,
    int nsegments, int error)
{
    struct twa_softc *sc = (struct twa_softc *)arg;

    twa_dbg_dprint_enter(10, sc);

    sc->twa_cmd_pkt_phys = segs[0].ds_addr;
}


/*
 * Function name:  twa_map_request
 * Description:    Maps a cmd pkt and data associated with it, into
 *                 DMA'able memory.
 *
 * Input:          tr -- ptr to request pkt
 * Output:         None
 * Return value:   0        -- success
 *                 non-zero -- failure
 */
int
twa_map_request(struct twa_request *tr)
{
    struct twa_softc *sc = tr->tr_sc;
    int error = 0;

    twa_dbg_dprint_enter(10, sc);

    /* If the command involves data, map that too. */
    if (tr->tr_data != NULL) {
        /*
         * It's sufficient for the data pointer to be 4-byte aligned
         * to work with 9000. However, if 4-byte aligned addresses
         * are passed to bus_dmamap_load, we can get back sg elements
         * that are not 512-byte multiples in size. So, we will let
         * only those buffers that are 512-byte aligned pass through,
         * and bounce the rest, so as to make sure that we always get
         * back sg elements that are 512-byte multiples in size.
         *
         * DragonFly's malloc only guarantees X alignment when X is
         * a power of 2, otherwise we would have to use contigalloc,
         * which is nasty. Use malloc.
         */
        if (((vm_offset_t)tr->tr_data % 512) || (tr->tr_length % 512)) {
            tr->tr_flags |= TWA_CMD_DATA_COPY_NEEDED;
            tr->tr_real_data = tr->tr_data;     /* save original data pointer */
            tr->tr_real_length = tr->tr_length; /* save original data length */
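            /*
             * Round the bounce-buffer size up to the next power of two
             * (at least 512) so that kmalloc returns memory aligned as
             * described in the comment above.
             */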
            tr->tr_length = 512;
            while (tr->tr_length < tr->tr_real_length)
                tr->tr_length <<= 1;
            tr->tr_data = kmalloc(tr->tr_length, TWA_MALLOC_CLASS, M_INTWAIT);
        }

        /*
         * Map the data buffer into bus space and build the s/g list.
         */
        if ((error = bus_dmamap_load(sc->twa_dma_tag, tr->tr_dma_map,
                tr->tr_data, tr->tr_length,
                twa_setup_data_dmamap, tr,
                BUS_DMA_WAITOK))) {
            if (error == EINPROGRESS) {
                tr->tr_flags |= TWA_CMD_IN_PROGRESS;
                if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL)
                    twa_disallow_new_requests(sc);
                error = 0;
            } else {
                /* Free alignment buffer if it was used. */
                if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
                    kfree(tr->tr_data, TWA_MALLOC_CLASS);
                    tr->tr_data = tr->tr_real_data;     /* restore 'real' data pointer */
                    tr->tr_length = tr->tr_real_length; /* restore 'real' data length */
                }
            }
        } else
            error = tr->tr_error;

    } else
        if ((error = twa_submit_io(tr)))
            twa_unmap_request(tr);

    return(error);
}


/*
 * Function name:  twa_unmap_request
 * Description:    Undoes the mapping done by twa_map_request.
 *
 * Input:          tr -- ptr to request pkt
 * Output:         None
 * Return value:   None
 */
void
twa_unmap_request(struct twa_request *tr)
{
    struct twa_softc *sc = tr->tr_sc;
    u_int8_t cmd_status;

    twa_dbg_dprint_enter(10, sc);

    /* If the command involved data, unmap that too. */
    if (tr->tr_data != NULL) {
        if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K)
            cmd_status = tr->tr_command->command.cmd_pkt_9k.status;
        else
            cmd_status = tr->tr_command->command.cmd_pkt_7k.generic.status;

        if (tr->tr_flags & TWA_CMD_DATA_IN) {
            bus_dmamap_sync(sc->twa_dma_tag,
                tr->tr_dma_map, BUS_DMASYNC_POSTREAD);

            /*
             * If we are using a bounce buffer, and we are reading
             * data, copy the real data in.
             */
            if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
                if (cmd_status == 0)
                    bcopy(tr->tr_data, tr->tr_real_data,
                        tr->tr_real_length);
        }
        if (tr->tr_flags & TWA_CMD_DATA_OUT)
            bus_dmamap_sync(sc->twa_dma_tag, tr->tr_dma_map,
                BUS_DMASYNC_POSTWRITE);

        bus_dmamap_unload(sc->twa_dma_tag, tr->tr_dma_map);
    }

    /* Free alignment buffer if it was used. */
    if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
        kfree(tr->tr_data, TWA_MALLOC_CLASS);
        tr->tr_data = tr->tr_real_data;     /* restore 'real' data pointer */
        tr->tr_length = tr->tr_real_length; /* restore 'real' data length */
    }
}


#ifdef TWA_DEBUG
void twa_report(void);
void twa_reset_stats(void);
void twa_print_request(struct twa_request *tr, int req_type);


/*
 * Function name:  twa_report
 * Description:    For being called from ddb. Prints controller stats,
 *                 and requests, if any, that are in the wrong queue.
 *
 * Input:          None
 * Output:         None
 * Return value:   None
 */
void
twa_report(void)
{
    struct twa_softc *sc;
    struct twa_request *tr;
    int i;

    crit_enter();
    for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
        twa_print_controller(sc);
        TAILQ_FOREACH(tr, &sc->twa_busy, tr_link)
            twa_print_request(tr, TWA_CMD_BUSY);
        TAILQ_FOREACH(tr, &sc->twa_complete, tr_link)
            twa_print_request(tr, TWA_CMD_COMPLETE);
    }
    crit_exit();
}


/*
 * Function name:  twa_reset_stats
 * Description:    For being called from ddb.
 *                 Resets some controller stats.
 *
 * Input:          None
 * Output:         None
 * Return value:   None
 */
void
twa_reset_stats(void)
{
    struct twa_softc *sc;
    int i;

    crit_enter();
    for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
        sc->twa_qstats[TWAQ_FREE].q_max = 0;
        sc->twa_qstats[TWAQ_BUSY].q_max = 0;
        sc->twa_qstats[TWAQ_PENDING].q_max = 0;
        sc->twa_qstats[TWAQ_COMPLETE].q_max = 0;
    }
    crit_exit();
}


/*
 * Function name:  twa_print_request
 * Description:    Prints a given request if it's in the wrong queue.
 *
 * Input:          tr       -- ptr to request pkt
 *                 req_type -- expected status of the given request
 * Output:         None
 * Return value:   None
 */
void
twa_print_request(struct twa_request *tr, int req_type)
{
    struct twa_softc *sc = tr->tr_sc;
    struct twa_command_packet *cmdpkt = tr->tr_command;
    struct twa_command_9k *cmd9k;
    union twa_command_7k *cmd7k;
    u_int8_t *cdb;
    int cmd_phys_addr;

    if (tr->tr_status != req_type) {
        twa_printf(sc, "Invalid %s request %p in queue! req_type = %x, queue_type = %x\n",
            (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_INTERNAL) ? "INTERNAL" : "EXTERNAL",
            tr, tr->tr_status, req_type);

        if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
            cmd9k = &(cmdpkt->command.cmd_pkt_9k);
            cmd_phys_addr = cmd9k->sg_list[0].address;
            twa_printf(sc, "9K cmd = %x %x %x %x %x %x %x %x %x\n",
                cmd9k->command.opcode,
                cmd9k->command.reserved,
                cmd9k->unit,
                cmd9k->request_id,
                cmd9k->status,
                cmd9k->sgl_offset,
                cmd9k->sgl_entries,
                cmd_phys_addr,
                cmd9k->sg_list[0].length);
            cdb = (u_int8_t *)(cmdpkt->command.cmd_pkt_9k.cdb);
            twa_printf(sc, "cdb = %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
                cdb[0], cdb[1], cdb[2], cdb[3], cdb[4], cdb[5], cdb[6], cdb[7],
                cdb[8], cdb[9], cdb[10], cdb[11], cdb[12], cdb[13], cdb[14], cdb[15]);
        } else {
            cmd7k = &(cmdpkt->command.cmd_pkt_7k);
            twa_printf(sc, "7K cmd = %x %x %x %x %x %x %x %x %x\n",
                cmd7k->generic.opcode,
                cmd7k->generic.sgl_offset,
                cmd7k->generic.size,
                cmd7k->generic.request_id,
                cmd7k->generic.unit,
                cmd7k->generic.host_id,
                cmd7k->generic.status,
                cmd7k->generic.flags,
                cmd7k->generic.count);
        }

        cmd_phys_addr = (int)(tr->tr_cmd_phys);
        twa_printf(sc, "cmdphys=0x%x data=%p length=0x%x\n",
            cmd_phys_addr, tr->tr_data, tr->tr_length);
        twa_printf(sc, "req_id=0x%x flags=0x%x callback=%p private=%p\n",
            tr->tr_request_id, tr->tr_flags,
            tr->tr_callback, tr->tr_private);
    }
}
#endif