twe(4): Sync with FreeBSD.
[dragonfly.git] / sys / dev / raid / twe / twe_freebsd.c
1/*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2003 Paul Saab
4 * Copyright (c) 2003 Vinod Kashyap
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: src/sys/dev/twe/twe_freebsd.c,v 1.54 2012/11/17 01:52:19 svnexp Exp $
30 */
31
32/*
33 * FreeBSD-specific code.
34 */
35
36#include <dev/raid/twe/twe_compat.h>
37#include <dev/raid/twe/twereg.h>
38#include <dev/raid/twe/tweio.h>
39#include <dev/raid/twe/twevar.h>
40#include <dev/raid/twe/twe_tables.h>
41#include <sys/dtype.h>
42#include <sys/mplock2.h>
43
44#include <vm/vm.h>
45
46static devclass_t twe_devclass;
47
48#ifdef TWE_DEBUG
49static u_int32_t twed_bio_in;
50#define TWED_BIO_IN twed_bio_in++
51static u_int32_t twed_bio_out;
52#define TWED_BIO_OUT twed_bio_out++
53#else
54#define TWED_BIO_IN
55#define TWED_BIO_OUT
56#endif
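/*
 * Editor's note: the TWED_BIO_IN/TWED_BIO_OUT counters only exist under
 * TWE_DEBUG; they are bumped as bios enter and leave the disk strategy path
 * and are printed by twe_report() (at the bottom of this file) from DDB.
 */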
57
58static void twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
59static void twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
60
61/********************************************************************************
62 ********************************************************************************
63 Control device interface
64 ********************************************************************************
65 ********************************************************************************/
66
67static d_open_t twe_open;
68static d_close_t twe_close;
69static d_ioctl_t twe_ioctl_wrapper;
70
71static struct dev_ops twe_ops = {
72 { "twe", 0, D_MPSAFE },
73 .d_open = twe_open,
74 .d_close = twe_close,
75 .d_ioctl = twe_ioctl_wrapper,
76};
77
78/********************************************************************************
79 * Accept an open operation on the control device.
80 */
81static int
82twe_open(struct dev_open_args *ap)
83{
84 cdev_t dev = ap->a_head.a_dev;
85 struct twe_softc *sc = (struct twe_softc *)dev->si_drv1;
86
87 TWE_IO_LOCK(sc);
88 if (sc->twe_state & TWE_STATE_DETACHING) {
89 TWE_IO_UNLOCK(sc);
90 return (ENXIO);
91 }
92 sc->twe_state |= TWE_STATE_OPEN;
93 TWE_IO_UNLOCK(sc);
94 return(0);
95}
96
97/********************************************************************************
98 * Accept the last close on the control device.
99 */
100static int
101twe_close(struct dev_close_args *ap)
102{
103 cdev_t dev = ap->a_head.a_dev;
104 struct twe_softc *sc = (struct twe_softc *)dev->si_drv1;
105
106 TWE_IO_LOCK(sc);
107 sc->twe_state &= ~TWE_STATE_OPEN;
108 TWE_IO_UNLOCK(sc);
109 return (0);
110}
111
112/********************************************************************************
113 * Handle controller-specific control operations.
114 */
115static int
116twe_ioctl_wrapper(struct dev_ioctl_args *ap)
117{
118 cdev_t dev = ap->a_head.a_dev;
119 u_long cmd = ap->a_cmd;
120 caddr_t addr = ap->a_data;
121 struct twe_softc *sc = (struct twe_softc *)dev->si_drv1;
122
123 return(twe_ioctl(sc, cmd, addr));
124}
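/*
 * Illustrative sketch (editor's note, not compiled into the driver): a
 * management tool reaches twe_ioctl() through the control node created in
 * twe_attach().  The ioctl command values and argument structures are the
 * ones declared in tweio.h; TWEIO_COMMAND and struct twe_usercommand below
 * are used as an assumed example of such a request.
 */
#if 0
	int fd = open("/dev/twe0", O_RDWR);	/* dispatched to twe_open() */
	struct twe_usercommand tu;		/* assumed to be declared in tweio.h */

	bzero(&tu, sizeof(tu));
	/* ... fill in the controller command ... */
	ioctl(fd, TWEIO_COMMAND, &tu);		/* twe_ioctl_wrapper() -> twe_ioctl() */
	close(fd);
#endif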
125
126/********************************************************************************
127 ********************************************************************************
128 PCI device interface
129 ********************************************************************************
130 ********************************************************************************/
131
132static int twe_probe(device_t dev);
133static int twe_attach(device_t dev);
134static void twe_free(struct twe_softc *sc);
135static int twe_detach(device_t dev);
136static int twe_shutdown(device_t dev);
137static int twe_suspend(device_t dev);
138static int twe_resume(device_t dev);
139static void twe_pci_intr(void *arg);
140static void twe_intrhook(void *arg);
141
142static device_method_t twe_methods[] = {
143 /* Device interface */
144 DEVMETHOD(device_probe, twe_probe),
145 DEVMETHOD(device_attach, twe_attach),
146 DEVMETHOD(device_detach, twe_detach),
147 DEVMETHOD(device_shutdown, twe_shutdown),
148 DEVMETHOD(device_suspend, twe_suspend),
149 DEVMETHOD(device_resume, twe_resume),
150
151 { 0, 0 }
152};
153
154static driver_t twe_pci_driver = {
155 "twe",
156 twe_methods,
157 sizeof(struct twe_softc)
158};
159
160DRIVER_MODULE(twe, pci, twe_pci_driver, twe_devclass, NULL, NULL);
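/*
 * DRIVER_MODULE() registers this newbus driver on the "pci" bus; the twed
 * disk driver further down in this file registers itself with "twe" as its
 * parent, which is how the per-unit disks added in twe_attach_drive() find
 * their driver.
 */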
161
162/********************************************************************************
163 * Match a 3ware Escalade ATA RAID controller.
164 */
165static int
166twe_probe(device_t dev)
167{
168
169 debug_called(4);
170
171 if ((pci_get_vendor(dev) == TWE_VENDOR_ID) &&
172 ((pci_get_device(dev) == TWE_DEVICE_ID) ||
173 (pci_get_device(dev) == TWE_DEVICE_ID_ASIC))) {
174 device_set_desc_copy(dev, TWE_DEVICE_NAME ". Driver version " TWE_DRIVER_VERSION_STRING);
175 return(BUS_PROBE_DEFAULT);
176 }
177 return(ENXIO);
178}
179
180/********************************************************************************
181 * Allocate resources, initialise the controller.
182 */
183static int
184twe_attach(device_t dev)
185{
186 struct twe_softc *sc;
187 int rid, error;
188
189 debug_called(4);
190
191 /*
192 * Initialise the softc structure.
193 */
194 sc = device_get_softc(dev);
195 sc->twe_dev = dev;
196 lockinit(&sc->twe_io_lock, "twe I/O", 0, LK_CANRECURSE);
197 lockinit(&sc->twe_config_lock, "twe config", 0, LK_CANRECURSE);
198
199 sysctl_ctx_init(&sc->sysctl_ctx);
200 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
201 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
202 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
203 if (sc->sysctl_tree == NULL) {
204 twe_printf(sc, "cannot add sysctl tree node\n");
205 return (ENXIO);
206 }
207 SYSCTL_ADD_STRING(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
208 OID_AUTO, "driver_version", CTLFLAG_RD, TWE_DRIVER_VERSION_STRING, 0,
209 "TWE driver version");
210
211 /*
212 * Force the busmaster enable bit on, in case the BIOS forgot.
213 */
214 pci_enable_busmaster(dev);
215
216 /*
217 * Allocate the PCI register window.
218 */
219 rid = TWE_IO_CONFIG_REG;
220 if ((sc->twe_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
221 RF_ACTIVE)) == NULL) {
222 twe_printf(sc, "can't allocate register window\n");
223 twe_free(sc);
224 return(ENXIO);
225 }
226
227 /*
228 * Allocate the parent bus DMA tag appropriate for PCI.
229 */
230 if (bus_dma_tag_create(NULL, /* parent */
231 1, 0, /* alignment, boundary */
232 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
233 BUS_SPACE_MAXADDR, /* highaddr */
234 NULL, NULL, /* filter, filterarg */
235 MAXBSIZE, TWE_MAX_SGL_LENGTH, /* maxsize, nsegments */
236 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
237 0, /* flags */
238 &sc->twe_parent_dmat)) {
239 twe_printf(sc, "can't allocate parent DMA tag\n");
240 twe_free(sc);
241 return(ENOMEM);
242 }
243
244 /*
245 * Allocate and connect our interrupt.
246 */
247 rid = 0;
248 if ((sc->twe_irq = bus_alloc_resource_any(sc->twe_dev, SYS_RES_IRQ,
249 &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
250 twe_printf(sc, "can't allocate interrupt\n");
251 twe_free(sc);
252 return(ENXIO);
253 }
254 if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_MPSAFE,
255 twe_pci_intr, sc, &sc->twe_intr, NULL)) {
256 twe_printf(sc, "can't set up interrupt\n");
257 twe_free(sc);
258 return(ENXIO);
259 }
260
261 /*
262 * Create DMA tag for mapping commands into controller-addressable space.
263 */
264 if (bus_dma_tag_create(sc->twe_parent_dmat, /* parent */
265 1, 0, /* alignment, boundary */
266 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
267 BUS_SPACE_MAXADDR, /* highaddr */
268 NULL, NULL, /* filter, filterarg */
269 sizeof(TWE_Command) *
270 TWE_Q_LENGTH, 1, /* maxsize, nsegments */
271 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
272 0, /* flags */
273 &sc->twe_cmd_dmat)) {
274 twe_printf(sc, "can't allocate command DMA tag\n");
275 twe_free(sc);
276 return(ENOMEM);
277 }
278 /*
279 * Allocate memory and make it available for DMA.
280 */
281 if (bus_dmamem_alloc(sc->twe_cmd_dmat, (void **)&sc->twe_cmd,
282 BUS_DMA_NOWAIT, &sc->twe_cmdmap)) {
283 twe_printf(sc, "can't allocate command memory\n");
284 return(ENOMEM);
285 }
286 bus_dmamap_load(sc->twe_cmd_dmat, sc->twe_cmdmap, sc->twe_cmd,
287 sizeof(TWE_Command) * TWE_Q_LENGTH,
288 twe_setup_request_dmamap, sc, 0);
289 bzero(sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH);
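 /*
  * The load callback (twe_setup_request_dmamap) records the bus address of
  * this command array in sc->twe_cmdphys; individual commands are later
  * located at fixed offsets within it by their request tag (see
  * TWE_FIND_COMMAND() usage below).
  */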
290
291 /*
292 * Create DMA tag for mapping objects into controller-addressable space.
293 */
294 if (bus_dma_tag_create(sc->twe_parent_dmat, /* parent */
295 1, 0, /* alignment, boundary */
296 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
297 BUS_SPACE_MAXADDR, /* highaddr */
298 NULL, NULL, /* filter, filterarg */
299 MAXBSIZE, TWE_MAX_SGL_LENGTH,/* maxsize, nsegments */
300 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
301 BUS_DMA_ALLOCNOW, /* flags */
302 &sc->twe_buffer_dmat)) {
303 twe_printf(sc, "can't allocate data buffer DMA tag\n");
304 twe_free(sc);
305 return(ENOMEM);
306 }
307
308 /*
309 * Create DMA tag for mapping immediate requests (single-segment buffers) into controller-addressable space.
310 */
311 if (bus_dma_tag_create(sc->twe_parent_dmat, /* parent */
312 1, 0, /* alignment, boundary */
313 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
314 BUS_SPACE_MAXADDR, /* highaddr */
315 NULL, NULL, /* filter, filterarg */
316 MAXBSIZE, 1, /* maxsize, nsegments */
317 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
318 0, /* flags */
319 &sc->twe_immediate_dmat)) {
320 twe_printf(sc, "can't allocate immediate request DMA tag\n");
321 twe_free(sc);
322 return(ENOMEM);
323 }
324 /*
325 * Allocate memory for requests which cannot sleep or support continuation.
326 */
327 if (bus_dmamem_alloc(sc->twe_immediate_dmat, (void **)&sc->twe_immediate,
328 BUS_DMA_NOWAIT, &sc->twe_immediate_map)) {
329 twe_printf(sc, "can't allocate memory for immediate requests\n");
330 return(ENOMEM);
331 }
332
333 /*
334 * Initialise the controller and driver core.
335 */
336 if ((error = twe_setup(sc))) {
337 twe_free(sc);
338 return(error);
339 }
340
341 /*
342 * Print some information about the controller and configuration.
343 */
344 twe_describe_controller(sc);
345
346 /*
347 * Create the control device.
348 */
349 sc->twe_dev_t = make_dev(&twe_ops, device_get_unit(sc->twe_dev),
350 UID_ROOT, GID_OPERATOR,
351 S_IRUSR | S_IWUSR, "twe%d",
352 device_get_unit(sc->twe_dev));
353 sc->twe_dev_t->si_drv1 = sc;
354
355 /*
356 * Schedule ourselves to bring the controller up once interrupts are
357 * available. This isn't strictly necessary, since we disable
358 * interrupts while probing the controller, but it is more in keeping
359 * with common practice for other disk devices.
360 */
361 sc->twe_ich.ich_func = twe_intrhook;
362 sc->twe_ich.ich_arg = sc;
363 sc->twe_ich.ich_desc = "twe";
364 if (config_intrhook_establish(&sc->twe_ich) != 0) {
365 twe_printf(sc, "can't establish configuration hook\n");
366 twe_free(sc);
367 return(ENXIO);
368 }
369
370 return(0);
371}
372
373/********************************************************************************
374 * Free all of the resources associated with (sc).
375 *
376 * Should not be called if the controller is active.
377 */
378static void
379twe_free(struct twe_softc *sc)
380{
381 struct twe_request *tr;
382
383 debug_called(4);
384
385 /* throw away any command buffers */
386 while ((tr = twe_dequeue_free(sc)) != NULL)
387 twe_free_request(tr);
388
389 if (sc->twe_cmd != NULL) {
390 bus_dmamap_unload(sc->twe_cmd_dmat, sc->twe_cmdmap);
391 bus_dmamem_free(sc->twe_cmd_dmat, sc->twe_cmd, sc->twe_cmdmap);
392 }
393
394 if (sc->twe_immediate != NULL) {
395 bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
396 bus_dmamem_free(sc->twe_immediate_dmat, sc->twe_immediate,
397 sc->twe_immediate_map);
398 }
399
400 if (sc->twe_immediate_dmat)
401 bus_dma_tag_destroy(sc->twe_immediate_dmat);
402
403 /* destroy the data-transfer DMA tag */
404 if (sc->twe_buffer_dmat)
405 bus_dma_tag_destroy(sc->twe_buffer_dmat);
406
407 /* disconnect the interrupt handler */
408 if (sc->twe_intr)
409 bus_teardown_intr(sc->twe_dev, sc->twe_irq, sc->twe_intr);
410 if (sc->twe_irq != NULL)
411 bus_release_resource(sc->twe_dev, SYS_RES_IRQ, 0, sc->twe_irq);
412
413 /* destroy the parent DMA tag */
414 if (sc->twe_parent_dmat)
415 bus_dma_tag_destroy(sc->twe_parent_dmat);
416
417 /* release the register window mapping */
418 if (sc->twe_io != NULL)
419 bus_release_resource(sc->twe_dev, SYS_RES_IOPORT, TWE_IO_CONFIG_REG, sc->twe_io);
420
421 /* destroy control device */
422 if (sc->twe_dev_t != NULL)
423 destroy_dev(sc->twe_dev_t);
424 dev_ops_remove_minor(&twe_ops, device_get_unit(sc->twe_dev));
425
426 sysctl_ctx_free(&sc->sysctl_ctx);
427 lockuninit(&sc->twe_config_lock);
428 lockuninit(&sc->twe_io_lock);
429}
430
431/********************************************************************************
432 * Disconnect from the controller completely, in preparation for unload.
433 */
434static int
435twe_detach(device_t dev)
436{
437 struct twe_softc *sc = device_get_softc(dev);
438
439 debug_called(4);
440
441 TWE_IO_LOCK(sc);
442 if (sc->twe_state & TWE_STATE_OPEN) {
443 TWE_IO_UNLOCK(sc);
444 return (EBUSY);
445 }
446 sc->twe_state |= TWE_STATE_DETACHING;
447 TWE_IO_UNLOCK(sc);
448
449 /*
450 * Shut the controller down.
451 */
452 if (twe_shutdown(dev)) {
453 TWE_IO_LOCK(sc);
454 sc->twe_state &= ~TWE_STATE_DETACHING;
455 TWE_IO_UNLOCK(sc);
456 return (EBUSY);
457 }
458
459 twe_free(sc);
460
461 return(0);
462}
463
464/********************************************************************************
465 * Bring the controller down to a dormant state and detach all child devices.
466 *
467 * Note that we can assume that the bioq on the controller is empty, as we won't
468 * allow shutdown if any device is open.
469 */
470static int
471twe_shutdown(device_t dev)
472{
473 struct twe_softc *sc = device_get_softc(dev);
474 int i, error = 0;
475
476 debug_called(4);
477
478 /*
479 * Delete all our child devices.
480 */
481 TWE_CONFIG_LOCK(sc);
482 for (i = 0; i < TWE_MAX_UNITS; i++) {
483 if (sc->twe_drive[i].td_disk != 0) {
484 if ((error = twe_detach_drive(sc, i)) != 0) {
485 TWE_CONFIG_UNLOCK(sc);
486 return (error);
487 }
488 }
489 }
490 TWE_CONFIG_UNLOCK(sc);
491
492 /*
493 * Bring the controller down.
494 */
495 TWE_IO_LOCK(sc);
496 twe_deinit(sc);
497 TWE_IO_UNLOCK(sc);
498
499 return(0);
500}
501
502/********************************************************************************
503 * Bring the controller to a quiescent state, ready for system suspend.
504 */
505static int
506twe_suspend(device_t dev)
507{
508 struct twe_softc *sc = device_get_softc(dev);
509
510 debug_called(4);
511
512 TWE_IO_LOCK(sc);
513 sc->twe_state |= TWE_STATE_SUSPEND;
514 twe_disable_interrupts(sc);
515 TWE_IO_UNLOCK(sc);
518
519 return(0);
520}
521
522/********************************************************************************
523 * Bring the controller back to a state ready for operation.
524 */
525static int
526twe_resume(device_t dev)
527{
528 struct twe_softc *sc = device_get_softc(dev);
529
530 debug_called(4);
531
532 TWE_IO_LOCK(sc);
533 sc->twe_state &= ~TWE_STATE_SUSPEND;
534 twe_enable_interrupts(sc);
535 TWE_IO_UNLOCK(sc);
536
537 return(0);
538}
539
540/*******************************************************************************
541 * Take an interrupt, or be poked by other code to look for interrupt-worthy
542 * status.
543 */
544static void
545twe_pci_intr(void *arg)
546{
547 struct twe_softc *sc = arg;
548
549 TWE_IO_LOCK(sc);
550 twe_intr(sc);
551 TWE_IO_UNLOCK(sc);
552}
553
554/********************************************************************************
555 * Delayed-startup hook
556 */
557static void
558twe_intrhook(void *arg)
559{
560 struct twe_softc *sc = (struct twe_softc *)arg;
561
562 /* pull ourselves off the intrhook chain */
563 config_intrhook_disestablish(&sc->twe_ich);
564
565 /* call core startup routine */
566 twe_init(sc);
567}
568
569/********************************************************************************
570 * Given a detected drive, attach it to the bio interface.
571 *
572 * This is called from twe_add_unit.
573 */
574int
575twe_attach_drive(struct twe_softc *sc, struct twe_drive *dr)
576{
577 char buf[80];
578 int error;
579
580 get_mplock();
581 dr->td_disk = device_add_child(sc->twe_dev, NULL, -1);
582 if (dr->td_disk == NULL) {
583 rel_mplock();
584 twe_printf(sc, "Cannot add unit\n");
585 return (EIO);
586 }
587 device_set_ivars(dr->td_disk, dr);
588
589 /*
590 * XXX It would make sense to test the online/initialising bits, but they seem to be
591 * always set...
592 */
593 ksprintf(buf, "Unit %d, %s, %s",
594 dr->td_twe_unit,
595 twe_describe_code(twe_table_unittype, dr->td_type),
596 twe_describe_code(twe_table_unitstate, dr->td_state & TWE_PARAM_UNITSTATUS_MASK));
597 device_set_desc_copy(dr->td_disk, buf);
598
599 error = device_probe_and_attach(dr->td_disk);
600 rel_mplock();
601 if (error != 0) {
602 twe_printf(sc, "Cannot attach unit to controller. error = %d\n", error);
603 return (EIO);
604 }
605 return (0);
606}
607
608/********************************************************************************
609 * Detach the specified unit if it exists
610 *
611 * This is called from twe_del_unit.
612 */
613int
614twe_detach_drive(struct twe_softc *sc, int unit)
615{
616 int error = 0;
617
618 TWE_CONFIG_ASSERT_LOCKED(sc);
619 get_mplock();
620 error = device_delete_child(sc->twe_dev, sc->twe_drive[unit].td_disk);
621 rel_mplock();
622 if (error != 0) {
623 twe_printf(sc, "failed to delete unit %d\n", unit);
624 return(error);
625 }
626 bzero(&sc->twe_drive[unit], sizeof(sc->twe_drive[unit]));
627 return(error);
628}
629
630/********************************************************************************
631 * Clear a PCI parity error.
632 */
633void
634twe_clear_pci_parity_error(struct twe_softc *sc)
635{
636 TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PARITY_ERROR);
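 /*
  * PCI status register error bits are write-one-to-clear, so writing the
  * parity-error mask back to PCIR_STATUS clears the latched condition.
  */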
637 pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
638}
639
640/********************************************************************************
641 * Clear a PCI abort.
642 */
643void
644twe_clear_pci_abort(struct twe_softc *sc)
645{
646 TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PCI_ABORT);
647 pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
648}
649
650/********************************************************************************
651 ********************************************************************************
652 Disk device
653 ********************************************************************************
654 ********************************************************************************/
655
656/*
657 * Disk device bus interface
658 */
659static int twed_probe(device_t dev);
660static int twed_attach(device_t dev);
661static int twed_detach(device_t dev);
662
663static device_method_t twed_methods[] = {
664 DEVMETHOD(device_probe, twed_probe),
665 DEVMETHOD(device_attach, twed_attach),
666 DEVMETHOD(device_detach, twed_detach),
667 { 0, 0 }
668};
669
670static driver_t twed_driver = {
671 "twed",
672 twed_methods,
673 sizeof(struct twed_softc)
674};
675
676static devclass_t twed_devclass;
677DRIVER_MODULE(twed, twe, twed_driver, twed_devclass, NULL, NULL);
678
679/*
680 * Disk device control interface.
681 */
682static d_open_t twed_open;
683static d_close_t twed_close;
684static d_strategy_t twed_strategy;
685static d_dump_t twed_dump;
686
687static struct dev_ops twed_ops = {
688 { "twed", 0, D_DISK | D_MPSAFE},
689 .d_open = twed_open,
690 .d_close = twed_close,
691 .d_read = physread,
692 .d_write = physwrite,
693 .d_strategy = twed_strategy,
694 .d_dump = twed_dump,
695};
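/*
 * read(2)/write(2) on the raw disk node are routed through physread/physwrite,
 * which build bios and hand them to twed_strategy() below, the same entry
 * point the filesystem I/O path uses.
 */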
696
697/********************************************************************************
698 * Handle open from generic layer.
699 *
700 * Note that this is typically only called by the diskslice code, and not
701 * for opens on subdevices (eg. slices, partitions).
702 */
703static int
704twed_open(struct dev_open_args *ap)
705{
706 cdev_t dev = ap->a_head.a_dev;
707 struct twed_softc *sc = (struct twed_softc *)dev->si_drv1;
708
709 debug_called(4);
710
711 if (sc == NULL)
712 return (ENXIO);
713
714 /* check that the controller is up and running */
715 if (sc->twed_controller->twe_state & TWE_STATE_SHUTDOWN)
716 return(ENXIO);
717
718 sc->twed_flags |= TWED_OPEN;
719 return (0);
720}
721
722/********************************************************************************
723 * Handle last close of the disk device.
724 */
725static int
726twed_close(struct dev_close_args *ap)
727{
728 cdev_t dev = ap->a_head.a_dev;
729 struct twed_softc *sc = (struct twed_softc *)dev->si_drv1;
730
731 debug_called(4);
732
733 if (sc == NULL)
734 return (ENXIO);
735
736 sc->twed_flags &= ~TWED_OPEN;
737 return (0);
738}
739
740/********************************************************************************
741 * Handle an I/O request.
742 */
743static int
744twed_strategy(struct dev_strategy_args *ap)
745{
746 cdev_t dev = ap->a_head.a_dev;
747 struct bio *bio = ap->a_bio;
748 struct twed_softc *sc = dev->si_drv1;
749 struct buf *bp = bio->bio_buf;
750
751 bio->bio_driver_info = sc;
752
753 debug_called(4);
754
755 TWED_BIO_IN;
756
757 /* bogus disk? */
758 if (sc == NULL || sc->twed_drive->td_disk == NULL) {
759 bp->b_error = EINVAL;
760 bp->b_flags |= B_ERROR;
761 kprintf("twe: bio for invalid disk!\n");
762 biodone(bio);
763 TWED_BIO_OUT;
764 return(0);
765 }
766
767 /* perform accounting */
768 devstat_start_transaction(&sc->twed_stats);
769
770 /* queue the bio on the controller */
771 TWE_IO_LOCK(sc->twed_controller);
772 twe_enqueue_bio(sc->twed_controller, bio);
773
774 /* poke the controller to start I/O */
775 twe_startio(sc->twed_controller);
776 TWE_IO_UNLOCK(sc->twed_controller);
777 return(0);
778}
779
780/********************************************************************************
781 * System crashdump support
782 */
783static int
784twed_dump(struct dev_dump_args *ap)
785{
786 cdev_t dev = ap->a_head.a_dev;
787 size_t length = ap->a_length;
788 off_t offset = ap->a_offset;
789 void *virtual = ap->a_virtual;
790 struct twed_softc *twed_sc;
791 struct twe_softc *twe_sc;
792 int error;
793
794 twed_sc = dev->si_drv1;
795 if (twed_sc == NULL)
796 return(ENXIO);
797 twe_sc = (struct twe_softc *)twed_sc->twed_controller;
798
799 if (length > 0) {
800 if ((error = twe_dump_blocks(twe_sc, twed_sc->twed_drive->td_twe_unit, offset / TWE_BLOCK_SIZE, virtual, length / TWE_BLOCK_SIZE)) != 0)
801 return(error);
802 }
803 return(0);
804}
805
806/********************************************************************************
807 * Handle completion of an I/O request.
808 */
809void
810twed_intr(struct bio *bio)
811{
812 struct buf *bp = bio->bio_buf;
813 struct twed_softc *sc = bio->bio_driver_info;
814
815 debug_called(4);
816
817 /* if no error, transfer completed */
818 if (!(bp->b_flags & B_ERROR))
819 bp->b_resid = 0;
820 devstat_end_transaction_buf(&sc->twed_stats, bp);
821 biodone(bio);
822 TWED_BIO_OUT;
823}
824
825/********************************************************************************
826 * Default probe stub.
827 */
828static int
829twed_probe(device_t dev)
830{
831 return (0);
832}
833
834/********************************************************************************
835 * Attach a unit to the controller.
836 */
837static int
838twed_attach(device_t dev)
839{
840 struct twed_softc *sc;
841 struct disk_info info;
842 device_t parent;
843 cdev_t dsk;
844
845 debug_called(4);
846
847 /* initialise our softc */
848 sc = device_get_softc(dev);
849 parent = device_get_parent(dev);
850 sc->twed_controller = (struct twe_softc *)device_get_softc(parent);
851 sc->twed_drive = device_get_ivars(dev);
852 sc->twed_dev = dev;
853
854 /* report the drive */
855 twed_printf(sc, "%uMB (%u sectors)\n",
856 sc->twed_drive->td_size / ((1024 * 1024) / TWE_BLOCK_SIZE),
857 sc->twed_drive->td_size);
858
859 /* attach a generic disk device to ourselves */
860
861 sc->twed_drive->td_sys_unit = device_get_unit(dev);
862
863 devstat_add_entry(&sc->twed_stats, "twed", sc->twed_drive->td_sys_unit,
864 TWE_BLOCK_SIZE,
865 DEVSTAT_NO_ORDERED_TAGS,
866 DEVSTAT_TYPE_STORARRAY | DEVSTAT_TYPE_IF_OTHER,
867 DEVSTAT_PRIORITY_ARRAY);
868
869 dsk = disk_create(sc->twed_drive->td_sys_unit, &sc->twed_disk, &twed_ops);
870 dsk->si_drv1 = sc;
871 sc->twed_dev_t = dsk;
872
873 /* set the maximum I/O size to the theoretical maximum allowed by the S/G list size */
874 dsk->si_iosize_max = (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE;
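 /*
  * One S/G entry is held in reserve because a transfer that does not start on
  * a page boundary can touch one more page than its size alone would suggest.
  */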
875
876 /*
877 * Set disk info, as it appears that all needed data is available already.
878 * Setting the disk info will also cause the probing to start.
879 */
880 bzero(&info, sizeof(info));
881 info.d_media_blksize = TWE_BLOCK_SIZE; /* mandatory */
882 info.d_media_blocks = sc->twed_drive->td_size;
883
884 info.d_type = DTYPE_ESDI; /* optional */
885 info.d_secpertrack = sc->twed_drive->td_sectors;
886 info.d_nheads = sc->twed_drive->td_heads;
887 info.d_ncylinders = sc->twed_drive->td_cylinders;
888 info.d_secpercyl = sc->twed_drive->td_sectors * sc->twed_drive->td_heads;
889
890 disk_setdiskinfo(&sc->twed_disk, &info);
891
892 return (0);
893}
894
895/********************************************************************************
896 * Disconnect ourselves from the system.
897 */
898static int
899twed_detach(device_t dev)
900{
901 struct twed_softc *sc = (struct twed_softc *)device_get_softc(dev);
902
903 debug_called(4);
904
905 if (sc->twed_flags & TWED_OPEN)
906 return(EBUSY);
907
908 devstat_remove_entry(&sc->twed_stats);
909 disk_destroy(&sc->twed_disk);
910
911 return(0);
912}
913
914/********************************************************************************
915 ********************************************************************************
916 Misc
917 ********************************************************************************
918 ********************************************************************************/
919
920/********************************************************************************
921 * Allocate a command buffer
922 */
923static MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands");
924
925struct twe_request *
926twe_allocate_request(struct twe_softc *sc, int tag)
927{
928 struct twe_request *tr;
929 int aligned_size;
930
931 /*
932 * TWE requires requests to be 512-byte aligned. Depend on malloc()
933 * guaranteeing alignment for power-of-2 requests. Note that the old
934 * (FreeBSD-4.x) malloc code aligned all requests, but the new slab
935 * allocator only guarantees same-size alignment for power-of-2 requests.
936 */
937 aligned_size = (sizeof(struct twe_request) + TWE_ALIGNMASK) &
938 ~TWE_ALIGNMASK;
939 tr = kmalloc(aligned_size, TWE_MALLOC_CLASS, M_INTWAIT | M_ZERO);
940 tr->tr_sc = sc;
941 tr->tr_tag = tag;
942 if (bus_dmamap_create(sc->twe_buffer_dmat, 0, &tr->tr_dmamap)) {
943 twe_free_request(tr);
944 twe_printf(sc, "unable to allocate dmamap for tag %d\n", tag);
945 return(NULL);
946 }
947 return(tr);
948}
949
950/********************************************************************************
951 * Permanently discard a command buffer.
952 */
953void
954twe_free_request(struct twe_request *tr)
955{
956 struct twe_softc *sc = tr->tr_sc;
957
958 debug_called(4);
959
960 bus_dmamap_destroy(sc->twe_buffer_dmat, tr->tr_dmamap);
961 kfree(tr, TWE_MALLOC_CLASS);
962}
963
964/********************************************************************************
965 * Map/unmap (tr)'s command and data in the controller's addressable space.
966 *
967 * These routines ensure that the data which the controller is going to try to
968 * access is actually visible to the controller, in a machine-independent
969 * fashion. Due to a hardware limitation, I/O buffers must be 512-byte aligned
970 * and we take care of that here as well.
971 */
972static void
973twe_fillin_sgl(TWE_SG_Entry *sgl, bus_dma_segment_t *segs, int nsegments, int max_sgl)
974{
975 int i;
976
977 for (i = 0; i < nsegments; i++) {
978 sgl[i].address = segs[i].ds_addr;
979 sgl[i].length = segs[i].ds_len;
980 }
981 for (; i < max_sgl; i++) { /* XXX necessary? */
982 sgl[i].address = 0;
983 sgl[i].length = 0;
984 }
985}
986
987static void
988twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
989{
990 struct twe_request *tr = (struct twe_request *)arg;
991 struct twe_softc *sc = tr->tr_sc;
992 TWE_Command *cmd = TWE_FIND_COMMAND(tr);
993
994 debug_called(4);
995
996 if (tr->tr_flags & TWE_CMD_MAPPED)
997 panic("already mapped command");
998
999 tr->tr_flags |= TWE_CMD_MAPPED;
1000
1001 if (tr->tr_flags & TWE_CMD_IN_PROGRESS)
1002 sc->twe_state &= ~TWE_STATE_FRZN;
1003 /* save base of first segment in command (applicable if there is only one segment) */
1004 tr->tr_dataphys = segs[0].ds_addr;
1005
1006 /* correct command size for s/g list size */
1007 cmd->generic.size += 2 * nsegments;
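 /*
  * The command's size field counts 32-bit words; each scatter/gather entry
  * contributes two words (bus address and length), hence 2 * nsegments above.
  */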
1008
1009 /*
1010 * Due to the fact that parameter and I/O commands have the scatter/gather list in
1011 * different places, we need to determine which sort of command this actually is
1012 * before we can populate it correctly.
1013 */
1014 switch(cmd->generic.opcode) {
1015 case TWE_OP_GET_PARAM:
1016 case TWE_OP_SET_PARAM:
1017 cmd->generic.sgl_offset = 2;
1018 twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1019 break;
1020 case TWE_OP_READ:
1021 case TWE_OP_WRITE:
1022 cmd->generic.sgl_offset = 3;
1023 twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1024 break;
1025 case TWE_OP_ATA_PASSTHROUGH:
1026 cmd->generic.sgl_offset = 5;
1027 twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
1028 break;
1029 default:
1030 /*
1031 * Fall back to what the Linux driver does.
1032 * Do this because the API may send an opcode
1033 * the driver knows nothing about and this will
1034 * at least stop PCIABRTs from hosing us.
1035 */
1036 switch (cmd->generic.sgl_offset) {
1037 case 2:
1038 twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1039 break;
1040 case 3:
1041 twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1042 break;
1043 case 5:
1044 twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
1045 break;
1046 }
1047 }
1048
1049 if (tr->tr_flags & TWE_CMD_DATAIN) {
1050 if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1051 bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1052 BUS_DMASYNC_PREREAD);
1053 } else {
1054 bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1055 BUS_DMASYNC_PREREAD);
1056 }
1057 }
1058
1059 if (tr->tr_flags & TWE_CMD_DATAOUT) {
1060 /*
1061 * if we're using an alignment buffer, and we're writing data
1062 * copy the real data out
1063 */
1064 if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1065 bcopy(tr->tr_realdata, tr->tr_data, tr->tr_length);
1066
1067 if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1068 bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1069 BUS_DMASYNC_PREWRITE);
1070 } else {
1071 bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1072 BUS_DMASYNC_PREWRITE);
1073 }
1074 }
1075
1076 if (twe_start(tr) == EBUSY) {
1077 tr->tr_sc->twe_state |= TWE_STATE_CTLR_BUSY;
1078 twe_requeue_ready(tr);
1079 }
1080}
1081
1082static void
1083twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1084{
1085 struct twe_softc *sc = (struct twe_softc *)arg;
1086
1087 debug_called(4);
1088
1089 /* command can't cross a page boundary */
1090 sc->twe_cmdphys = segs[0].ds_addr;
1091}
1092
1093int
1094twe_map_request(struct twe_request *tr)
1095{
1096 struct twe_softc *sc = tr->tr_sc;
1097 int error = 0;
1098
1099 debug_called(4);
1100
1101 twe_lockassert(&sc->twe_io_lock);
1102 if (sc->twe_state & (TWE_STATE_CTLR_BUSY | TWE_STATE_FRZN)) {
1103 twe_requeue_ready(tr);
1104 return (EBUSY);
1105 }
1106
1107 bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_PREWRITE);
1108
1109 /*
1110 * If the command involves data, map that too.
1111 */
1112 if (tr->tr_data != NULL && ((tr->tr_flags & TWE_CMD_MAPPED) == 0)) {
1113
1114 /*
1115 * Data must be 512-byte aligned; allocate a fixup buffer if it's not.
1116 *
1117 * DragonFly's malloc only guarantees alignment for requests which
1118 * are power-of-2 sized.
1119 */
1120 if (((vm_offset_t)tr->tr_data % TWE_ALIGNMENT) != 0) {
1121 int aligned_size;
1122
1123 tr->tr_realdata = tr->tr_data; /* save pointer to 'real' data */
1124 aligned_size = TWE_ALIGNMENT;
1125 while (aligned_size < tr->tr_length)
1126 aligned_size <<= 1;
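 /* e.g. a 3KB transfer gets a 4KB bounce buffer, which kmalloc() aligns */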
1127 tr->tr_flags |= TWE_CMD_ALIGNBUF;
1128 tr->tr_data = kmalloc(aligned_size, TWE_MALLOC_CLASS, M_INTWAIT);
1129 if (tr->tr_data == NULL) {
1130 twe_printf(sc, "%s: malloc failed\n", __func__);
1131 tr->tr_data = tr->tr_realdata; /* restore original data pointer */
1132 return(ENOMEM);
1133 }
1134 }
1135
1136 /*
1137 * Map the data buffer into bus space and build the s/g list.
1138 */
1139 if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1140 error = bus_dmamap_load(sc->twe_immediate_dmat, sc->twe_immediate_map, sc->twe_immediate,
1141 tr->tr_length, twe_setup_data_dmamap, tr, BUS_DMA_NOWAIT);
1142 } else {
1143 error = bus_dmamap_load(sc->twe_buffer_dmat, tr->tr_dmamap, tr->tr_data, tr->tr_length,
1144 twe_setup_data_dmamap, tr, 0);
1145 }
1146 if (error == EINPROGRESS) {
1147 tr->tr_flags |= TWE_CMD_IN_PROGRESS;
1148 sc->twe_state |= TWE_STATE_FRZN;
1149 error = 0;
1150 }
1151 } else
1152 if ((error = twe_start(tr)) == EBUSY) {
1153 sc->twe_state |= TWE_STATE_CTLR_BUSY;
1154 twe_requeue_ready(tr);
1155 }
1156
1157 return(error);
1158}
1159
1160void
1161twe_unmap_request(struct twe_request *tr)
1162{
1163 struct twe_softc *sc = tr->tr_sc;
1164
1165 debug_called(4);
1166
1167 bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_POSTWRITE);
1168
1169 /*
1170 * If the command involved data, unmap that too.
1171 */
1172 if (tr->tr_data != NULL) {
1173 if (tr->tr_flags & TWE_CMD_DATAIN) {
1174 if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1175 bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1176 BUS_DMASYNC_POSTREAD);
1177 } else {
1178 bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1179 BUS_DMASYNC_POSTREAD);
1180 }
1181
1182 /* if we're using an alignment buffer, and we're reading data, copy the real data in */
1183 if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1184 bcopy(tr->tr_data, tr->tr_realdata, tr->tr_length);
1185 }
1186 if (tr->tr_flags & TWE_CMD_DATAOUT) {
1187 if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1188 bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1189 BUS_DMASYNC_POSTWRITE);
1190 } else {
1191 bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1192 BUS_DMASYNC_POSTWRITE);
1193 }
1194 }
1195
1196 if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1197 bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
1198 } else {
1199 bus_dmamap_unload(sc->twe_buffer_dmat, tr->tr_dmamap);
1200 }
1201 }
1202
1203 /* free alignment buffer if it was used */
1204 if (tr->tr_flags & TWE_CMD_ALIGNBUF) {
1205 kfree(tr->tr_data, TWE_MALLOC_CLASS);
1206 tr->tr_data = tr->tr_realdata; /* restore 'real' data pointer */
1207 }
1208}
1209
1210#ifdef TWE_DEBUG
1211void twe_report(void);
1212/********************************************************************************
1213 * Print current controller status, call from DDB.
1214 */
1215void
1216twe_report(void)
1217{
1218 struct twe_softc *sc;
1219 int i;
1220
1221 for (i = 0; (sc = devclass_get_softc(twe_devclass, i)) != NULL; i++)
1222 twe_print_controller(sc);
1223 kprintf("twed: total bio count in %u out %u\n", twed_bio_in, twed_bio_out);
1224}
1225#endif