- Add hw.busdma sysctl tree.
[dragonfly.git] / sys / platform / pc32 / i386 / busdma_machdep.c
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.16.2.2 2003/01/23 00:55:27 scottl Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access pmap, to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES	1024

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

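/*
 * Example (sketch; the value shown is purely illustrative): once bounce
 * pages have been allocated, the tree added above is visible from
 * userland:
 *
 *	$ sysctl hw.busdma
 *	hw.busdma.total_bpages: 32
 */
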
/*
 * Return non-zero if the given physical address lies within some tag's
 * (lowaddr, highaddr] window and is not excepted by that tag's filter
 * function, i.e. if the address must be bounced.  Parent tags are
 * consulted as well.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4

/*
 * Allocate a device-specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourselves */
	newtag->map_count = 0;
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 * all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		kfree(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
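
/*
 * Illustrative sketch only (not part of this file): a driver that can
 * DMA anywhere in 32-bit space, transferring a single contiguous buffer
 * of up to MAXBSIZE bytes, might create its tag like this.  The "sc"
 * softc field is hypothetical.  The trailing words name each parameter:
 *
 *	error = bus_dma_tag_create(
 *		NULL,				parent
 *		1, 0,				alignment, boundary
 *		BUS_SPACE_MAXADDR_32BIT,	lowaddr
 *		BUS_SPACE_MAXADDR,		highaddr
 *		NULL, NULL,			filter, filterarg
 *		MAXBSIZE, 1,			maxsize, nsegments
 *		BUS_SPACE_MAXSIZE_32BIT,	maxsegsz
 *		0,				flags
 *		&sc->parent_dmat);		dmat
 */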

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					kfree(dmat->segments, M_DEVBUF);
				kfree(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);
		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}
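
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 *
 *	error = bus_dmamap_create(sc->buffer_dmat, 0, &sc->buffer_map);
 *
 * Note that when the tag's restrictions can always be met directly,
 * *mapp is set to NULL; a NULL map is valid for the load/unload/sync
 * calls below and simply means "no bouncing required".
 */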

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);
		/*
		 * XXX Check whether the allocation crossed a page boundary
		 * and retry with power-of-2 alignment in that case.
		 */
		if ((((intptr_t)*vaddr) & PAGE_MASK) !=
		    (((intptr_t)*vaddr + dmat->maxsize) & PAGE_MASK)) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			for (size = 1; size <= dmat->maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
		}
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 * and handles multi-seg allocations.  Nobody is doing
		 * multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
				      0ul, dmat->lowaddr,
				      dmat->alignment ? dmat->alignment : 1ul,
				      dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
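
/*
 * Illustrative sketch (hypothetical names, not part of this file):
 * allocating a zeroed, DMA-addressable descriptor area.  The returned
 * map is always NULL here; the bus address is obtained by loading the
 * buffer afterwards (see bus_dmamap_load() below):
 *
 *	error = bus_dmamem_alloc(sc->desc_dmat, (void **)&sc->desc,
 *				 BUS_DMA_ZERO, &map);
 */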

/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  The same kfree/contigfree choice is made as
 * was made for the allocation.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa(Maxmem))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr;
	bus_dma_segment_t *sg;
	int seg;
	int error;
	vm_paddr_t nextpaddr;
	bus_addr_t bmask;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) &&
	    map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		crit_enter();
		if (reserve_bounce_pages(dmat, map) != 0) {
			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			crit_exit();

			return (EINPROGRESS);
		}
		crit_exit();
	}

	vaddr = (vm_offset_t)buf;
	sg = dmat->segments;
	seg = 1;
	sg->ds_len = 0;
	nextpaddr = 0;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= dmat->nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);

fail:
	if (buflen != 0) {
		kprintf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
			(u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dmat->segments, seg, error);

	return (0);
}
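
/*
 * Illustrative sketch of the callback contract (hypothetical driver
 * code, not part of this file).  The callback may run synchronously
 * from bus_dmamap_load(), or later from busdma_swi() when the load was
 * deferred with EINPROGRESS:
 *
 *	static void
 *	foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *			 int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->desc_dmat, map, sc->desc,
 *				sizeof(*sc->desc), foo_dma_callback,
 *				&sc->desc_busaddr, 0);
 */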

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			vm_offset_t *lastaddrp,
			int *segp,
			int first)
{
	bus_dma_segment_t *segs;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td->td_proc != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	segs = dmat->segments;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_mbuf: No support for bounce pages!"));
	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;
			error = _bus_dmamap_load_buffer(dmat,
					m->m_data, m->m_len,
					curthread, flags, &lastaddr,
					&nsegs, first);
			first = 0;
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}
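
/*
 * Illustrative sketch (hypothetical, not part of this file): unlike
 * bus_dmamap_callback_t, the callback2 variant used here also receives
 * the total mapped length:
 *
 *	static void
 *	foo_txbuf_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *			   bus_size_t mapsize, int error)
 *	{
 *		... program nseg descriptors from segs[] ...
 *	}
 */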

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_offset_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_uio: No support for bounce pages!"));

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat,
				addr, minlen,
				td, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			/* Copy the client data into the bounce pages */
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			/* Copy the bounce pages back to the client data */
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
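
/*
 * Typical usage (sketch, hypothetical driver code): bracket the actual
 * DMA transfer with sync calls so that any bounce copying happens at
 * the right time:
 *
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
 *	... start the transfer and wait for it to complete ...
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(dmat, map);
 */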

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)kmalloc(sizeof(*bpage), M_DEVBUF,
						      M_INTWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		crit_enter();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		crit_exit();
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Reserve as many free bounce pages as possible for the map and return
 * the number of additional pages still needed (zero means the
 * reservation is complete).
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	crit_enter();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	crit_exit();

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	crit_enter();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	crit_exit();
}

/*
 * Software interrupt handler: retry any dmamap loads that were deferred
 * with EINPROGRESS, now that bounce pages have become available for
 * them.
 */
void
busdma_swi(void)
{
	struct bus_dmamap *map;

	crit_enter();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		crit_exit();
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		crit_enter();
	}
	crit_exit();
}