/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.16.2.2 2003/01/23 00:55:27 scottl Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access the pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>
#define MAX_BPAGES 1024

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};
/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* physical address of bounce buffer */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[18];
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};
static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_tok);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

int busdma_swi_pending;
static int total_bounce_pages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;
struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_zone(bus_dma_tag_t);
static int alloc_bounce_pages(bus_dma_tag_t, u_int);
static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
				  vm_offset_t, bus_size_t);
static void free_bounce_page(bus_dma_tag_t, struct bounce_page *);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
	   0, "Total bounce pages");
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
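
/*
 * Worked example (illustrative, not part of the original source): for a
 * tag with lowaddr = 16MB, highaddr = BUS_SPACE_MAXADDR and no filter
 * function, a page at physical 0x2000000 (32MB) satisfies
 * paddr > lowaddr && paddr <= highaddr, so run_filter() returns 1 and
 * that page must be bounced.  A page at 0x400000 (4MB) fails the first
 * test in every tag up the parent chain and can be used directly.
 */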
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

		newtag->alignment = MAX(parent->alignment, newtag->alignment);

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			newtag->parent->ref_count++;
	}

	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}
back:
	if (error)
		kfree(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	return error;
}
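
/*
 * Usage sketch (hypothetical driver code, not from this file): a driver
 * for a device that can only address the low 16MB might create its tag
 * as below.  The softc layout and error handling are assumptions made
 * purely for illustration.
 *
 *	error = bus_dma_tag_create(NULL,		// parent
 *			1,				// alignment
 *			0,				// boundary
 *			BUS_SPACE_MAXADDR_24BIT,	// lowaddr
 *			BUS_SPACE_MAXADDR,		// highaddr
 *			NULL, NULL,			// filter, filterarg
 *			MAXBSIZE,			// maxsize
 *			1,				// nsegments
 *			BUS_SPACE_MAXSIZE_24BIT,	// maxsegsz
 *			BUS_DMA_ALLOCNOW,		// flags
 *			&sc->dma_tag);
 *
 * Because lowaddr falls below ptoa(Maxmem) on most machines, the tag is
 * marked BUS_DMA_COULD_BOUNCE and BUS_DMA_ALLOCNOW preallocates the
 * bounce pool up front.
 */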
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					kfree(dmat->segments, M_DEVBUF);
				kfree(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else {
				dmat = NULL;
			}
		}
	}
	return (0);
}
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && bz->total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return error;
}
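
/*
 * Usage sketch (hypothetical, for illustration only): maps are usually
 * created once per in-flight transfer slot at attach time:
 *
 *	for (i = 0; i < NSLOTS; i++) {
 *		error = bus_dmamap_create(sc->dma_tag, 0, &sc->maps[i]);
 *		if (error)
 *			break;
 *	}
 *
 * Note that for a non-bouncing tag this routine hands back a NULL map,
 * which the load functions below translate to &nobounce_dmamap.
 */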
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);
		/*
		 * XXX Check whether the allocation crossed a page boundary
		 * and retry with power-of-2 alignment in that case.
		 */
		if ((((intptr_t)*vaddr) & PAGE_MASK) !=
		    (((intptr_t)*vaddr + dmat->maxsize) & PAGE_MASK)) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			for (size = 1; size <= dmat->maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
				      0ul, dmat->lowaddr, dmat->alignment,
				      dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
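
/*
 * Usage sketch (hypothetical names, for illustration): allocating a
 * zeroed descriptor ring that honors the tag's constraints:
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, (void **)&sc->ring,
 *				 BUS_DMA_ZERO, &sc->ring_map);
 *
 * On success sc->ring_map is NULL by design; pass the same (NULL) map
 * back to bus_dmamem_free() when tearing down.
 */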
/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa(Maxmem))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}
static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr);
	else
		return pmap_kextract(vaddr);
}
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entry, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		crit_enter();
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				crit_exit();
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				crit_exit();

				return (EINPROGRESS);
			}
		}
		crit_exit();
	}

	KKASSERT(*segp >= 1);
	seg = *segp;
	sg = &dmat->segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= dmat->nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0) {
		kprintf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
			(u_long)buflen);
		error = EFBIG;
	}

	*segp = seg;
	*lastpaddrp = nextpaddr;

	return error;
}
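
/*
 * Worked example of the boundary handling above (illustrative): with
 * boundary = 64KB (bmask = ~0xffff) and an accumulated segment at
 * ds_addr = 0xf000 with ds_len = 0x3000, the last byte (0x11fff) and
 * the first byte differ in the bits above bmask, so the segment is
 * split at the 64KB line: sg[0] = {0xf000, 0x1000} and
 * sg[1] = {0x10000, 0x2000}.
 */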
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustments".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
			NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS)
		return error;

	callback(callback_arg, dmat->segments, nsegs, error);
	return error;
}
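
/*
 * Usage sketch (hypothetical): the callback receives the segment list
 * and typically copies what it needs into the device's descriptors:
 *
 *	static void
 *	foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error)
 *			return;
 *		sc->paddr = segs[0].ds_addr;
 *	}
 *
 *	bus_dmamap_load(sc->dma_tag, sc->map, buf, len, foo_dma_cb, sc, 0);
 *
 * Because this path forces BUS_DMA_WAITOK, a load that must wait for
 * bounce pages returns EINPROGRESS and the callback runs later from
 * busdma_swi().
 */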
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					NULL, flags, &lastaddr,
					&nsegs, first);
			first = 0;
		}
	} else {
		nsegs = 0;
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, dmat->segments, 0, 0, error);
	} else {
		callback(callback_arg, dmat->segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	return error;
}
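
/*
 * Usage sketch (hypothetical): a network driver would load a packet
 * chain like this, typically from its transmit path:
 *
 *	error = bus_dmamap_load_mbuf(sc->tx_tag, slot->map, m,
 *				     foo_txbuf_cb, slot, BUS_DMA_NOWAIT);
 *	if (error == ENOMEM) {
 *		// out of bounce pages: requeue the packet and retry later
 *	}
 *
 * foo_txbuf_cb is a bus_dmamap_callback2_t and also receives the mapped
 * length (m_pkthdr.len here); note the flag is forced to BUS_DMA_NOWAIT
 * above regardless of what the caller passes.
 */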
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, dmat->segments, 0, 0, error);
	} else {
		callback(callback_arg, dmat->segments, nsegs,
			 uio->uio_resid, error);
	}
	return error;
}
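
/*
 * Usage sketch (hypothetical): loading a user I/O request, with the
 * pmap selection (user vs kernel) performed above from the uio itself:
 *
 *	error = bus_dmamap_load_uio(sc->io_tag, xfer->map, uio,
 *				    foo_uio_cb, xfer, 0);
 *
 * For UIO_USERSPACE requests the calling thread must own the address
 * space, since its pmap is used for the virtual-to-physical
 * translations.
 */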
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops when bouncing */
			break;
		}
	}
}
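
/*
 * Usage sketch (hypothetical): the sync calls bracket the actual DMA.
 * For a device write (device reads memory):
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE); // copy to bounce
 *	// ... start DMA, wait for the completion interrupt ...
 *
 * For a device read (device writes memory):
 *
 *	// ... DMA completes ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD); // copy back
 *	bus_dmamap_unload(tag, map);
 */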
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;
	lwkt_tokref ref;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned\n"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&ref, &bounce_zone_tok);

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if (dmat->alignment <= bz->alignment &&
		    dmat->boundary <= bz->boundary &&
		    dmat->lowaddr >= bz->lowaddr) {
			lwkt_reltoken(&ref);

			dmat->bounce_zone = bz;
			kfree(new_bz, M_DEVBUF);
			return 0;
		}
	}
	bz = new_bz;

	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

	lwkt_reltoken(&ref);

	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return 0;
}
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 bz->alignment,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		crit_enter();
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		crit_exit();

		count++;
		numpages--;
	}
	return count;
}
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}
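
/*
 * Worked example (illustrative): a map needing 4 pages with 0 reserved,
 * against a zone with 2 free pages.  pages = MIN(2, 4) = 2.  With
 * commit == 0 the shortfall (4 - 2 = 2) is returned, nothing is taken,
 * and reserve_failed is bumped.  With commit == 1 the 2 free pages are
 * moved to reserved and the remaining shortfall of 2 is returned,
 * leaving the map queued until more pages are freed.
 */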
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	crit_enter();
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	crit_exit();

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	crit_enter();
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bz->bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			setsoftvm();
		}
	}
	crit_exit();
}
void
busdma_swi(void)
{
	struct bus_dmamap *map;

	crit_enter();
	busdma_swi_pending = 0;
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		crit_exit();
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		crit_enter();
	}
	crit_exit();
}