bus_dma_tag fixes and enhancements.
[dragonfly.git] / sys / platform / pc64 / amd64 / busdma_machdep.c
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access pmap to convert per-proc virtual addresses to physical */
#include <sys/proc.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES      1024

/*
 * Up to 16 x N bytes (one bus_dma_segment_t is 16 bytes on x86-64)
 * may be declared on the stack for this many cached segments.
 */
#define BUS_DMA_CACHE_SEGMENTS  8

struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
        bus_dma_tag_t   parent;
        bus_size_t      alignment;
        bus_size_t      boundary;
        bus_addr_t      lowaddr;
        bus_addr_t      highaddr;
        bus_dma_filter_t *filter;
        void            *filterarg;
        bus_size_t      maxsize;
        u_int           nsegments;
        bus_size_t      maxsegsz;
        int             flags;
        int             ref_count;
        int             map_count;
        bus_dma_segment_t *segments;
        struct bounce_zone *bounce_zone;
#ifdef SMP
        struct spinlock spin;
#else
        int             unused0;
#endif
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN    BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR  BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE    (BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

#define BUS_DMAMEM_KMALLOC(dmat) \
        ((dmat)->maxsize <= PAGE_SIZE && \
         (dmat)->alignment <= PAGE_SIZE && \
         (dmat)->lowaddr >= ptoa(Maxmem))
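
/*
 * BUS_DMAMEM_KMALLOC() is true only when a plain kmalloc() can
 * satisfy the tag: the allocation fits in one page, at most page
 * alignment is required, and the tag imposes no address limit below
 * the top of physical memory (lowaddr >= ptoa(Maxmem)), so the
 * memory can never need bouncing.
 */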

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
#ifdef SMP
        struct spinlock spin;
#else
        int             unused0;
#endif
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        int             reserve_failed;
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_ctx;
        struct sysctl_oid *sysctl_tree;
};

#ifdef SMP
#define BZ_LOCK(bz)     spin_lock_wr(&(bz)->spin)
#define BZ_UNLOCK(bz)   spin_unlock_wr(&(bz)->spin)
#else
#define BZ_LOCK(bz)     crit_enter()
#define BZ_UNLOCK(bz)   crit_exit()
#endif

static struct lwkt_token bounce_zone_tok =
        LWKT_TOKEN_INITIALIZER(bounce_zone_tok);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
        STAILQ_HEAD_INITIALIZER(bounce_zone_list);

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1; /* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
        struct bp_list  bpages;
        int             pagesneeded;
        int             pagesreserved;
        bus_dma_tag_t   dmat;
        void            *buf;           /* unmapped buffer pointer */
        bus_size_t      buflen;         /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void            *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
        STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);

static struct bus_dmamap nobounce_dmamap;

static int              alloc_bounce_zone(bus_dma_tag_t);
static int              alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static int              reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void             return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t       add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
                            vm_offset_t, bus_size_t);
static void             free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t     get_map_waiting(bus_dma_tag_t);
static void             add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
           0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
           0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
           &bounce_alignment, 0, "Obey alignment constraint");

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
                     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
                 && (dmat->filter == NULL ||
                     dmat->filter(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
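
/*
 * run_filter() walks the tag and all of its parents and returns
 * non-zero if paddr must be bounced: either it lies in the excluded
 * window (lowaddr, highaddr], or bounce_alignment is enabled and
 * paddr violates the tag's alignment, and in both cases no filter
 * callback overrides the decision.  For example, a tag with
 * lowaddr = 16MB (ISA-style DMA) makes any paddr above 16MB bounce.
 */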

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /*
         * Sanity checks
         */

        if (alignment == 0)
                alignment = 1;
        if (alignment & (alignment - 1))
                panic("alignment must be power of 2\n");

        if (boundary != 0) {
                if (boundary & (boundary - 1))
                        panic("boundary must be power of 2\n");
                if (boundary < maxsegsz) {
                        kprintf("boundary < maxsegsz:\n");
                        print_backtrace();
                        maxsegsz = boundary;
                }
        }

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

        spin_init(&newtag->spin);
        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        newtag->segments = NULL;
        newtag->bounce_zone = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

                if (newtag->boundary == 0) {
                        newtag->boundary = parent->boundary;
                } else if (parent->boundary != 0) {
                        newtag->boundary = MIN(parent->boundary,
                                               newtag->boundary);
                }

#ifdef notyet
                newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        parent->ref_count++;
        }

        if (newtag->lowaddr < ptoa(Maxmem))
                newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
        if (bounce_alignment && newtag->alignment > 1 &&
            !(newtag->flags & BUS_DMA_ALIGNED))
                newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

        if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                error = alloc_bounce_zone(newtag);
                if (error)
                        goto back;
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        if (flags & BUS_DMA_ONEBPAGE) {
                                pages = 1;
                        } else {
                                pages = atop(round_page(maxsize)) -
                                        bz->total_bpages;
                                pages = MAX(pages, 1);
                        }

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages, flags) < pages)
                                error = ENOMEM;

                        /* Performed initial allocation */
                        newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
                }
        }
back:
        if (error)
                kfree(newtag, M_DEVBUF);
        else
                *dmat = newtag;
        return error;
}
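
/*
 * Illustrative sketch (not part of the original file): a typical
 * driver creates a tag roughly as follows.  The 32-bit lowaddr and
 * the single-segment limits here are hypothetical example values,
 * not taken from this file.
 *
 *	bus_dma_tag_t tag;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL,		// no parent
 *			1,				// byte alignment
 *			0,				// no boundary
 *			BUS_SPACE_MAXADDR_32BIT,	// lowaddr: DMA below 4GB
 *			BUS_SPACE_MAXADDR,		// highaddr
 *			NULL, NULL,			// no filter
 *			MAXBSIZE,			// maxsize
 *			1,				// nsegments
 *			BUS_SPACE_MAXADDR_32BIT,	// maxsegsz
 *			0,				// flags
 *			&tag);
 *	if (error)
 *		return error;
 */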

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {
                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
                                if (dmat->segments != NULL)
                                        kfree(dmat->segments, M_DEVBUF);
                                kfree(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
        return(tag->maxsize);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                        dmat->nsegments, M_DEVBUF, M_INTWAIT);
        }

        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                struct bounce_zone *bz;
                int maxpages;

                /* Must bounce */

                if (dmat->bounce_zone == NULL) {
                        error = alloc_bounce_zone(dmat);
                        if (error)
                                return error;
                }
                bz = dmat->bounce_zone;

                *mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
                        maxpages = max_bounce_pages;
                } else {
                        maxpages = MIN(max_bounce_pages,
                                       Maxmem - atop(dmat->lowaddr));
                }
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && bz->total_bpages < maxpages)) {
                        int pages;

                        if (flags & BUS_DMA_ONEBPAGE) {
                                pages = 1;
                        } else {
                                pages = atop(round_page(dmat->maxsize));
                                pages = MIN(maxpages - bz->total_bpages, pages);
                                pages = MAX(pages, 1);
                        }
                        if (alloc_bounce_pages(dmat, pages, flags) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (!error)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (!error)
                dmat->map_count++;
        return error;
}
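
/*
 * Note on the logic above: the first map created against a tag that
 * lacks BUS_DMA_MIN_ALLOC_COMP grows the bounce pool to cover one
 * maxsize transfer; later maps may keep growing it, but only up to
 * maxpages, so a tag with many maps cannot exhaust memory with
 * bounce pages.
 */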

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                kfree(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}

static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
        bus_size_t maxsize = 0;
        uintptr_t vaddr = (uintptr_t)vaddr0;

        if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
                kprintf("boundary check failed\n");
                if (verify)
                        print_backtrace(); /* XXX panic */
                maxsize = dmat->maxsize;
        }
        if (vaddr & (dmat->alignment - 1)) {
                kprintf("alignment check failed\n");
                if (verify)
                        print_backtrace(); /* XXX panic */
                if (dmat->maxsize < dmat->alignment)
                        maxsize = dmat->alignment;
                else
                        maxsize = dmat->maxsize;
        }
        return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        int mflags;

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                        dmat->nsegments, M_DEVBUF, M_INTWAIT);
        }

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        if (BUS_DMAMEM_KMALLOC(dmat)) {
                bus_size_t maxsize;

                *vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

                /*
                 * XXX
                 * Check whether the allocation
                 * - crossed a page boundary
                 * - was not aligned
                 * Retry with power-of-2 alignment in the above cases.
                 */
                maxsize = check_kmalloc(dmat, *vaddr, 0);
                if (maxsize) {
                        size_t size;

                        kfree(*vaddr, M_DEVBUF);
                        /* XXX check for overflow? */
                        for (size = 1; size < maxsize; size <<= 1)
                                ;
                        *vaddr = kmalloc(size, M_DEVBUF, mflags);
                        check_kmalloc(dmat, *vaddr, 1);
                }
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}
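
/*
 * Illustrative sketch (not part of the original file): allocating
 * and freeing DMA-safe memory with this facility.  The "sc" softc
 * fields are hypothetical.
 *
 *	void *ring;
 *	bus_dmamap_t map;	// returned as NULL by design
 *
 *	if (bus_dmamem_alloc(sc->ring_tag, &ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &map) != 0)
 *		return ENOMEM;
 *	...
 *	bus_dmamem_free(sc->ring_tag, ring, map);
 */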

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same kfree/contigfree choice that
 * bus_dmamem_alloc made.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if (BUS_DMAMEM_KMALLOC(dmat))
                kfree(vaddr, M_DEVBUF);
        else
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
        if (pmap)
                return pmap_extract(pmap, vaddr);
        else
                return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastpaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        void *buf, bus_size_t buflen,
                        bus_dma_segment_t *segments,
                        int nsegments,
                        pmap_t pmap,
                        int flags,
                        vm_paddr_t *lastpaddrp,
                        int *segp,
                        int first)
{
        vm_offset_t vaddr;
        vm_paddr_t paddr, nextpaddr;
        bus_dma_segment_t *sg;
        bus_addr_t bmask;
        int seg, error = 0;

        if (map == NULL)
                map = &nobounce_dmamap;

#ifdef INVARIANTS
        if (dmat->flags & BUS_DMA_ALIGNED)
                KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
            map != &nobounce_dmamap && map->pagesneeded == 0) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = _bus_dma_extract(pmap, vaddr);
                        if (run_filter(dmat, paddr) != 0)
                                map->pagesneeded++;
                        vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
                }
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                struct bounce_zone *bz;

                bz = dmat->bounce_zone;
                BZ_LOCK(bz);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                BZ_UNLOCK(bz);
                                error = ENOMEM;
                                goto free_bounce;
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;

                                STAILQ_INSERT_TAIL(
                                    &dmat->bounce_zone->bounce_map_waitinglist,
                                    map, links);
                                BZ_UNLOCK(bz);

                                return (EINPROGRESS);
                        }
                }
                BZ_UNLOCK(bz);
        }

        KKASSERT(*segp >= 1 && *segp <= nsegments);
        seg = *segp;
        sg = &segments[seg - 1];

        vaddr = (vm_offset_t)buf;
        nextpaddr = *lastpaddrp;
        bmask = ~(dmat->boundary - 1);  /* note: will be 0 if boundary is 0 */

        /* force at least one segment */
        do {
                bus_size_t size;

                /*
                 * Per-page main loop
                 */
                paddr = _bus_dma_extract(pmap, vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;
                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        /*
                         * note: this paddr has the same in-page offset
                         * as vaddr and thus the paddr above, so the
                         * size does not have to be recalculated
                         */
                        paddr = add_bounce_page(dmat, map, vaddr, size);
                }

                /*
                 * Fill in the bus_dma_segment
                 */
                if (first) {
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                        first = 0;
                } else if (paddr == nextpaddr) {
                        sg->ds_len += size;
                } else {
                        sg++;
                        seg++;
                        if (seg > nsegments)
                                break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                nextpaddr = paddr + size;

                /*
                 * Handle maxsegsz and boundary issues with a nested loop
                 */
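                /*
                 * The boundary test below exploits bmask: the segment
                 * [ds_addr, nextpaddr) crosses a boundary line exactly
                 * when its first and last byte addresses differ in the
                 * bits selected by bmask, i.e. when
                 * ((nextpaddr - 1) ^ ds_addr) & ~(boundary - 1) != 0.
                 */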
                for (;;) {
                        bus_size_t tmpsize;

                        /*
                         * Limit to the boundary and maximum segment size
                         */
                        if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
                                tmpsize = dmat->boundary -
                                          (sg->ds_addr & ~bmask);
                                if (tmpsize > dmat->maxsegsz)
                                        tmpsize = dmat->maxsegsz;
                                KKASSERT(tmpsize < sg->ds_len);
                        } else if (sg->ds_len > dmat->maxsegsz) {
                                tmpsize = dmat->maxsegsz;
                        } else {
                                break;
                        }

                        /*
                         * Futz, split the data into a new segment.
                         */
                        if (seg >= nsegments)
                                goto fail;
                        sg[1].ds_len = sg[0].ds_len - tmpsize;
                        sg[1].ds_addr = sg[0].ds_addr + tmpsize;
                        sg[0].ds_len = tmpsize;
                        sg++;
                        seg++;
                }

                /*
                 * Adjust for loop
                 */
                buflen -= size;
                vaddr += size;
        } while (buflen > 0);
fail:
        if (buflen != 0)
                error = EFBIG;

        *segp = seg;
        *lastpaddrp = nextpaddr;

free_bounce:
        if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
            map != &nobounce_dmamap) {
                _bus_dmamap_unload(dmat, map);
                return_bounce_pages(dmat, map);
        }
        return error;
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        vm_paddr_t lastaddr = 0;
        int error, nsegs = 1;

        if (map != NULL) {
                /*
                 * XXX
                 * Follow old semantics.  Once all of the callers are fixed,
                 * we should get rid of these internal flag "adjustments".
                 */
                flags &= ~BUS_DMA_NOWAIT;
                flags |= BUS_DMA_WAITOK;

                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        segments = bus_dma_tag_lock(dmat, cache_segments);
        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
                        segments, dmat->nsegments,
                        NULL, flags, &lastaddr, &nsegs, 1);
        if (error == EINPROGRESS) {
                bus_dma_tag_unlock(dmat);
                return error;
        }

        callback(callback_arg, segments, nsegs, error);
        bus_dma_tag_unlock(dmat);
        return 0;
}
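
/*
 * Illustrative sketch (not part of the original file): the common
 * callback pattern for bus_dmamap_load().  The callback may run
 * synchronously, or later from busdma_swi() if the load returned
 * EINPROGRESS; "mydev" and the softc fields are hypothetical.
 *
 *	static void
 *	mydev_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (error)
 *			return;
 *		sc->busaddr = segs[0].ds_addr;	// single segment expected
 *	}
 *
 *	error = bus_dmamap_load(sc->tag, sc->map, sc->buf, sc->buflen,
 *	    mydev_dmamap_cb, sc, BUS_DMA_WAITOK);
 *	if (error && error != EINPROGRESS)
 *		goto fail;
 */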

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        int nsegs, error;

        /*
         * XXX
         * Follow old semantics.  Once all of the callers are fixed,
         * we should get rid of these internal flag "adjustments".
         */
        flags &= ~BUS_DMA_WAITOK;
        flags |= BUS_DMA_NOWAIT;

        segments = bus_dma_tag_lock(dmat, cache_segments);
        error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
                        segments, dmat->nsegments, &nsegs, flags);
        if (error) {
                /* force "no valid mappings" in callback */
                callback(callback_arg, segments, 0,
                         0, error);
        } else {
                callback(callback_arg, segments, nsegs,
                         m0->m_pkthdr.len, error);
        }
        bus_dma_tag_unlock(dmat);
        return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
                             struct mbuf *m0,
                             bus_dma_segment_t *segs, int maxsegs,
                             int *nsegs, int flags)
{
        int error;

        M_ASSERTPKTHDR(m0);

        KASSERT(maxsegs >= 1, ("invalid maxsegs %d\n", maxsegs));
        KASSERT(maxsegs <= dmat->nsegments,
                ("%d too many segments, dmat only supports %d segments\n",
                 maxsegs, dmat->nsegments));
        KASSERT(flags & BUS_DMA_NOWAIT,
                ("only BUS_DMA_NOWAIT is supported\n"));

        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                vm_paddr_t lastaddr = 0;
                struct mbuf *m;

                *nsegs = 1;
                error = 0;
                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len == 0)
                                continue;

                        error = _bus_dmamap_load_buffer(dmat, map,
                                        m->m_data, m->m_len,
                                        segs, maxsegs,
                                        NULL, flags, &lastaddr,
                                        nsegs, first);
                        if (error == ENOMEM && !first) {
                                /*
                                 * Out of bounce pages due to too many
                                 * fragments in the mbuf chain; return
                                 * EFBIG instead.
                                 */
                                error = EFBIG;
                        }
                        first = 0;
                }
#ifdef INVARIANTS
                if (!error)
                        KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
        } else {
                *nsegs = 0;
                error = EINVAL;
        }
        KKASSERT(error != EINPROGRESS);
        return error;
}
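
/*
 * Illustrative sketch (not part of the original file): a network
 * driver transmit path typically calls the segment variant like
 * this, falling back to m_defrag() when the chain is too
 * fragmented.  Names other than the busdma and mbuf calls are
 * hypothetical.
 *
 *	error = bus_dmamap_load_mbuf_segment(sc->tx_tag, txd->map,
 *	    m, segs, MYDEV_MAXSCATTER, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		struct mbuf *m_new;
 *
 *		m_new = m_defrag(m, MB_DONTWAIT);
 *		if (m_new == NULL)
 *			goto drop;
 *		m = m_new;
 *		error = bus_dmamap_load_mbuf_segment(sc->tx_tag,
 *		    txd->map, m, segs, MYDEV_MAXSCATTER, &nsegs,
 *		    BUS_DMA_NOWAIT);
 *	}
 */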

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        vm_paddr_t lastaddr;
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        pmap_t pmap;
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        bus_dma_segment_t *segs;
        int nsegs_left;

        if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS)
                segments = cache_segments;
        else
                segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments,
                                   M_DEVBUF, M_WAITOK | M_ZERO);

        /*
         * XXX
         * Follow old semantics.  Once all of the callers are fixed,
         * we should get rid of these internal flag "adjustments".
         */
        flags &= ~BUS_DMA_WAITOK;
        flags |= BUS_DMA_NOWAIT;

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        segs = segments;
        nsegs_left = dmat->nsegments;

        if (uio->uio_segflg == UIO_USERSPACE) {
                struct thread *td;

                td = uio->uio_td;
                KASSERT(td != NULL && td->td_proc != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        } else {
                pmap = NULL;
        }

        error = 0;
        nsegs = 1;
        first = 1;
        lastaddr = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
                                segs, nsegs_left,
                                pmap, flags, &lastaddr, &nsegs, first);
                first = 0;

                resid -= minlen;
                if (error == 0) {
                        nsegs_left -= nsegs;
                        segs += nsegs;
                }
        }

        /*
         * Minimum one DMA segment, even if 0-length buffer.
         */
        if (nsegs_left == dmat->nsegments)
                --nsegs_left;

        if (error) {
                /* force "no valid mappings" in callback */
                callback(callback_arg, segments, 0,
                         0, error);
        } else {
                callback(callback_arg, segments, dmat->nsegments - nsegs_left,
                         uio->uio_resid, error);
        }
        if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
                kfree(segments, M_DEVBUF);
        return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                switch (op) {
                case BUS_DMASYNC_PREWRITE:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                        break;

                case BUS_DMASYNC_POSTREAD:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                        break;

                case BUS_DMASYNC_PREREAD:
                case BUS_DMASYNC_POSTWRITE:
                        /* No-ops */
                        break;
                }
        }
}
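
/*
 * Note: only the bounce pages attached to the map are copied, so a
 * map that never bounced makes these sync operations no-ops.  The
 * usual driver sequence is: load, BUS_DMASYNC_PREREAD/PREWRITE,
 * start the DMA, then BUS_DMASYNC_POSTREAD/POSTWRITE and unload
 * when it completes.
 */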

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz, *new_bz;
        lwkt_tokref ref;

        KASSERT(dmat->bounce_zone == NULL,
                ("bounce zone was already assigned\n"));

        new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

        lwkt_gettoken(&ref, &bounce_zone_tok);

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if (dmat->alignment <= bz->alignment &&
                    dmat->lowaddr >= bz->lowaddr) {
                        lwkt_reltoken(&ref);

                        dmat->bounce_zone = bz;
                        kfree(new_bz, M_DEVBUF);
                        return 0;
                }
        }
        bz = new_bz;

#ifdef SMP
        spin_init(&bz->spin);
#endif
        STAILQ_INIT(&bz->bounce_page_list);
        STAILQ_INIT(&bz->bounce_map_waitinglist);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = round_page(dmat->alignment);
        ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
        busdma_zonecount++;
        ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

        lwkt_reltoken(&ref);

        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_ctx);
        bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD, 0, "");
        if (bz->sysctl_tree == NULL) {
                sysctl_ctx_free(&bz->sysctl_ctx);
                return 0;       /* XXX error code? */
        }

        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
            "Total bounce page reservations that failed");
        SYSCTL_ADD_STRING(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

        return 0;
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int count = 0, mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         mflags, 0ul,
                                                         bz->lowaddr,
                                                         bz->alignment, 0);
                if (bpage->vaddr == 0) {
                        kfree(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);

                BZ_LOCK(bz);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bounce_pages++;
                bz->total_bpages++;
                bz->free_bpages++;
                BZ_UNLOCK(bz);

                count++;
                numpages--;
        }
        return count;
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int pages;

        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
                bz->reserve_failed++;
                return (map->pagesneeded - (map->pagesreserved + pages));
        }

        bz->free_bpages -= pages;

        bz->reserved_bpages += pages;
        KKASSERT(bz->reserved_bpages <= bz->total_bpages);

        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return pages;
}
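
/*
 * Note: reserve_bounce_pages() returns the number of pages still
 * missing, so 0 means the map's reservation is fully satisfied.
 * With commit == 0 the reservation is all-or-nothing: on a
 * shortfall it reserves nothing and reports the failure.  With
 * commit != 0 it takes whatever is free, records it on the map,
 * and the map waits for the remainder.
 */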

static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int reserved = map->pagesreserved;
        bus_dmamap_t wait_map;

        map->pagesreserved = 0;
        map->pagesneeded = 0;

        if (reserved == 0)
                return;

        BZ_LOCK(bz);

        bz->free_bpages += reserved;
        KKASSERT(bz->free_bpages <= bz->total_bpages);

        KKASSERT(bz->reserved_bpages >= reserved);
        bz->reserved_bpages -= reserved;

        wait_map = get_map_waiting(dmat);

        BZ_UNLOCK(bz);

        if (wait_map != NULL)
                add_map_callback(wait_map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        struct bounce_page *bpage;

        KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
        map->pagesneeded--;

        KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
        map->pagesreserved--;

        BZ_LOCK(bz);

        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        KASSERT(bpage != NULL, ("free page list is empty"));
        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

        KKASSERT(bz->reserved_bpages > 0);
        bz->reserved_bpages--;

        bz->active_bpages++;
        KKASSERT(bz->active_bpages <= bz->total_bpages);

        BZ_UNLOCK(bz);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
        return bpage->busaddr;
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        bus_dmamap_t map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        BZ_LOCK(bz);

        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

        bz->free_bpages++;
        KKASSERT(bz->free_bpages <= bz->total_bpages);

        KKASSERT(bz->active_bpages > 0);
        bz->active_bpages--;

        map = get_map_waiting(dmat);

        BZ_UNLOCK(bz);

        if (map != NULL)
                add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        bus_dmamap_t map;

        map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
        if (map != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
                        bz->total_deferred++;
                } else {
                        map = NULL;
                }
        }
        return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
        /* XXX callbacklist is not MPSAFE */
        crit_enter();
        get_mplock();
        STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
        busdma_swi_pending = 1;
        setsoftvm();
        rel_mplock();
        crit_exit();
}
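
/*
 * Note: add_map_callback() is the deferred half of the WAITOK load
 * path.  When a load could not reserve enough bounce pages the map
 * was queued on the zone's waiting list; as pages are returned or
 * freed, get_map_waiting() picks a waiter and this routine hands it
 * to busdma_swi(), which retries the load and finally fires the
 * stored callback.
 */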

void
busdma_swi(void)
{
        bus_dmamap_t map;

        crit_enter();
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                crit_exit();
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                crit_enter();
        }
        crit_exit();
}