/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access the pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES      1024

/*
 * Segment arrays of up to BUS_DMA_CACHE_SEGMENTS entries (16 bytes each,
 * i.e. 16 x N bytes) are declared on the caller's stack, avoiding the
 * tag's segment lock for small transfers.
 */
#define BUS_DMA_CACHE_SEGMENTS  8

struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
        bus_dma_tag_t   parent;
        bus_size_t      alignment;
        bus_size_t      boundary;
        bus_addr_t      lowaddr;
        bus_addr_t      highaddr;
        bus_dma_filter_t *filter;
        void            *filterarg;
        bus_size_t      maxsize;
        u_int           nsegments;
        bus_size_t      maxsegsz;
        int             flags;
        int             ref_count;
        int             map_count;
        bus_dma_segment_t *segments;
        struct bounce_zone *bounce_zone;
#ifdef SMP
        struct spinlock spin;
#else
        int             unused0;
#endif
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN    BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR  BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE    (BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

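/*
 * True when plain kmalloc() can satisfy bus_dmamem_alloc() for this tag:
 * the allocation fits in one page, needs no more than page alignment, and
 * the tag imposes no address limit below the top of physical memory, so
 * any kernel page is acceptable.
 */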
#define BUS_DMAMEM_KMALLOC(dmat) \
        ((dmat)->maxsize <= PAGE_SIZE && \
         (dmat)->alignment <= PAGE_SIZE && \
         (dmat)->lowaddr >= ptoa(Maxmem))

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

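/*
 * A bounce zone is a pool of bounce pages shared by every tag whose
 * alignment and lowaddr constraints the zone can satisfy; see
 * alloc_bounce_zone() for the matching rules.
 */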
struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
#ifdef SMP
        struct spinlock spin;
#else
        int             unused0;
#endif
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        int             reserve_failed;
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_ctx;
        struct sysctl_oid *sysctl_tree;
};

#ifdef SMP
#define BZ_LOCK(bz)     spin_lock_wr(&(bz)->spin)
#define BZ_UNLOCK(bz)   spin_unlock_wr(&(bz)->spin)
#else
#define BZ_LOCK(bz)     crit_enter()
#define BZ_UNLOCK(bz)   crit_exit()
#endif

static struct lwkt_token bounce_zone_tok = LWKT_TOKEN_MP_INITIALIZER;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
        STAILQ_HEAD_INITIALIZER(bounce_zone_list);

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1; /* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
        struct bp_list  bpages;
        int             pagesneeded;
        int             pagesreserved;
        bus_dma_tag_t   dmat;
        void            *buf;           /* unmapped buffer pointer */
        bus_size_t      buflen;         /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void            *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
        STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);

static struct bus_dmamap nobounce_dmamap;

static int              alloc_bounce_zone(bus_dma_tag_t);
static int              alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static int              reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void             return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t       add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
                            vm_offset_t, bus_size_t);
static void             free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t     get_map_waiting(bus_dma_tag_t);
static void             add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
           0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
           0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
           &bounce_alignment, 0, "Obey alignment constraint");

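/*
 * Return non-zero if the page at paddr cannot be used directly and must
 * be bounced, walking up the tag hierarchy: a page bounces when it lies
 * above lowaddr (but not above highaddr), or violates the alignment
 * constraint, unless an installed filter callback approves it.
 */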
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
                     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
                 && (dmat->filter == NULL ||
                     dmat->filter(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

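/*
 * Tags with few enough segments hand back the caller's on-stack segment
 * cache and need no lock; larger tags serialize on the tag spinlock and
 * use the shared tag->segments array.
 */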
static __inline
bus_dma_segment_t *
bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
{
        if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
                return(cache);
#ifdef SMP
        spin_lock_wr(&tag->spin);
#endif
        return(tag->segments);
}

static __inline
void
bus_dma_tag_unlock(bus_dma_tag_t tag)
{
#ifdef SMP
        if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
                spin_unlock_wr(&tag->spin);
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /*
         * Sanity checks
         */

        if (alignment == 0)
                alignment = 1;
        if (alignment & (alignment - 1))
                panic("alignment must be power of 2\n");

        if (boundary != 0) {
                if (boundary & (boundary - 1))
                        panic("boundary must be power of 2\n");
                if (boundary < maxsegsz) {
                        kprintf("boundary < maxsegsz:\n");
                        print_backtrace(-1);
                        maxsegsz = boundary;
                }
        }

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

#ifdef SMP
        spin_init(&newtag->spin);
#endif
        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        newtag->segments = NULL;
        newtag->bounce_zone = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

                if (newtag->boundary == 0) {
                        newtag->boundary = parent->boundary;
                } else if (parent->boundary != 0) {
                        newtag->boundary = MIN(parent->boundary,
                                               newtag->boundary);
                }

#ifdef notyet
                newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        parent->ref_count++;
        }

        if (newtag->lowaddr < ptoa(Maxmem))
                newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
        if (bounce_alignment && newtag->alignment > 1 &&
            !(newtag->flags & BUS_DMA_ALIGNED))
                newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

        if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                error = alloc_bounce_zone(newtag);
                if (error)
                        goto back;
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        if (flags & BUS_DMA_ONEBPAGE) {
                                pages = 1;
                        } else {
                                pages = atop(round_page(maxsize)) -
                                        bz->total_bpages;
                                pages = MAX(pages, 1);
                        }

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages, flags) < pages)
                                error = ENOMEM;

                        /* Performed initial allocation */
                        newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
                }
        }
back:
        if (error)
                kfree(newtag, M_DEVBUF);
        else
                *dmat = newtag;
        return error;
}

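/*
 * Example (sketch, hypothetical "mydev" driver): a tag for a device that
 * can DMA a single segment of up to MAXPHYS bytes anywhere below 4GB,
 * with no alignment or boundary constraints:
 *
 *      error = bus_dma_tag_create(NULL, 1, 0,
 *                      BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *                      NULL, NULL, MAXPHYS, 1, MAXPHYS,
 *                      0, &sc->mydev_dmat);
 */
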
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {
                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
                                if (dmat->segments != NULL)
                                        kfree(dmat->segments, M_DEVBUF);
                                kfree(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
        return(tag->maxsize);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                        dmat->nsegments, M_DEVBUF, M_INTWAIT);
        }

        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                struct bounce_zone *bz;
                int maxpages;

                /* Must bounce */

                if (dmat->bounce_zone == NULL) {
                        error = alloc_bounce_zone(dmat);
                        if (error)
                                return error;
                }
                bz = dmat->bounce_zone;

                *mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
                        maxpages = max_bounce_pages;
                } else {
                        maxpages = MIN(max_bounce_pages,
                                       Maxmem - atop(dmat->lowaddr));
                }
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && bz->total_bpages < maxpages)) {
                        int pages;

                        if (flags & BUS_DMA_ONEBPAGE) {
                                pages = 1;
                        } else {
                                pages = atop(round_page(dmat->maxsize));
                                pages = MIN(maxpages - bz->total_bpages, pages);
                                pages = MAX(pages, 1);
                        }
                        if (alloc_bounce_pages(dmat, pages, flags) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (!error)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (!error)
                dmat->map_count++;
        return error;
}

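/*
 * Note that a tag which can never bounce gets a NULL map; the load
 * functions treat a NULL map as the static nobounce_dmamap.
 */
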
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                kfree(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}

static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
        bus_size_t maxsize = 0;
        uintptr_t vaddr = (uintptr_t)vaddr0;

        if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
                if (verify || bootverbose)
                        kprintf("boundary check failed\n");
                if (verify)
                        print_backtrace(-1); /* XXX panic */
                maxsize = dmat->maxsize;
        }
        if (vaddr & (dmat->alignment - 1)) {
                if (verify || bootverbose)
                        kprintf("alignment check failed\n");
                if (verify)
                        print_backtrace(-1); /* XXX panic */
                if (dmat->maxsize < dmat->alignment)
                        maxsize = dmat->alignment;
                else
                        maxsize = dmat->maxsize;
        }
        return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        int mflags;

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                        dmat->nsegments, M_DEVBUF, M_INTWAIT);
        }

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        if (BUS_DMAMEM_KMALLOC(dmat)) {
                bus_size_t maxsize;

                *vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

                /*
                 * XXX
                 * Check whether the allocation
                 * - crossed a page boundary
                 * - was not aligned
                 * Retry with power-of-2 alignment in the above cases.
                 */
                maxsize = check_kmalloc(dmat, *vaddr, 0);
                if (maxsize) {
                        size_t size;

                        kfree(*vaddr, M_DEVBUF);
                        /* XXX check for overflow? */
                        for (size = 1; size <= maxsize; size <<= 1)
                                ;
                        *vaddr = kmalloc(size, M_DEVBUF, mflags);
                        check_kmalloc(dmat, *vaddr, 1);
                }
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

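/*
 * Example (sketch, hypothetical descriptor ring): DMA-safe memory is
 * allocated and later released against the same tag; the map returned
 * by bus_dmamem_alloc() is always NULL and is passed back unchanged:
 *
 *      error = bus_dmamem_alloc(sc->ring_dmat, &sc->ring,
 *                      BUS_DMA_WAITOK | BUS_DMA_ZERO, &map);
 *      ...
 *      bus_dmamem_free(sc->ring_dmat, sc->ring, map);
 */
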
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same kmalloc/contigmalloc choice for
 * free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if (BUS_DMAMEM_KMALLOC(dmat))
                kfree(vaddr, M_DEVBUF);
        else
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
        if (pmap)
                return pmap_extract(pmap, vaddr);
        else
                return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        void *buf, bus_size_t buflen,
                        bus_dma_segment_t *segments,
                        int nsegments,
                        pmap_t pmap,
                        int flags,
                        vm_paddr_t *lastpaddrp,
                        int *segp,
                        int first)
{
        vm_offset_t vaddr;
        vm_paddr_t paddr, nextpaddr;
        bus_dma_segment_t *sg;
        bus_addr_t bmask;
        int seg, error = 0;

        if (map == NULL)
                map = &nobounce_dmamap;

#ifdef INVARIANTS
        if (dmat->flags & BUS_DMA_ALIGNED)
                KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
            map != &nobounce_dmamap && map->pagesneeded == 0) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = _bus_dma_extract(pmap, vaddr);
                        if (run_filter(dmat, paddr) != 0)
                                map->pagesneeded++;
                        vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
                }
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                struct bounce_zone *bz;

                bz = dmat->bounce_zone;
                BZ_LOCK(bz);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                BZ_UNLOCK(bz);
                                error = ENOMEM;
                                goto free_bounce;
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;

                                STAILQ_INSERT_TAIL(
                                    &dmat->bounce_zone->bounce_map_waitinglist,
                                    map, links);
                                BZ_UNLOCK(bz);

                                return (EINPROGRESS);
                        }
                }
                BZ_UNLOCK(bz);
        }

        KKASSERT(*segp >= 1 && *segp <= nsegments);
        seg = *segp;
        sg = &segments[seg - 1];

        vaddr = (vm_offset_t)buf;
        nextpaddr = *lastpaddrp;
        bmask = ~(dmat->boundary - 1);  /* note: will be 0 if boundary is 0 */

        /* force at least one segment */
        do {
                bus_size_t size;

                /*
                 * Per-page main loop
                 */
                paddr = _bus_dma_extract(pmap, vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;
                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        /*
                         * note: this paddr has the same in-page offset
                         * as vaddr and thus the paddr above, so the
                         * size does not have to be recalculated
                         */
                        paddr = add_bounce_page(dmat, map, vaddr, size);
                }

                /*
                 * Fill in the bus_dma_segment
                 */
                if (first) {
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                        first = 0;
                } else if (paddr == nextpaddr) {
                        sg->ds_len += size;
                } else {
                        sg++;
                        seg++;
                        if (seg > nsegments)
                                break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                nextpaddr = paddr + size;

                /*
                 * Handle maxsegsz and boundary issues with a nested loop
                 */
                for (;;) {
                        bus_size_t tmpsize;

                        /*
                         * Limit to the boundary and maximum segment size
                         */
                        if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
                                tmpsize = dmat->boundary -
                                          (sg->ds_addr & ~bmask);
                                if (tmpsize > dmat->maxsegsz)
                                        tmpsize = dmat->maxsegsz;
                                KKASSERT(tmpsize < sg->ds_len);
                        } else if (sg->ds_len > dmat->maxsegsz) {
                                tmpsize = dmat->maxsegsz;
                        } else {
                                break;
                        }

                        /*
                         * Futz, split the data into a new segment.
                         */
                        if (seg >= nsegments)
                                goto fail;
                        sg[1].ds_len = sg[0].ds_len - tmpsize;
                        sg[1].ds_addr = sg[0].ds_addr + tmpsize;
                        sg[0].ds_len = tmpsize;
                        sg++;
                        seg++;
                }

                /*
                 * Adjust for loop
                 */
                buflen -= size;
                vaddr += size;
        } while (buflen > 0);
fail:
        if (buflen != 0)
                error = EFBIG;

        *segp = seg;
        *lastpaddrp = nextpaddr;

free_bounce:
        if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
            map != &nobounce_dmamap) {
                _bus_dmamap_unload(dmat, map);
                return_bounce_pages(dmat, map);
        }
        return error;
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        vm_paddr_t lastaddr = 0;
        int error, nsegs = 1;

        if (map != NULL) {
                /*
                 * XXX
                 * Follow old semantics.  Once all of the callers are fixed,
                 * we should get rid of these internal flag "adjustments".
                 */
                flags &= ~BUS_DMA_NOWAIT;
                flags |= BUS_DMA_WAITOK;

                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        segments = bus_dma_tag_lock(dmat, cache_segments);
        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
                        segments, dmat->nsegments,
                        NULL, flags, &lastaddr, &nsegs, 1);
        if (error == EINPROGRESS) {
                bus_dma_tag_unlock(dmat);
                return error;
        }
        callback(callback_arg, segments, nsegs, error);
        bus_dma_tag_unlock(dmat);
        return 0;
}

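/*
 * Example (sketch, hypothetical driver): loading a buffer and collecting
 * the segments in a driver-supplied callback:
 *
 *      static void
 *      mydev_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *      {
 *              struct mydev_softc *sc = arg;
 *
 *              if (error)
 *                      return;
 *              sc->paddr = segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamap_load(sc->dmat, sc->dmamap, sc->buf, sc->buflen,
 *                      mydev_dma_cb, sc, BUS_DMA_WAITOK);
 */
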
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        int nsegs, error;

        /*
         * XXX
         * Follow old semantics.  Once all of the callers are fixed,
         * we should get rid of these internal flag "adjustments".
         */
        flags &= ~BUS_DMA_WAITOK;
        flags |= BUS_DMA_NOWAIT;

        segments = bus_dma_tag_lock(dmat, cache_segments);
        error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
                        segments, dmat->nsegments, &nsegs, flags);
        if (error) {
                /* force "no valid mappings" in callback */
                callback(callback_arg, segments, 0,
                         0, error);
        } else {
                callback(callback_arg, segments, nsegs,
                         m0->m_pkthdr.len, error);
        }
        bus_dma_tag_unlock(dmat);
        return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
                             struct mbuf *m0,
                             bus_dma_segment_t *segs, int maxsegs,
                             int *nsegs, int flags)
{
        int error;

        M_ASSERTPKTHDR(m0);

        KASSERT(maxsegs >= 1, ("invalid maxsegs %d\n", maxsegs));
        KASSERT(maxsegs <= dmat->nsegments,
                ("%d too many segments, dmat only supports %d segments\n",
                 maxsegs, dmat->nsegments));
        KASSERT(flags & BUS_DMA_NOWAIT,
                ("only BUS_DMA_NOWAIT is supported\n"));

        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                vm_paddr_t lastaddr = 0;
                struct mbuf *m;

                *nsegs = 1;
                error = 0;
                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len == 0)
                                continue;

                        error = _bus_dmamap_load_buffer(dmat, map,
                                        m->m_data, m->m_len,
                                        segs, maxsegs,
                                        NULL, flags, &lastaddr,
                                        nsegs, first);
                        if (error == ENOMEM && !first) {
                                /*
                                 * Out of bounce pages due to too many
                                 * fragments in the mbuf chain; return
                                 * EFBIG instead.
                                 */
                                error = EFBIG;
                        }
                        first = 0;
                }
#ifdef INVARIANTS
                if (!error)
                        KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
        } else {
                *nsegs = 0;
                error = EINVAL;
        }
        KKASSERT(error != EINPROGRESS);
        return error;
}

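/*
 * Example (sketch, hypothetical NIC transmit path): mapping an outgoing
 * packet directly into a driver-owned segment array; EFBIG here means
 * the chain was too fragmented and should be defragmented or dropped:
 *
 *      error = bus_dmamap_load_mbuf_segment(sc->tx_dmat, txd->map, m,
 *                      txd->segs, MYDEV_MAX_TXSEGS, &nsegs, BUS_DMA_NOWAIT);
 */
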
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        vm_paddr_t lastaddr;
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        pmap_t pmap;
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        bus_dma_segment_t *segs;
        int nsegs_left;

        if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS)
                segments = cache_segments;
        else
                segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments,
                                   M_DEVBUF, M_WAITOK | M_ZERO);

        /*
         * XXX
         * Follow old semantics.  Once all of the callers are fixed,
         * we should get rid of these internal flag "adjustments".
         */
        flags &= ~BUS_DMA_WAITOK;
        flags |= BUS_DMA_NOWAIT;

        resid = (bus_size_t)uio->uio_resid;
        iov = uio->uio_iov;

        segs = segments;
        nsegs_left = dmat->nsegments;

        if (uio->uio_segflg == UIO_USERSPACE) {
                struct thread *td;

                td = uio->uio_td;
                KASSERT(td != NULL && td->td_proc != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        } else {
                pmap = NULL;
        }

        error = 0;
        nsegs = 1;
        first = 1;
        lastaddr = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
                                segs, nsegs_left,
                                pmap, flags, &lastaddr, &nsegs, first);
                first = 0;

                resid -= minlen;
                if (error == 0) {
                        nsegs_left -= nsegs;
                        segs += nsegs;
                }
        }

        /*
         * Minimum one DMA segment, even if 0-length buffer.
         */
        if (nsegs_left == dmat->nsegments)
                --nsegs_left;

        if (error) {
                /* force "no valid mappings" in callback */
                callback(callback_arg, segments, 0,
                         0, error);
        } else {
                callback(callback_arg, segments, dmat->nsegments - nsegs_left,
                         (bus_size_t)uio->uio_resid, error);
        }
        if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
                kfree(segments, M_DEVBUF);
        return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                switch (op) {
                case BUS_DMASYNC_PREWRITE:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                        break;

                case BUS_DMASYNC_POSTREAD:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                        break;

                case BUS_DMASYNC_PREREAD:
                case BUS_DMASYNC_POSTWRITE:
                        /* No-ops */
                        break;
                }
        }
}

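/*
 * Drivers must bracket device DMA with sync calls: PREWRITE copies the
 * client data into the bounce pages before the device reads it, and
 * POSTREAD copies device-written data back out of the bounce pages
 * after the transfer completes.
 */
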
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz, *new_bz;

        KASSERT(dmat->bounce_zone == NULL,
                ("bounce zone was already assigned\n"));

        new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

        lwkt_gettoken(&bounce_zone_tok);

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if (dmat->alignment <= bz->alignment &&
                    dmat->lowaddr >= bz->lowaddr) {
                        lwkt_reltoken(&bounce_zone_tok);

                        dmat->bounce_zone = bz;
                        kfree(new_bz, M_DEVBUF);
                        return 0;
                }
        }
        bz = new_bz;

#ifdef SMP
        spin_init(&bz->spin);
#endif
        STAILQ_INIT(&bz->bounce_page_list);
        STAILQ_INIT(&bz->bounce_map_waitinglist);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = round_page(dmat->alignment);
        ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
        busdma_zonecount++;
        ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

        lwkt_reltoken(&bounce_zone_tok);

        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_ctx);
        bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD, 0, "");
        if (bz->sysctl_tree == NULL) {
                sysctl_ctx_free(&bz->sysctl_ctx);
                return 0;       /* XXX error code? */
        }

        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
            "Total bounce page reservations that failed");
        SYSCTL_ADD_STRING(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

        return 0;
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int count = 0, mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         mflags, 0ul,
                                                         bz->lowaddr,
                                                         bz->alignment, 0);
                if (bpage->vaddr == 0) {
                        kfree(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);

                BZ_LOCK(bz);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bounce_pages++;
                bz->total_bpages++;
                bz->free_bpages++;
                BZ_UNLOCK(bz);

                count++;
                numpages--;
        }
        return count;
}

/*
 * Reserve bounce pages for the map.  Returns 0 when the request is fully
 * reserved, or the number of pages still outstanding; in the non-commit
 * case a shortfall reserves nothing.  Assume caller holds bounce zone
 * spinlock.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int pages;

        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
                bz->reserve_failed++;
                return (map->pagesneeded - (map->pagesreserved + pages));
        }

        bz->free_bpages -= pages;

        bz->reserved_bpages += pages;
        KKASSERT(bz->reserved_bpages <= bz->total_bpages);

        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return pages;
}

static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int reserved = map->pagesreserved;
        bus_dmamap_t wait_map;

        map->pagesreserved = 0;
        map->pagesneeded = 0;

        if (reserved == 0)
                return;

        BZ_LOCK(bz);

        bz->free_bpages += reserved;
        KKASSERT(bz->free_bpages <= bz->total_bpages);

        KKASSERT(bz->reserved_bpages >= reserved);
        bz->reserved_bpages -= reserved;

        wait_map = get_map_waiting(dmat);

        BZ_UNLOCK(bz);

        if (wait_map != NULL)
                add_map_callback(wait_map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        struct bounce_page *bpage;

        KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
        map->pagesneeded--;

        KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
        map->pagesreserved--;

        BZ_LOCK(bz);

        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        KASSERT(bpage != NULL, ("free page list is empty"));
        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

        KKASSERT(bz->reserved_bpages > 0);
        bz->reserved_bpages--;

        bz->active_bpages++;
        KKASSERT(bz->active_bpages <= bz->total_bpages);

        BZ_UNLOCK(bz);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
        return bpage->busaddr;
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        bus_dmamap_t map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        BZ_LOCK(bz);

        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

        bz->free_bpages++;
        KKASSERT(bz->free_bpages <= bz->total_bpages);

        KKASSERT(bz->active_bpages > 0);
        bz->active_bpages--;

        map = get_map_waiting(dmat);

        BZ_UNLOCK(bz);

        if (map != NULL)
                add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        bus_dmamap_t map;

        map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
        if (map != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
                        bz->total_deferred++;
                } else {
                        map = NULL;
                }
        }
        return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
        /* XXX callbacklist is not MPSAFE */
        crit_enter();
        get_mplock();
        STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
        busdma_swi_pending = 1;
        setsoftvm();
        rel_mplock();
        crit_exit();
}

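/*
 * Deferred loads queued by add_map_callback() are retried from the VM
 * software interrupt: each waiting map is re-loaded now that its bounce
 * pages have been reserved, and its callback finally runs.
 */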
void
busdma_swi(void)
{
        bus_dmamap_t map;

        crit_enter();
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                crit_exit();
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                crit_enter();
        }
        crit_exit();
}