/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.16.2.2 2003/01/23 00:55:27 scottl Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.21 2007/07/14 07:29:30 sephe Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES 1024

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
        bus_dma_segment_t *segments;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        void                  *buf;             /* unmapped buffer pointer */
        bus_size_t             buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
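
/*
 * Example (illustrative only, not compiled): a driver-supplied
 * bus_dma_filter_t of the kind run_filter() invokes above.  A filter
 * returns non-zero when the physical address is unacceptable to the
 * device and the page must be bounced.  The device quirk shown here is
 * hypothetical.
 */
#if 0
static int
example_dma_filter(void *filterarg, bus_addr_t paddr)
{
        /* hypothetical device that cannot address memory at or above 16MB */
        return ((paddr & 0xff000000) != 0);
}
#endif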

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        newtag->segments = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 *     all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL) {
                        parent->ref_count++;
                }
        }

        if (newtag->lowaddr < ptoa(Maxmem) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                /* Must bounce */

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "
                              "not implemented");
                }
                if (ptoa(total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                kfree(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return (error);
}
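
/*
 * Usage sketch (illustrative, not compiled): a hypothetical driver creating
 * a tag for a device limited to 32-bit addresses, transferring up to 64KB
 * in a single segment.  The softc field sc->sc_dtag is an assumption;
 * BUS_SPACE_MAXADDR* come from the machine bus headers.
 */
#if 0
        error = bus_dma_tag_create(NULL,                /* parent */
                                   1,                   /* alignment */
                                   0,                   /* boundary */
                                   BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                                   BUS_SPACE_MAXADDR,   /* highaddr */
                                   NULL, NULL,          /* filter, filterarg */
                                   65536,               /* maxsize */
                                   1,                   /* nsegments */
                                   65536,               /* maxsegsz */
                                   0,                   /* flags */
                                   &sc->sc_dtag);
#endif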

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {
                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
                                if (dmat->segments != NULL)
                                        kfree(dmat->segments, M_DEVBUF);
                                kfree(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                        dmat->nsegments, M_DEVBUF, M_INTWAIT);
        }

        if (dmat->lowaddr < ptoa(Maxmem)) {
                /* Must bounce */
                int maxpages;

                *mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT);
                if (*mapp == NULL) {
                        return (ENOMEM);
                } else {
                        /* Initialize the new map */
                        bzero(*mapp, sizeof(**mapp));
                        STAILQ_INIT(&((*mapp)->bpages));
                }
                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {
                        int pages;

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "
                                      "not implemented");
                        }

                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                kfree(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}
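
/*
 * Usage sketch (illustrative, not compiled): the create/destroy pairing as
 * a driver would use it.  Note that *mapp may legitimately come back NULL
 * when no bouncing is required; the NULL map is still passed unchanged to
 * the other dmamap functions.  Softc field names are assumptions.
 */
#if 0
        error = bus_dmamap_create(sc->sc_dtag, 0, &sc->sc_dmap);
        if (error)
                return (error);
        /* ... use the map with bus_dmamap_load() / _bus_dmamap_unload() ... */
        bus_dmamap_destroy(sc->sc_dtag, sc->sc_dmap);
#endif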


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        int mflags;
        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                        dmat->nsegments, M_DEVBUF, M_INTWAIT);
        }

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        if ((dmat->maxsize <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa(Maxmem)) {
                *vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);
                /*
                 * XXX Check whether the allocation crossed a page boundary
                 * and retry with power-of-2 alignment in that case.
                 */
                if ((((intptr_t)*vaddr) & PAGE_MASK) !=
                    (((intptr_t)*vaddr + dmat->maxsize) & PAGE_MASK)) {
                        size_t size;
                        kfree(*vaddr, M_DEVBUF);
                        /* XXX check for overflow? */
                        for (size = 1; size <= dmat->maxsize; size <<= 1)
                                ;
                        *vaddr = kmalloc(size, M_DEVBUF, mflags);
                }
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Mirror the kfree/contigfree choice made there.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((dmat->maxsize <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa(Maxmem))
                kfree(vaddr, M_DEVBUF);
        else
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}
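
/*
 * Usage sketch (illustrative, not compiled): allocating zeroed DMA-safe
 * memory for something like a descriptor ring.  The map comes back NULL
 * by design and must be handed back unchanged to bus_dmamem_free().
 * Variable names are assumptions.
 */
#if 0
        bus_dmamap_t dmap;
        void *ring;

        error = bus_dmamem_alloc(sc->sc_dtag, &ring, BUS_DMA_ZERO, &dmap);
        if (error)
                return (error);
        /* ... load and use the ring ... */
        bus_dmamem_free(sc->sc_dtag, ring, dmap);
#endif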

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t             vaddr;
        vm_paddr_t              paddr;
        bus_dma_segment_t      *sg;
        int                     seg;
        int                     error;
        vm_paddr_t              nextpaddr;
        bus_addr_t              bmask;

        if (map == NULL)
                map = &nobounce_dmamap;

        error = 0;
        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if (dmat->lowaddr < ptoa(Maxmem) &&
            map->pagesneeded == 0) {
                vm_offset_t     vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                crit_enter();
                if (reserve_bounce_pages(dmat, map) != 0) {
                        /* Queue us for resources */
                        map->dmat = dmat;
                        map->buf = buf;
                        map->buflen = buflen;
                        map->callback = callback;
                        map->callback_arg = callback_arg;

                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
                        crit_exit();

                        return (EINPROGRESS);
                }
                crit_exit();
        }

        vaddr = (vm_offset_t)buf;
        sg = dmat->segments;
        seg = 1;
        sg->ds_len = 0;
        nextpaddr = 0;
        bmask = ~(dmat->boundary - 1);  /* note: will be 0 if boundary is 0 */

        /* force at least one segment */
        do {
                bus_size_t      size;

                /*
                 * Per-page main loop
                 */
                paddr = pmap_kextract(vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;
                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        /*
                         * note: this paddr has the same in-page offset
                         * as vaddr and thus the paddr above, so the
                         * size does not have to be recalculated
                         */
                        paddr = add_bounce_page(dmat, map, vaddr, size);
                }

                /*
                 * Fill in the bus_dma_segment
                 */
                if (sg->ds_len == 0) {
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                } else if (paddr == nextpaddr) {
                        sg->ds_len += size;
                } else {
                        sg++;
                        seg++;
                        if (seg > dmat->nsegments)
                                break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                nextpaddr = paddr + size;

                /*
                 * Handle maxsegsz and boundary issues with a nested loop
                 */
                for (;;) {
                        bus_size_t      tmpsize;

                        /*
                         * Limit to the boundary and maximum segment size
                         */
                        if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
                                tmpsize = dmat->boundary -
                                          (sg->ds_addr & ~bmask);
                                if (tmpsize > dmat->maxsegsz)
                                        tmpsize = dmat->maxsegsz;
                                KKASSERT(tmpsize < sg->ds_len);
                        } else if (sg->ds_len > dmat->maxsegsz) {
                                tmpsize = dmat->maxsegsz;
                        } else {
                                break;
                        }

                        /*
                         * Futz, split the data into a new segment.
                         */
                        if (seg >= dmat->nsegments)
                                goto fail;
                        sg[1].ds_len = sg[0].ds_len - tmpsize;
                        sg[1].ds_addr = sg[0].ds_addr + tmpsize;
                        sg[0].ds_len = tmpsize;
                        sg++;
                        seg++;
                }

                /*
                 * Adjust for loop
                 */
                buflen -= size;
                vaddr += size;
        } while (buflen > 0);

fail:
        if (buflen != 0) {
                kprintf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
                       (u_long)buflen);
                error = EFBIG;
        }

        (*callback)(callback_arg, dmat->segments, seg, error);

        return (0);
}
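
/*
 * Usage sketch (illustrative, not compiled): the callback convention for
 * bus_dmamap_load().  EINPROGRESS means the map was queued waiting for
 * bounce pages and the callback will run later from busdma_swi().  The
 * callback and softc names here are assumptions.
 */
#if 0
static void
example_load_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct example_softc *sc = arg;

        if (error)
                return;
        /* program the device with segs[0].ds_addr / segs[0].ds_len, etc. */
        sc->sc_busaddr = segs[0].ds_addr;
}

        error = bus_dmamap_load(sc->sc_dtag, sc->sc_dmap, sc->sc_buf,
                                sc->sc_buflen, example_load_callback, sc, 0);
        if (error == EINPROGRESS) {
                /* deferred; example_load_callback runs when pages free up */
                error = 0;
        }
#endif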

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        void *buf, bus_size_t buflen,
                        struct thread *td,
                        int flags,
                        vm_offset_t *lastaddrp,
                        int *segp,
                        int first)
{
        bus_dma_segment_t *segs;
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;
        pmap_t pmap;

        if (td->td_proc != NULL)
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        else
                pmap = NULL;

        segs = dmat->segments;
        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0;) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
        int nsegs, error;

        KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
                ("bus_dmamap_load_mbuf: No support for bounce pages!"));
        KASSERT(m0->m_flags & M_PKTHDR,
                ("bus_dmamap_load_mbuf: no packet header"));

        nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len == 0)
                                continue;
                        error = _bus_dmamap_load_buffer(dmat,
                                        m->m_data, m->m_len,
                                        curthread, flags, &lastaddr,
                                        &nsegs, first);
                        first = 0;
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dmat->segments,
                            nsegs+1, m0->m_pkthdr.len, error);
        }
        return (error);
}
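
/*
 * Usage sketch (illustrative, not compiled): loading an outgoing packet.
 * bus_dma_callback2_t additionally receives the total mapped size, as in
 * the calls above.  Names other than the bus_dma functions are assumptions.
 */
#if 0
static void
example_mbuf_callback(void *arg, bus_dma_segment_t *segs, int nseg,
                      bus_size_t mapsize, int error)
{
        if (error)
                return;
        /* hand nseg segments totalling mapsize bytes to the device */
}

        error = bus_dmamap_load_mbuf(sc->sc_dtag, sc->sc_dmap, m_head,
                                     example_mbuf_callback, sc, 0);
#endif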

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        vm_offset_t lastaddr;
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        struct thread *td = NULL;

        KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
                ("bus_dmamap_load_uio: No support for bounce pages!"));

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                td = uio->uio_td;
                KASSERT(td != NULL && td->td_proc != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
        }

        nsegs = 0;
        error = 0;
        first = 1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                error = _bus_dmamap_load_buffer(dmat,
                                addr, minlen,
                                td, flags, &lastaddr, &nsegs, first);
                first = 0;

                resid -= minlen;
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dmat->segments,
                            nsegs+1, uio->uio_resid, error);
        }
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                switch (op) {
                case BUS_DMASYNC_PREWRITE:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        break;

                case BUS_DMASYNC_POSTREAD:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        break;
                case BUS_DMASYNC_PREREAD:
                case BUS_DMASYNC_POSTWRITE:
                        /* No-ops */
                        break;
                }
        }
}
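
/*
 * Usage sketch (illustrative, not compiled): a bounced transfer needs
 * explicit syncs.  PREWRITE copies client data into the bounce pages
 * before the device reads; POSTREAD copies device-written data back to
 * the client buffer.  This assumes the usual bus_dmamap_sync/unload
 * wrappers from <sys/bus_dma.h>; softc names are assumptions.
 */
#if 0
        bus_dmamap_sync(sc->sc_dtag, sc->sc_dmap, BUS_DMASYNC_PREWRITE);
        /* ... start the DMA and wait for it to complete ... */
        bus_dmamap_sync(sc->sc_dtag, sc->sc_dmap, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->sc_dtag, sc->sc_dmap);
#endif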

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        int count;

        count = 0;
        if (total_bpages == 0) {
                STAILQ_INIT(&bounce_page_list);
                STAILQ_INIT(&bounce_map_waitinglist);
                STAILQ_INIT(&bounce_map_callbacklist);
        }

        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)kmalloc(sizeof(*bpage), M_DEVBUF,
                                                     M_INTWAIT);

                if (bpage == NULL)
                        break;
                bzero(bpage, sizeof(*bpage));
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         dmat->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                if (bpage->vaddr == 0) {
                        kfree(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                crit_enter();
                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
                total_bpages++;
                free_bpages++;
                crit_exit();
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        int pages;

        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_page *bpage;

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesreserved--;

        crit_enter();
        bpage = STAILQ_FIRST(&bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);
        reserved_bpages--;
        active_bpages++;
        crit_exit();

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        crit_enter();
        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
        free_bpages++;
        active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        setsoftvm();
                }
        }
        crit_exit();
}

void
busdma_swi(void)
{
        struct bus_dmamap *map;

        crit_enter();
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                crit_exit();
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                crit_enter();
        }
        crit_exit();
}