/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access the pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES	1024

/*
 * Segment arrays this small are declared on the stack
 * (16 bytes per bus_dma_segment_t, so 16 x N bytes per array).
 */
#define BUS_DMA_CACHE_SEGMENTS	8

struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
#ifdef SMP
	struct spinlock	spin;
#else
	int		unused0;
#endif
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN	BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

#define BUS_DMAMEM_KMALLOC(dmat) \
	((dmat)->maxsize <= PAGE_SIZE && \
	 (dmat)->alignment <= PAGE_SIZE && \
	 (dmat)->lowaddr >= ptoa(Maxmem))

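/*
 * Illustration (not part of the original source): BUS_DMAMEM_KMALLOC()
 * is true when a plain kmalloc() can satisfy the tag, e.g. maxsize = 512
 * and alignment = 4 with lowaddr = BUS_SPACE_MAXADDR, since such a tag
 * excludes no physical memory (lowaddr >= ptoa(Maxmem)) and fits within
 * a single page.  Anything else falls back to contigmalloc() in
 * bus_dmamem_alloc() below.
 */
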
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
#ifdef SMP
	struct spinlock	spin;
#else
	int		unused0;
#endif
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};

#ifdef SMP
#define BZ_LOCK(bz)	spin_lock_wr(&(bz)->spin)
#define BZ_UNLOCK(bz)	spin_unlock_wr(&(bz)->spin)
#else
#define BZ_LOCK(bz)	crit_enter()
#define BZ_UNLOCK(bz)	crit_exit()
#endif

static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_tok);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1; /* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);

static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_zone(bus_dma_tag_t);
static int alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
			vm_offset_t, bus_size_t);
static void free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t get_map_waiting(bus_dma_tag_t);
static void add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
	   0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
	   0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
	   &bounce_alignment, 0, "Obey alignment constraint");

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL ||
		     dmat->filter(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
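
/*
 * Worked example (illustrative, not from the original source): given a
 * tag with lowaddr = 16MB (ISA-style), alignment = 4KB, no filter
 * function, and the hw.busdma.bounce_alignment tunable left enabled, a
 * page at paddr 0x1001200 must bounce on both counts: it lies above
 * lowaddr and it is not 4KB aligned.  A page at paddr 0x400000 passes
 * both tests and is mapped in place.
 */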

static __inline
bus_dma_segment_t *
bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
{
	if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		return(cache);
#ifdef SMP
	spin_lock_wr(&tag->spin);
#endif
	return(tag->segments);
}

static __inline
void
bus_dma_tag_unlock(bus_dma_tag_t tag)
{
#ifdef SMP
	if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
		spin_unlock_wr(&tag->spin);
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/*
	 * Sanity checks
	 */

	if (alignment == 0)
		alignment = 1;
	if (alignment & (alignment - 1))
		panic("alignment must be power of 2\n");

	if (boundary != 0) {
		if (boundary & (boundary - 1))
			panic("boundary must be power of 2\n");
		if (boundary < maxsegsz) {
			kprintf("boundary < maxsegsz:\n");
			print_backtrace();
			maxsegsz = boundary;
		}
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

#ifdef SMP
	spin_init(&newtag->spin);
#endif
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

#ifdef notyet
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			parent->ref_count++;
	}

	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}
back:
	if (error)
		kfree(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	return error;
}
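
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver whose device can DMA anywhere below 4GB might create its
 * buffer tag as follows.  "sc" and its fields are illustrative only;
 * the argument order matches the prototype above.
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *			BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *			NULL, NULL, MAXPHYS, 1,
 *			BUS_SPACE_MAXSIZE_32BIT, 0, &sc->buf_dmat);
 *	if (error)
 *		return (error);
 */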

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					kfree(dmat->segments, M_DEVBUF);
				kfree(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
	return(tag->maxsize);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (!error)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (!error)
		dmat->map_count++;
	return error;
}
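
/*
 * Usage sketch (hypothetical, continuing the example tag above): a map
 * is created once per outstanding transfer slot and then reused across
 * many load/unload cycles.  MYDEV_TXSLOTS is illustrative.
 *
 *	for (i = 0; i < MYDEV_TXSLOTS; i++) {
 *		error = bus_dmamap_create(sc->buf_dmat, 0, &sc->tx_map[i]);
 *		if (error)
 *			break;
 *	}
 */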

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		kprintf("boundary check failed\n");
		if (verify)
			print_backtrace(); /* XXX panic */
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		kprintf("alignment check failed\n");
		if (verify)
			print_backtrace(); /* XXX panic */
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * XXX
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			for (size = 1; size <= maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
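
/*
 * Usage sketch (hypothetical): allocating a zeroed descriptor ring that
 * already satisfies the tag's constraints; the returned map is always
 * NULL because dmamem never bounces.  "sc->ring_dmat" is illustrative.
 *
 *	error = bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
 *			BUS_DMA_WAITOK | BUS_DMA_ZERO, &map);
 *	KKASSERT(map == NULL);
 */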

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if (BUS_DMAMEM_KMALLOC(dmat))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr);
	else
		return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entry, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments,
			int nsegments,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

#ifdef INVARIANTS
	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0)
		error = EFBIG;

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustments".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
					segments, dmat->nsegments,
					NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS) {
		bus_dma_tag_unlock(dmat);
		return error;
	}
	callback(callback_arg, segments, nsegs, error);
	bus_dma_tag_unlock(dmat);
	return 0;
}
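
/*
 * Usage sketch (hypothetical): loading a buffer and capturing its bus
 * address from the callback.  The callback normally runs synchronously;
 * it is deferred only when the load must wait for bounce pages (the
 * EINPROGRESS path above).
 *
 *	static void
 *	mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->buf_dmat, sc->buf_map, buf, buflen,
 *			mydev_load_cb, &busaddr, 0);
 */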

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
			segments, dmat->nsegments, &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	bus_dma_tag_unlock(dmat);
	return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d\n", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only supports %d segments\n",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported\n"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
			}
			first = 0;
		}
#ifdef INVARIANTS
		if (!error)
			KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
	} else {
		*nsegs = 0;
		error = EINVAL;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}
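
/*
 * Usage sketch (hypothetical NIC transmit path): the _segment variant
 * fills a caller-supplied array and never defers (note the KKASSERT
 * above), so EFBIG can be handled by defragmenting the mbuf chain and
 * retrying.  MYDEV_NSEGS and the recovery policy are illustrative.
 *
 *	error = bus_dmamap_load_mbuf_segment(sc->tx_dmat, slot->map, m,
 *			segs, MYDEV_NSEGS, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		m = m_defrag(m, MB_DONTWAIT);
 *		if (m != NULL)
 *			... retry the load once ...
 *	}
 */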

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	bus_dma_segment_t *segs;
	int nsegs_left;

	if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		segments = cache_segments;
	else
		segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments,
				   M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	segs = segments;
	nsegs_left = dmat->nsegments;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				segs, nsegs_left,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
		if (error == 0) {
			nsegs_left -= nsegs;
			segs += nsegs;
		}
	}

	/*
	 * Minimum one DMA segment, even if 0-length buffer.
	 */
	if (nsegs_left == dmat->nsegments)
		--nsegs_left;

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, dmat->nsegments - nsegs_left,
			 (bus_size_t)uio->uio_resid, error);
	}
	if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
		kfree(segments, M_DEVBUF);
	return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
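
/*
 * Usage sketch (hypothetical): the sync discipline around a transfer.
 * PREWRITE copies client data into the bounce pages before the device
 * reads them; POSTREAD copies device data from the bounce pages back to
 * the client buffer after the device has written.
 *
 *	bus_dmamap_sync(sc->buf_dmat, sc->buf_map, BUS_DMASYNC_PREWRITE);
 *	... start the DMA and wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->buf_dmat, sc->buf_map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->buf_dmat, sc->buf_map);
 */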

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;
	lwkt_tokref ref;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned\n"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&ref, &bounce_zone_tok);

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if (dmat->alignment <= bz->alignment &&
		    dmat->lowaddr >= bz->lowaddr) {
			lwkt_reltoken(&ref);

			dmat->bounce_zone = bz;
			kfree(new_bz, M_DEVBUF);
			return 0;
		}
	}
	bz = new_bz;

#ifdef SMP
	spin_init(&bz->spin);
#endif
	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = round_page(dmat->alignment);
	ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

	lwkt_reltoken(&ref);

	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return 0;
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 mflags, 0ul,
							 bz->lowaddr,
							 bz->alignment, 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		BZ_LOCK(bz);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		BZ_UNLOCK(bz);

		count++;
		numpages--;
	}
	return count;
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}

	bz->free_bpages -= pages;

	bz->reserved_bpages += pages;
	KKASSERT(bz->reserved_bpages <= bz->total_bpages);

	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}

static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int reserved = map->pagesreserved;
	bus_dmamap_t wait_map;

	map->pagesreserved = 0;
	map->pagesneeded = 0;

	if (reserved == 0)
		return;

	BZ_LOCK(bz);

	bz->free_bpages += reserved;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->reserved_bpages >= reserved);
	bz->reserved_bpages -= reserved;

	wait_map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (wait_map != NULL)
		add_map_callback(wait_map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
	if (map != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			bz->total_deferred++;
		} else {
			map = NULL;
		}
	}
	return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
	/* XXX callbacklist is not MPSAFE */
	crit_enter();
	get_mplock();
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	setsoftvm();
	rel_mplock();
	crit_exit();
}

void
busdma_swi(void)
{
	bus_dmamap_t map;

	crit_enter();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		crit_exit();
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		crit_enter();
	}
	crit_exit();
}