Commit | Line | Data |
---|---|---|
984263bc MD |
1 | /* |
2 | * Copyright (c) 1997, 1998 Justin T. Gibbs. | |
3 | * All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions, and the following disclaimer, | |
10 | * without modification, immediately at the beginning of the file. | |
11 | * 2. The name of the author may not be used to endorse or promote products | |
12 | * derived from this software without specific prior written permission. | |
13 | * | |
14 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | |
15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
17 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR | |
18 | * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
24 | * SUCH DAMAGE. | |
25 | * | |
497a413c | 26 | * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $ |
3641b7ca | 27 | * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $ |
984263bc MD |
28 | */ |
29 | ||
30 | #include <sys/param.h> | |
31 | #include <sys/systm.h> | |
32 | #include <sys/malloc.h> | |
33 | #include <sys/mbuf.h> | |
34 | #include <sys/uio.h> | |
1f7ab7c9 | 35 | #include <sys/bus_dma.h> |
1fae3d5f SZ |
36 | #include <sys/kernel.h> |
37 | #include <sys/sysctl.h> | |
78c79c21 | 38 | #include <sys/lock.h> |
684a93c4 MD |
39 | |
40 | #include <sys/thread2.h> | |
78c79c21 | 41 | #include <sys/spinlock2.h> |
684a93c4 | 42 | #include <sys/mplock2.h> |
984263bc MD |
43 | |
44 | #include <vm/vm.h> | |
45 | #include <vm/vm_page.h> | |
46 | ||
47 | /* XXX needed for to access pmap to convert per-proc virtual to physical */ | |
48 | #include <sys/proc.h> | |
49 | #include <sys/lock.h> | |
50 | #include <vm/vm_map.h> | |
51 | ||
984263bc MD |
52 | #include <machine/md_var.h> |
53 | ||
f3513319 | 54 | #define MAX_BPAGES 1024 |
984263bc | 55 | |
6eed46e7 MD |
56 | /* |
57 | * 16 x N declared on stack. | |
58 | */ | |
59 | #define BUS_DMA_CACHE_SEGMENTS 8 | |
60 | ||
5efcff53 | 61 | struct bounce_zone; |
eec44d94 | 62 | struct bus_dmamap; |
5efcff53 | 63 | |
/*
 * A DMA tag describes the addressing/alignment constraints that every
 * map and allocation derived from it must honor.  Tags form a chain
 * through 'parent'; run_filter() walks that chain.
 */
struct bus_dma_tag {
	bus_dma_tag_t	parent;		/* inherited constraints, may be NULL */
	bus_size_t	alignment;	/* segment alignment, power of 2 */
	bus_size_t	boundary;	/* segments may not cross this boundary */
	bus_addr_t	lowaddr;	/* bounce if paddr in (lowaddr,highaddr] */
	bus_addr_t	highaddr;	/* upper bound of the exclusion window */
	bus_dma_filter_t *filter;	/* optional per-tag bounce veto callback */
	void		*filterarg;	/* opaque argument passed to filter */
	bus_size_t	maxsize;	/* maximum total transfer size */
	u_int		nsegments;	/* max number of S/G segments */
	bus_size_t	maxsegsz;	/* max size of a single segment */
	int		flags;		/* BUS_DMA_* public and private flags */
	int		ref_count;	/* self plus referencing child tags */
	int		map_count;	/* maps created from this tag */
	bus_dma_segment_t *segments;	/* lazily allocated shared S/G array */
	struct bounce_zone *bounce_zone; /* bounce pool when bouncing needed */
#ifdef SMP
	struct spinlock spin;		/* protects the shared segments[] array */
#else
	int		unused0;
#endif
};
86 | ||
f3513319 SZ |
87 | /* |
88 | * bus_dma_tag private flags | |
89 | */ | |
46fbe6c5 SZ |
90 | #define BUS_DMA_BOUNCE_ALIGN BUS_DMA_BUS2 |
91 | #define BUS_DMA_BOUNCE_LOWADDR BUS_DMA_BUS3 | |
f3513319 SZ |
92 | #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 |
93 | ||
46fbe6c5 SZ |
94 | #define BUS_DMA_COULD_BOUNCE (BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN) |
95 | ||
96 | #define BUS_DMAMEM_KMALLOC(dmat) \ | |
97 | ((dmat)->maxsize <= PAGE_SIZE && \ | |
98 | (dmat)->alignment <= PAGE_SIZE && \ | |
99 | (dmat)->lowaddr >= ptoa(Maxmem)) | |
100 | ||
984263bc MD |
/*
 * One page of bounce-buffer memory.  Data is copied between 'vaddr'
 * (the bounce page) and 'datavaddr' (the client's buffer) as needed.
 */
struct bounce_page {
	vm_offset_t vaddr;		/* kva of bounce buffer */
	bus_addr_t busaddr;		/* Physical address */
	vm_offset_t datavaddr;		/* kva of client data */
	bus_size_t datacount;		/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};
108 | ||
5efcff53 SZ |
/*
 * A pool of bounce pages shared by tags with compatible lowaddr and
 * alignment constraints, plus the list of maps waiting for pages.
 * Counters are exported through the per-zone sysctl tree below.
 */
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;	/* on bounce_zone_list */
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; /* free pages */
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; /* deferred loads */
#ifdef SMP
	struct spinlock spin;		/* taken via BZ_LOCK/BZ_UNLOCK */
#else
	int unused0;
#endif
	int total_bpages;		/* pages ever added to this zone */
	int free_bpages;		/* pages currently unallocated */
	int reserved_bpages;		/* pages reserved but not yet in use */
	int active_bpages;		/* pages currently bouncing data */
	int total_bounced;		/* stat: bounce copies performed */
	int total_deferred;		/* stat: loads deferred for pages */
	int reserve_failed;		/* stat: failed page reservations */
	bus_size_t alignment;		/* alignment constraint of this zone */
	bus_addr_t lowaddr;		/* lowaddr constraint of this zone */
	char zoneid[8];			/* sysctl node name */
	char lowaddrid[20];		/* printable lowaddr for sysctl */
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};
132 | ||
78c79c21 SZ |
133 | #ifdef SMP |
134 | #define BZ_LOCK(bz) spin_lock_wr(&(bz)->spin) | |
135 | #define BZ_UNLOCK(bz) spin_unlock_wr(&(bz)->spin) | |
136 | #else | |
137 | #define BZ_LOCK(bz) crit_enter() | |
138 | #define BZ_UNLOCK(bz) crit_exit() | |
139 | #endif | |
140 | ||
12586b82 MD |
141 | static struct lwkt_token bounce_zone_tok = |
142 | LWKT_TOKEN_MP_INITIALIZER(bounce_zone_tok); | |
5efcff53 SZ |
143 | static int busdma_zonecount; |
144 | static STAILQ_HEAD(, bounce_zone) bounce_zone_list = | |
145 | STAILQ_HEAD_INITIALIZER(bounce_zone_list); | |
984263bc | 146 | |
5efcff53 SZ |
147 | int busdma_swi_pending; |
148 | static int total_bounce_pages; | |
854f18b2 | 149 | static int max_bounce_pages = MAX_BPAGES; |
5eba2447 | 150 | static int bounce_alignment = 1; /* XXX temporary */ |
984263bc | 151 | |
854f18b2 | 152 | TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages); |
46fbe6c5 | 153 | TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment); |
854f18b2 | 154 | |
/*
 * A DMA map tracks the bounce pages used by one load operation and,
 * when a load is deferred for lack of pages, the state needed to
 * retry it (buf/buflen/callback) from the waiting list.
 */
struct bus_dmamap {
	struct bp_list bpages;		/* bounce pages owned by this map */
	int pagesneeded;		/* bounce pages the current load needs */
	int pagesreserved;		/* bounce pages reserved so far */
	bus_dma_tag_t dmat;		/* tag of a deferred load */
	void *buf;			/* unmapped buffer pointer */
	bus_size_t buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback; /* completion callback for deferral */
	void *callback_arg;		/* argument for callback */
	STAILQ_ENTRY(bus_dmamap) links;	/* waiting/callback list linkage */
};
166 | ||
5efcff53 SZ |
167 | static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist = |
168 | STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist); | |
169 | ||
984263bc MD |
170 | static struct bus_dmamap nobounce_dmamap; |
171 | ||
5efcff53 | 172 | static int alloc_bounce_zone(bus_dma_tag_t); |
f3f55019 | 173 | static int alloc_bounce_pages(bus_dma_tag_t, u_int, int); |
46cbb173 | 174 | static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int); |
65cf4e39 | 175 | static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t); |
96bdebb8 SZ |
176 | static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t, |
177 | vm_offset_t, bus_size_t); | |
178 | static void free_bounce_page(bus_dma_tag_t, struct bounce_page *); | |
984263bc | 179 | |
65cf4e39 SZ |
180 | static bus_dmamap_t get_map_waiting(bus_dma_tag_t); |
181 | static void add_map_callback(bus_dmamap_t); | |
182 | ||
1fae3d5f | 183 | SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); |
5efcff53 SZ |
184 | SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages, |
185 | 0, "Total bounce pages"); | |
854f18b2 SZ |
186 | SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages, |
187 | 0, "Max bounce pages per bounce zone"); | |
5eba2447 SZ |
188 | SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD, |
189 | &bounce_alignment, 0, "Obey alignment constraint"); | |
1fae3d5f | 190 | |
984263bc MD |
191 | static __inline int |
192 | run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) | |
193 | { | |
194 | int retval; | |
195 | ||
196 | retval = 0; | |
197 | do { | |
46fbe6c5 SZ |
198 | if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) || |
199 | (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0)) | |
200 | && (dmat->filter == NULL || | |
201 | dmat->filter(dmat->filterarg, paddr) != 0)) | |
984263bc MD |
202 | retval = 1; |
203 | ||
141ebb24 | 204 | dmat = dmat->parent; |
984263bc MD |
205 | } while (retval == 0 && dmat != NULL); |
206 | return (retval); | |
207 | } | |
208 | ||
6eed46e7 MD |
/*
 * Return the segment array to use for a load and, if the tag's shared
 * array is chosen, acquire the tag spinlock protecting it.
 *
 * Loads needing at most BUS_DMA_CACHE_SEGMENTS segments use the
 * caller-supplied on-stack 'cache' array and take no lock; larger
 * loads share tag->segments under tag->spin.  Paired with
 * bus_dma_tag_unlock().
 */
static __inline
bus_dma_segment_t *
bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
{
	if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		return(cache);
#ifdef SMP
	spin_lock_wr(&tag->spin);
#endif
	return(tag->segments);
}
220 | ||
/*
 * Release the tag spinlock if bus_dma_tag_lock() took it, i.e. only
 * when the shared tag->segments array (not the stack cache) was used.
 */
static __inline
void
bus_dma_tag_unlock(bus_dma_tag_t tag)
{
#ifdef SMP
	if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
		spin_unlock_wr(&tag->spin);
#endif
}
230 | ||
984263bc MD |
/*
 * Allocate a device specific dma_tag.
 *
 * Constraints are inherited and tightened from 'parent' (which may be
 * NULL).  On success *dmat holds the new tag and 0 is returned; on
 * failure *dmat is NULL and an errno is returned.  If the tag could
 * require bouncing and BUS_DMA_ALLOCNOW is set, the bounce zone and an
 * initial pool of bounce pages are allocated up front.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/*
	 * Sanity checks
	 */

	if (alignment == 0)
		alignment = 1;
	if (alignment & (alignment - 1))
		panic("alignment must be power of 2\n");

	if (boundary != 0) {
		if (boundary & (boundary - 1))
			panic("boundary must be power of 2\n");
		if (boundary < maxsegsz) {
			/* Clamp: no segment may span a boundary. */
			kprintf("boundary < maxsegsz:\n");
			print_backtrace(-1);
			maxsegsz = boundary;
		}
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

#ifdef SMP
	spin_init(&newtag->spin);
#endif
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round both exclusion thresholds out to the last byte of their page */
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;	/* allocated lazily at first map/alloc */
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

#ifdef notyet
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			parent->ref_count++;
	}

	/* Record whether this tag can ever need bouncing. */
	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				/* Enough pages to cover maxsize. */
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}
back:
	if (error)
		kfree(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	return error;
}
359 | ||
360 | int | |
361 | bus_dma_tag_destroy(bus_dma_tag_t dmat) | |
362 | { | |
363 | if (dmat != NULL) { | |
984263bc MD |
364 | if (dmat->map_count != 0) |
365 | return (EBUSY); | |
366 | ||
367 | while (dmat != NULL) { | |
368 | bus_dma_tag_t parent; | |
369 | ||
370 | parent = dmat->parent; | |
371 | dmat->ref_count--; | |
372 | if (dmat->ref_count == 0) { | |
06577778 | 373 | if (dmat->segments != NULL) |
efda3bd0 MD |
374 | kfree(dmat->segments, M_DEVBUF); |
375 | kfree(dmat, M_DEVBUF); | |
984263bc MD |
376 | /* |
377 | * Last reference count, so | |
378 | * release our reference | |
379 | * count on our parent. | |
380 | */ | |
381 | dmat = parent; | |
382 | } else | |
383 | dmat = NULL; | |
384 | } | |
385 | } | |
386 | return (0); | |
387 | } | |
388 | ||
6eed46e7 MD |
/*
 * Return the maximum transfer size the tag was created with.
 */
bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
	return(tag->maxsize);
}
394 | ||
984263bc MD |
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 *
 * Tags that can never bounce get a NULL map (*mapp == NULL) and only
 * the map count is bumped.  Tags that may bounce get a real map and an
 * attempt is made to grow the bounce pool on a per-map basis, up to
 * 'maxpages'.  Note map_count is incremented even for the NULL-map
 * case, matching the unconditional decrement in bus_dmamap_destroy().
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	/* First map for this tag: allocate the shared segment array. */
	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			/* Only pages above lowaddr can ever need bouncing. */
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				/* The minimum allocation only counts once. */
				if (!error)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				/* Pool growth is best-effort after that. */
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (!error)
		dmat->map_count++;
	return error;
}
469 | ||
470 | /* | |
471 | * Destroy a handle for mapping from kva/uva/physical | |
472 | * address space into bus device space. | |
473 | */ | |
474 | int | |
475 | bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) | |
476 | { | |
477 | if (map != NULL) { | |
478 | if (STAILQ_FIRST(&map->bpages) != NULL) | |
479 | return (EBUSY); | |
efda3bd0 | 480 | kfree(map, M_DEVBUF); |
984263bc MD |
481 | } |
482 | dmat->map_count--; | |
483 | return (0); | |
484 | } | |
485 | ||
46fbe6c5 SZ |
/*
 * Verify that a kmalloc'ed dmamem allocation satisfies the tag's
 * page-crossing and alignment constraints.
 *
 * Returns 0 when the buffer is acceptable, otherwise the size to use
 * for a power-of-2 retry allocation (see bus_dmamem_alloc()).  With
 * 'verify' set this is the post-retry check: failures are reported
 * with a backtrace instead of triggering another retry.
 */
static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	/* Does [vaddr, vaddr + maxsize) cross a page boundary? */
	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		if (verify || bootverbose)
			kprintf("boundary check failed\n");
		if (verify)
			print_backtrace(-1); /* XXX panic */
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		if (verify || bootverbose)
			kprintf("alignment check failed\n");
		if (verify)
			print_backtrace(-1); /* XXX panic */
		/* Retry size must cover the alignment as well. */
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}
511 | ||
984263bc MD |
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints lited in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers so do not allocate a dma map.
 *
 * Small, loosely constrained allocations (see BUS_DMAMEM_KMALLOC) come
 * from kmalloc, with a power-of-2 retry if the first attempt crosses a
 * page or is misaligned; everything else goes through contigmalloc.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	/* First allocation from this tag: set up the segment array. */
	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * XXX
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			/* Smallest power of 2 strictly above maxsize. */
			for (size = 1; size <= maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
577 | ||
578 | /* | |
579 | * Free a piece of memory and it's allociated dmamap, that was allocated | |
580 | * via bus_dmamem_alloc. Make the same choice for free/contigfree. | |
581 | */ | |
582 | void | |
583 | bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) | |
584 | { | |
585 | /* | |
586 | * dmamem does not need to be bounced, so the map should be | |
587 | * NULL | |
588 | */ | |
589 | if (map != NULL) | |
590 | panic("bus_dmamem_free: Invalid map freed\n"); | |
46fbe6c5 | 591 | if (BUS_DMAMEM_KMALLOC(dmat)) |
efda3bd0 | 592 | kfree(vaddr, M_DEVBUF); |
984263bc MD |
593 | else |
594 | contigfree(vaddr, dmat->maxsize, M_DEVBUF); | |
595 | } | |
596 | ||
070a7c5f SZ |
597 | static __inline vm_paddr_t |
598 | _bus_dma_extract(pmap_t pmap, vm_offset_t vaddr) | |
599 | { | |
600 | if (pmap) | |
601 | return pmap_extract(pmap, vaddr); | |
602 | else | |
603 | return pmap_kextract(vaddr); | |
604 | } | |
605 | ||
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrace, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 *
 * Returns 0 on success, EINPROGRESS when the load was queued waiting
 * for bounce pages (blocking path), ENOMEM when the non-blocking
 * reservation failed, or EFBIG when the buffer needs more than
 * 'nsegments' segments.  On ENOMEM/EFBIG any bounce pages already
 * attached to the map are released.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments,
			int nsegments,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	/* Loads without a map can never bounce; use the shared dummy. */
	if (map == NULL)
		map = &nobounce_dmamap;

#ifdef INVARIANTS
	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			/* Advance to the next page boundary. */
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			/* Non-blocking: fail outright if pages are short. */
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				/* Will be retried from busdma_swi(). */
				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];	/* continue filling the last segment */

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			/* Physically contiguous: extend current segment. */
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0)
		error = EFBIG;

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}
793 | ||
/*
 * Map the buffer buf into bus space using the dmamap map.
 *
 * The callback is invoked synchronously with the resulting segment
 * list unless bounce pages had to be waited for, in which case
 * EINPROGRESS is returned and the callback runs later from the
 * deferred path.  Kernel virtual addresses only (pmap == NULL).
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustment".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		/* Saved for the deferred-callback path. */
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
					segments, dmat->nsegments,
					NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS) {
		/* Deferred; callback will run when pages free up. */
		bus_dma_tag_unlock(dmat);
		return error;
	}
	callback(callback_arg, segments, nsegs, error);
	bus_dma_tag_unlock(dmat);
	return 0;
}
832 | ||
984263bc MD |
/*
 * Like _bus_dmamap_load(), but for mbufs.
 *
 * Thin wrapper around bus_dmamap_load_mbuf_segment() that supplies the
 * tag's segment array and always invokes the two-argument callback
 * synchronously (mbuf loads are forced to BUS_DMA_NOWAIT, so they can
 * never be deferred).
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustment".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
			segments, dmat->nsegments, &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	bus_dma_tag_unlock(dmat);
	return error;
}
868 | ||
/*
 * Load an mbuf chain into a caller-supplied segment array.
 *
 * Each non-empty mbuf in the chain is fed through
 * _bus_dmamap_load_buffer(), accumulating into segs/nsegs.  Only
 * BUS_DMA_NOWAIT is supported, so EINPROGRESS can never occur.
 * Returns EINVAL if the packet exceeds the tag's maxsize, EFBIG when
 * the chain is too fragmented for the available segments or bounce
 * pages, 0 on success with *nsegs filled in.
 */
int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d\n", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only support %d segments\n",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported\n"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
			}
			first = 0;
		}
#ifdef INVARIANTS
		if (!error)
			KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
	} else {
		*nsegs = 0;
		error = EINVAL;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}
923 | ||
/*
 * Like _bus_dmamap_load(), but for uios.
 *
 * Each iovec is loaded separately; segs/nsegs_left form a cursor over
 * the segment array so successive iovecs append after the segments the
 * previous ones consumed.  The callback receives the total number of
 * segments used, or 0 segments on error.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	bus_dma_segment_t *segs;
	int nsegs_left;

	/* Use the on-stack array when the tag's segment count fits. */
	if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		segments = cache_segments;
	else
		segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments,
				   M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustment".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	/* Cursor into the segment array. */
	segs = segments;
	nsegs_left = dmat->nsegments;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		/* User addresses must be translated via the owner's pmap. */
		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				segs, nsegs_left,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
		/* Advance the cursor past the segments just consumed. */
		if (error == 0) {
			nsegs_left -= nsegs;
			segs += nsegs;
		}
	}

	/*
	 * Minimum one DMA segment, even if 0-length buffer.
	 */
	if (nsegs_left == dmat->nsegments)
		--nsegs_left;

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, dmat->nsegments - nsegs_left,
			 (bus_size_t)uio->uio_resid, error);
	}
	/* Free the segment array only if it was heap-allocated above. */
	if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
		kfree(segments, M_DEVBUF);
	return error;
}
1017 | ||
1018 | /* | |
1019 | * Release the mapping held by map. | |
1020 | */ | |
1021 | void | |
1022 | _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) | |
1023 | { | |
1024 | struct bounce_page *bpage; | |
1025 | ||
1026 | while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { | |
1027 | STAILQ_REMOVE_HEAD(&map->bpages, links); | |
1028 | free_bounce_page(dmat, bpage); | |
1029 | } | |
1030 | } | |
1031 | ||
/*
 * Synchronize DMA data with the bounce pages attached to the map.
 * Only maps that actually use bounce pages need any work here.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			/* Copy client data into bounce pages before DMA out. */
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			/* One bounce event per sync, not per page. */
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			/* Copy bounce pages back to client data after DMA in. */
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
1071 | ||
/*
 * Assign a bounce zone to dmat, reusing an existing zone when one with
 * compatible alignment and lowaddr constraints already exists, otherwise
 * creating a new zone and registering its statistics under hw.busdma.
 *
 * The global zone list is protected by bounce_zone_tok.  The candidate
 * zone is allocated before taking the token so no allocation happens
 * while it is held.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned\n"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&bounce_zone_tok);

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if (dmat->alignment <= bz->alignment &&
		    dmat->lowaddr >= bz->lowaddr) {
			lwkt_reltoken(&bounce_zone_tok);

			/* Reuse; discard the preallocated candidate. */
			dmat->bounce_zone = bz;
			kfree(new_bz, M_DEVBUF);
			return 0;
		}
	}
	bz = new_bz;

#ifdef SMP
	spin_init(&bz->spin);
#endif
	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	/* Bounce pages are whole pages; round the alignment up to match. */
	bz->alignment = round_page(dmat->alignment);
	ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

	lwkt_reltoken(&bounce_zone_tok);

	dmat->bounce_zone = bz;

	/* Publish per-zone statistics; failure here is non-fatal. */
	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that were failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return 0;
}
1162 | ||
/*
 * Allocate up to numpages bounce pages for dmat's bounce zone and put
 * them on the zone's free list.  Returns the number of pages actually
 * allocated; stops early if contiguous low memory runs out.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0, mflags;

	/* Map the caller's busdma waiting policy onto kmalloc semantics. */
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		/*
		 * The page itself must satisfy the zone's lowaddr and
		 * alignment constraints, hence contigmalloc.
		 */
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 mflags, 0ul,
							 bz->lowaddr,
							 bz->alignment, 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		BZ_LOCK(bz);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		BZ_UNLOCK(bz);

		count++;
		numpages--;
	}
	return count;
}
1201 | ||
65cf4e39 | 1202 | /* Assume caller holds bounce zone spinlock */ |
984263bc | 1203 | static int |
46cbb173 | 1204 | reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) |
984263bc | 1205 | { |
5efcff53 | 1206 | struct bounce_zone *bz = dmat->bounce_zone; |
984263bc MD |
1207 | int pages; |
1208 | ||
5efcff53 | 1209 | pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); |
63fbdfd2 SZ |
1210 | if (!commit && map->pagesneeded > (map->pagesreserved + pages)) { |
1211 | bz->reserve_failed++; | |
46cbb173 | 1212 | return (map->pagesneeded - (map->pagesreserved + pages)); |
63fbdfd2 | 1213 | } |
ad196c46 | 1214 | |
5efcff53 | 1215 | bz->free_bpages -= pages; |
ad196c46 | 1216 | |
5efcff53 | 1217 | bz->reserved_bpages += pages; |
ad196c46 SZ |
1218 | KKASSERT(bz->reserved_bpages <= bz->total_bpages); |
1219 | ||
984263bc MD |
1220 | map->pagesreserved += pages; |
1221 | pages = map->pagesneeded - map->pagesreserved; | |
1222 | ||
5efcff53 | 1223 | return pages; |
984263bc MD |
1224 | } |
1225 | ||
65cf4e39 SZ |
1226 | static void |
1227 | return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map) | |
1228 | { | |
1229 | struct bounce_zone *bz = dmat->bounce_zone; | |
1230 | int reserved = map->pagesreserved; | |
1231 | bus_dmamap_t wait_map; | |
1232 | ||
1233 | map->pagesreserved = 0; | |
1234 | map->pagesneeded = 0; | |
1235 | ||
1236 | if (reserved == 0) | |
1237 | return; | |
1238 | ||
1239 | BZ_LOCK(bz); | |
ad196c46 | 1240 | |
65cf4e39 | 1241 | bz->free_bpages += reserved; |
ad196c46 SZ |
1242 | KKASSERT(bz->free_bpages <= bz->total_bpages); |
1243 | ||
1244 | KKASSERT(bz->reserved_bpages >= reserved); | |
65cf4e39 | 1245 | bz->reserved_bpages -= reserved; |
ad196c46 | 1246 | |
65cf4e39 | 1247 | wait_map = get_map_waiting(dmat); |
ad196c46 | 1248 | |
65cf4e39 SZ |
1249 | BZ_UNLOCK(bz); |
1250 | ||
1251 | if (wait_map != NULL) | |
1252 | add_map_callback(map); | |
1253 | } | |
1254 | ||
/*
 * Take one previously-reserved bounce page from the zone, attach it to
 * the map for the given client address/size, and return the page's bus
 * address for use in the segment list.
 */
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	/* The page was accounted for by reserve_bounce_pages(). */
	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	/* Reserved -> active accounting. */
	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	/* Remember the client buffer so _bus_dmamap_sync() can copy. */
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}
1287 | ||
/*
 * Return one active bounce page to its zone's free list and, if a
 * deferred map can now be satisfied, schedule its callback.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	/* Clear the stale client-buffer association. */
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	/* Active -> free accounting. */
	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	/* get_map_waiting() requires the zone lock we hold here. */
	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}
1314 | ||
1315 | /* Assume caller holds bounce zone spinlock */ | |
1316 | static bus_dmamap_t | |
1317 | get_map_waiting(bus_dma_tag_t dmat) | |
1318 | { | |
1319 | struct bounce_zone *bz = dmat->bounce_zone; | |
1320 | bus_dmamap_t map; | |
1321 | ||
1322 | map = STAILQ_FIRST(&bz->bounce_map_waitinglist); | |
1323 | if (map != NULL) { | |
46cbb173 | 1324 | if (reserve_bounce_pages(map->dmat, map, 1) == 0) { |
eec44d94 | 1325 | STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links); |
5efcff53 | 1326 | bz->total_deferred++; |
78c79c21 SZ |
1327 | } else { |
1328 | map = NULL; | |
984263bc MD |
1329 | } |
1330 | } | |
65cf4e39 SZ |
1331 | return map; |
1332 | } | |
78c79c21 | 1333 | |
/*
 * Queue a map on the global callback list and raise the soft VM
 * interrupt so busdma_swi() will retry its deferred load.
 * The crit_enter/get_mplock ordering below is deliberate; do not
 * reorder these calls.
 */
static void
add_map_callback(bus_dmamap_t map)
{
	/* XXX callbacklist is not MPSAFE */
	crit_enter();
	get_mplock();
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	setsoftvm();
	rel_mplock();
	crit_exit();
}
1346 | ||
/*
 * Soft-interrupt handler: drain the deferred-map callback list,
 * retrying each map's bus_dmamap_load().  The critical section is
 * dropped around the (potentially long-running) load and re-entered
 * before re-checking the list head.
 */
void
busdma_swi(void)
{
	bus_dmamap_t map;

	crit_enter();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		crit_exit();
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		crit_enter();
	}
	crit_exit();
}