From 33025e3191fd0f489d9450d14e00e833fc66ee27 Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Sat, 16 Aug 2014 13:32:22 -0700
Subject: [PATCH] kernel - Adjust BUS_DMASYNC_* API

* Change the BUS_DMASYNC_* enumeration to a bitmask to allow multiple
  flags to be specified in one call.

* Now more compatible with FreeBSD and one less thing to worry about
  when porting a driver.

* While we are at it, allow add_bounce_page() to modify the size.  This
  has no effect on the current codebase since bounce pages are
  page-aligned (so the page-residual will never be less than the passed
  size), but is more correct.

* Fixes bounce-buffer issue with if_ath.
---
 sys/platform/pc32/i386/busdma_machdep.c   | 14 ++-----
 sys/platform/pc64/x86_64/busdma_machdep.c | 49 ++++++++++++++---------
 sys/sys/bus_dma.h                         | 12 +++---
 3 files changed, 40 insertions(+), 35 deletions(-)

diff --git a/sys/platform/pc32/i386/busdma_machdep.c b/sys/platform/pc32/i386/busdma_machdep.c
index cb50f1129c..2d7290fff1 100644
--- a/sys/platform/pc32/i386/busdma_machdep.c
+++ b/sys/platform/pc32/i386/busdma_machdep.c
@@ -1044,8 +1044,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 		 * want to add support for invalidating
 		 * the caches on broken hardware
 		 */
-		switch (op) {
-		case BUS_DMASYNC_PREWRITE:
+		if (op & BUS_DMASYNC_PREWRITE) {
 			while (bpage != NULL) {
 				bcopy((void *)bpage->datavaddr,
 				      (void *)bpage->vaddr,
@@ -1053,9 +1052,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 				bpage = STAILQ_NEXT(bpage, links);
 			}
 			dmat->bounce_zone->total_bounced++;
-			break;
-
-		case BUS_DMASYNC_POSTREAD:
+		}
+		if (op & BUS_DMASYNC_POSTREAD) {
 			while (bpage != NULL) {
 				bcopy((void *)bpage->vaddr,
 				      (void *)bpage->datavaddr,
@@ -1063,12 +1061,6 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 				bpage = STAILQ_NEXT(bpage, links);
 			}
 			dmat->bounce_zone->total_bounced++;
-			break;
-
-		case BUS_DMASYNC_PREREAD:
-		case BUS_DMASYNC_POSTWRITE:
-			/* No-ops */
-			break;
 		}
 	}
 }
diff --git a/sys/platform/pc64/x86_64/busdma_machdep.c b/sys/platform/pc64/x86_64/busdma_machdep.c
index 9b7d74db1c..97808dc4f1 100644
--- a/sys/platform/pc64/x86_64/busdma_machdep.c
+++ b/sys/platform/pc64/x86_64/busdma_machdep.c
@@ -165,7 +165,7 @@ static void free_bounce_zone(bus_dma_tag_t);
 static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
 static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
 static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
-		vm_offset_t, bus_size_t);
+		vm_offset_t, bus_size_t *);
 static void free_bounce_page(bus_dma_tag_t, struct bounce_page *);
 
 static bus_dmamap_t get_map_waiting(bus_dma_tag_t);
@@ -716,11 +716,10 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
 			size = buflen;
 
 		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
 			/*
-			 * note: this paddr has the same in-page offset
-			 * as vaddr and thus the paddr above, so the
-			 * size does not have to be recalculated
+			 * NOTE: paddr may have different in-page offset,
+			 *	 unless BUS_DMA_KEEP_PG_OFFSET is set.
 			 */
-			paddr = add_bounce_page(dmat, map, vaddr, size);
+			paddr = add_bounce_page(dmat, map, vaddr, &size);
 		}
 		/*
@@ -919,6 +918,7 @@ bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
 				 * EFBIG instead.
 				 */
 				error = EFBIG;
+				break;
 			}
 			first = 0;
 		}
@@ -1053,18 +1053,18 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 		 * want to add support for invalidating
 		 * the caches on broken hardware
 		 */
-		switch (op) {
-		case BUS_DMASYNC_PREWRITE:
+		if (op & BUS_DMASYNC_PREWRITE) {
 			while (bpage != NULL) {
 				bcopy((void *)bpage->datavaddr,
 				      (void *)bpage->vaddr,
 				      bpage->datacount);
 				bpage = STAILQ_NEXT(bpage, links);
 			}
+			cpu_sfence();
 			dmat->bounce_zone->total_bounced++;
-			break;
-
-		case BUS_DMASYNC_POSTREAD:
+		}
+		if (op & BUS_DMASYNC_POSTREAD) {
+			cpu_lfence();
 			while (bpage != NULL) {
 				bcopy((void *)bpage->vaddr,
 				      (void *)bpage->datavaddr,
@@ -1072,13 +1072,9 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 				bpage = STAILQ_NEXT(bpage, links);
 			}
 			dmat->bounce_zone->total_bounced++;
-			break;
-
-		case BUS_DMASYNC_PREREAD:
-		case BUS_DMASYNC_POSTWRITE:
-			/* No-ops */
-			break;
 		}
+		/* BUS_DMASYNC_PREREAD - no operation on intel */
+		/* BUS_DMASYNC_POSTWRITE - no operation on intel */
 	}
 }
 
@@ -1329,10 +1325,11 @@ return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
 
 static bus_addr_t
 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-		bus_size_t size)
+		bus_size_t *sizep)
 {
 	struct bounce_zone *bz = dmat->bounce_zone;
 	struct bounce_page *bpage;
+	bus_size_t size;
 
 	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
 	map->pagesneeded--;
@@ -1355,9 +1352,25 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
 	BZ_UNLOCK(bz);
 
 	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
-		/* Page offset needs to be preserved. */
+		/*
+		 * Page offset needs to be preserved.  No size adjustments
+		 * needed.
+		 */
 		bpage->vaddr |= vaddr & PAGE_MASK;
 		bpage->busaddr |= vaddr & PAGE_MASK;
+		size = *sizep;
+	} else {
+		/*
+		 * Realign to bounce page base address, reduce size if
+		 * necessary.  Bounce pages are typically already
+		 * page-aligned.
+		 */
+		size = PAGE_SIZE - (bpage->busaddr & PAGE_MASK);
+		if (size < *sizep) {
+			*sizep = size;
+		} else {
+			size = *sizep;
+		}
 	}
 
 	bpage->datavaddr = vaddr;
diff --git a/sys/sys/bus_dma.h b/sys/sys/bus_dma.h
index c88528572c..348078ea67 100644
--- a/sys/sys/bus_dma.h
+++ b/sys/sys/bus_dma.h
@@ -112,12 +112,12 @@ struct uio;
  *
  * Operations performed by bus_dmamap_sync().
  */
-typedef enum {
-	BUS_DMASYNC_PREREAD,
-	BUS_DMASYNC_POSTREAD,
-	BUS_DMASYNC_PREWRITE,
-	BUS_DMASYNC_POSTWRITE
-} bus_dmasync_op_t;
+typedef int bus_dmasync_op_t;
+
+#define BUS_DMASYNC_PREREAD	0x01
+#define BUS_DMASYNC_POSTREAD	0x02
+#define BUS_DMASYNC_PREWRITE	0x04
+#define BUS_DMASYNC_POSTWRITE	0x08
 
 /*
 * bus_dma_tag_t
-- 
2.41.0
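
For illustration, a driver-side sketch of what the bitmask form permits.
This is not part of the patch: the foo_softc type, its fields, and
foo_txrx_done() are hypothetical names standing in for any real driver.
The point is that a map bounced in both directions can now be synced
with one bus_dmamap_sync() call, where the old enumeration forced one
call per operation.

#include <sys/types.h>
#include <sys/bus_dma.h>	/* BUS_DMASYNC_* flags, bus_dmamap_sync() */

/* Hypothetical softc; a real driver declares its own. */
struct foo_softc {
	bus_dma_tag_t	sc_dmat;	/* DMA tag for the data buffer */
	bus_dmamap_t	sc_map;		/* map loaded for the transfer */
};

static void
foo_txrx_done(struct foo_softc *sc)
{
	/*
	 * The device has finished both reading from and writing to the
	 * buffer.  With bus_dmasync_op_t as a bitmask, the two POST
	 * operations can be or'd into a single call; _bus_dmamap_sync()
	 * now tests each flag with '&', so both bounce copies happen.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

The same combination previously required two calls (and, being a switch
on an enumeration, an or'd value would have silently matched nothing).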