kernel - Adjust BUS_DMASYNC_* API
author: Matthew Dillon <dillon@apollo.backplane.com>
Sat, 16 Aug 2014 20:32:22 +0000 (13:32 -0700)
committer: Matthew Dillon <dillon@apollo.backplane.com>
Sat, 16 Aug 2014 20:32:22 +0000 (13:32 -0700)
* Change the BUS_DMASYNC_* enumeration to a bitmask to allow multiple
  flags to be specified in one call.

* Now more compatible with FreeBSD and one less thing to worry about when
  porting a driver.

* While we are at it, allow add_bounce_page() to modify the size.  This has
  no effect on the current codebase since bounce pages are page-aligned (so the
  page-residual will never be less than the passed size), but is more
  correct.

* Fixes bounce-buffer issue with if_ath.

sys/platform/pc32/i386/busdma_machdep.c
sys/platform/pc64/x86_64/busdma_machdep.c
sys/sys/bus_dma.h

index cb50f11..2d7290f 100644 (file)
@@ -1044,8 +1044,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
-               switch (op) {
-               case BUS_DMASYNC_PREWRITE:
+               if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
@@ -1053,9 +1052,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
-                       break;
-
-               case BUS_DMASYNC_POSTREAD:
+               }
+               if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
@@ -1063,12 +1061,6 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
-                       break;
-
-               case BUS_DMASYNC_PREREAD:
-               case BUS_DMASYNC_POSTWRITE:
-                       /* No-ops */
-                       break;
                }
        }
 }
index 9b7d74d..97808dc 100644 (file)
@@ -165,7 +165,7 @@ static void         free_bounce_zone(bus_dma_tag_t);
 static int             reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
 static void            return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
 static bus_addr_t      add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
-                           vm_offset_t, bus_size_t);
+                           vm_offset_t, bus_size_t *);
 static void            free_bounce_page(bus_dma_tag_t, struct bounce_page *);
 
 static bus_dmamap_t    get_map_waiting(bus_dma_tag_t);
@@ -716,11 +716,10 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        size = buflen;
                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        /*
-                        * note: this paddr has the same in-page offset
-                        * as vaddr and thus the paddr above, so the
-                        * size does not have to be recalculated
+                        * NOTE: paddr may have different in-page offset,
+                        *       unless BUS_DMA_KEEP_PG_OFFSET is set.
                         */
-                       paddr = add_bounce_page(dmat, map, vaddr, size);
+                       paddr = add_bounce_page(dmat, map, vaddr, &size);
                }
 
                /*
@@ -919,6 +918,7 @@ bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
                                 * EFBIG instead.
                                 */
                                error = EFBIG;
+                               break;
                        }
                        first = 0;
                }
@@ -1053,18 +1053,18 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
-               switch (op) {
-               case BUS_DMASYNC_PREWRITE:
+               if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
+                       cpu_sfence();
                        dmat->bounce_zone->total_bounced++;
-                       break;
-
-               case BUS_DMASYNC_POSTREAD:
+               }
+               if (op & BUS_DMASYNC_POSTREAD) {
+                       cpu_lfence();
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
@@ -1072,13 +1072,9 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
-                       break;
-
-               case BUS_DMASYNC_PREREAD:
-               case BUS_DMASYNC_POSTWRITE:
-                       /* No-ops */
-                       break;
                }
+               /* BUS_DMASYNC_PREREAD          - no operation on intel */
+               /* BUS_DMASYNC_POSTWRITE        - no operation on intel */
        }
 }
 
@@ -1329,10 +1325,11 @@ return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
 
 static bus_addr_t
 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-               bus_size_t size)
+               bus_size_t *sizep)
 {
        struct bounce_zone *bz = dmat->bounce_zone;
        struct bounce_page *bpage;
+       bus_size_t size;
 
        KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
        map->pagesneeded--;
@@ -1355,9 +1352,25 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
        BZ_UNLOCK(bz);
 
        if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
-               /* Page offset needs to be preserved. */
+               /*
+                * Page offset needs to be preserved.  No size adjustments
+                * needed.
+                */
                bpage->vaddr |= vaddr & PAGE_MASK;
                bpage->busaddr |= vaddr & PAGE_MASK;
+               size = *sizep;
+       } else {
+               /*
+                * Realign to bounce page base address, reduce size if
+                * necessary.  Bounce pages are typically already
+                * page-aligned.
+                */
+               size = PAGE_SIZE - (bpage->busaddr & PAGE_MASK);
+               if (size < *sizep) {
+                       *sizep = size;
+               } else {
+                       size = *sizep;
+               }
        }
 
        bpage->datavaddr = vaddr;
index c885285..348078e 100644 (file)
@@ -112,12 +112,12 @@ struct uio;
  *
  *     Operations performed by bus_dmamap_sync().
  */
-typedef enum {
-       BUS_DMASYNC_PREREAD,
-       BUS_DMASYNC_POSTREAD,
-       BUS_DMASYNC_PREWRITE,
-       BUS_DMASYNC_POSTWRITE
-} bus_dmasync_op_t;
+typedef int bus_dmasync_op_t;
+
+#define        BUS_DMASYNC_PREREAD     0x01
+#define        BUS_DMASYNC_POSTREAD    0x02
+#define        BUS_DMASYNC_PREWRITE    0x04
+#define        BUS_DMASYNC_POSTWRITE   0x08
 
 /*
  *     bus_dma_tag_t