busdma: Add PRIVBZONE and ALLOCALL bus_dma_tag_create() flags.
author	Sepherosa Ziehau <sephe@dragonflybsd.org>
	Mon, 27 Dec 2010 07:44:23 +0000 (15:44 +0800)
committer	Sepherosa Ziehau <sephe@dragonflybsd.org>
	Sat, 8 Jan 2011 15:14:50 +0000 (23:14 +0800)
PRIVBZONE
  Use a private bounce zone instead of a shared one.  A private bounce
  zone vanishes when its dma_tag is destroyed.

ALLOCALL
  Allocate all required resources (mainly bounce buffers) up front; if
  any allocation fails, bus_dmamap_create() fails.

Using PRIVBZONE alone can reduce contention between driver instances.

Using PRIVBZONE and ALLOCALL together guarantees that the "deferred
dmamap load callback" will never happen.  This eases driver work and
reduces the effort needed to fix existing drivers that may
(incorrectly) rely on, or may not expect, the "deferred dmamap load
callback" semantics of bus_dmamap_load().

sys/platform/pc32/i386/busdma_machdep.c
sys/platform/pc64/x86_64/busdma_machdep.c
sys/sys/bus_dma.h

sys/platform/pc32/i386/busdma_machdep.c
index c656159..354d708 100644
@@ -144,6 +144,8 @@ static int busdma_zonecount;
 static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
        STAILQ_HEAD_INITIALIZER(bounce_zone_list);
 
+static int busdma_priv_zonecount = -1;
+
 int busdma_swi_pending;
 static int total_bounce_pages;
 static int max_bounce_pages = MAX_BPAGES;
@@ -173,6 +175,8 @@ static struct bus_dmamap nobounce_dmamap;
 
 static int             alloc_bounce_zone(bus_dma_tag_t);
 static int             alloc_bounce_pages(bus_dma_tag_t, u_int, int);
+static void            free_bounce_pages_all(bus_dma_tag_t);
+static void            free_bounce_zone(bus_dma_tag_t);
 static int             reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
 static void            return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
 static bus_addr_t      add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
@@ -332,7 +336,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                        goto back;
                bz = newtag->bounce_zone;
 
-               if (ptoa(bz->total_bpages) < maxsize) {
+               if ((newtag->flags & BUS_DMA_ALLOCALL) == 0 &&
+                   ptoa(bz->total_bpages) < maxsize) {
                        int pages;
 
                        if (flags & BUS_DMA_ONEBPAGE) {
@@ -372,6 +377,7 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
+                               free_bounce_zone(dmat);
                                if (dmat->segments != NULL)
                                        kfree(dmat->segments, M_DEVBUF);
                                kfree(dmat, M_DEVBUF);
@@ -433,15 +439,16 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
-               if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
+               if (dmat->flags & BUS_DMA_ALLOCALL) {
+                       maxpages = Maxmem - atop(dmat->lowaddr);
+               } else if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
                        maxpages = max_bounce_pages;
                } else {
                        maxpages = MIN(max_bounce_pages,
                                       Maxmem - atop(dmat->lowaddr));
                }
-               if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
-                || (dmat->map_count > 0
-                 && bz->total_bpages < maxpages)) {
+               if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
+                   (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
                        int pages;
 
                        if (flags & BUS_DMA_ONEBPAGE) {
@@ -455,7 +462,8 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
                                error = ENOMEM;
 
                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
-                               if (!error)
+                               if (!error &&
+                                   (dmat->flags & BUS_DMA_ALLOCALL) == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
@@ -824,6 +832,9 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                        segments, dmat->nsegments,
                        NULL, flags, &lastaddr, &nsegs, 1);
        if (error == EINPROGRESS) {
+               KKASSERT((dmat->flags &
+                         (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL)) !=
+                        (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL));
                bus_dma_tag_unlock(dmat);
                return error;
        }
@@ -1083,15 +1094,20 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
 
        lwkt_gettoken(&bounce_zone_tok);
 
-       /* Check to see if we already have a suitable zone */
-       STAILQ_FOREACH(bz, &bounce_zone_list, links) {
-               if (dmat->alignment <= bz->alignment &&
-                   dmat->lowaddr >= bz->lowaddr) {
-                       lwkt_reltoken(&bounce_zone_tok);
-
-                       dmat->bounce_zone = bz;
-                       kfree(new_bz, M_DEVBUF);
-                       return 0;
+       if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
+               /*
+                * For shared bounce zone, check to see
+                * if we already have a suitable zone
+                */
+               STAILQ_FOREACH(bz, &bounce_zone_list, links) {
+                       if (dmat->alignment <= bz->alignment &&
+                           dmat->lowaddr >= bz->lowaddr) {
+                               lwkt_reltoken(&bounce_zone_tok);
+
+                               dmat->bounce_zone = bz;
+                               kfree(new_bz, M_DEVBUF);
+                               return 0;
+                       }
                }
        }
        bz = new_bz;
@@ -1106,10 +1122,16 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = round_page(dmat->alignment);
-       ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
-       busdma_zonecount++;
        ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
-       STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
+
+       if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
+               ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
+               busdma_zonecount++;
+               STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
+       } else {
+               ksnprintf(bz->zoneid, 8, "zone%d", busdma_priv_zonecount);
+               busdma_priv_zonecount--;
+       }
 
        lwkt_reltoken(&bounce_zone_tok);
 
@@ -1201,6 +1223,56 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
        return count;
 }
 
+static void
+free_bounce_pages_all(bus_dma_tag_t dmat)
+{
+       struct bounce_zone *bz = dmat->bounce_zone;
+       struct bounce_page *bpage;
+
+       BZ_LOCK(bz);
+
+       while ((bpage = STAILQ_FIRST(&bz->bounce_page_list)) != NULL) {
+               STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
+
+               KKASSERT(total_bounce_pages > 0);
+               total_bounce_pages--;
+
+               KKASSERT(bz->total_bpages > 0);
+               bz->total_bpages--;
+
+               KKASSERT(bz->free_bpages > 0);
+               bz->free_bpages--;
+
+               contigfree((void *)bpage->vaddr, PAGE_SIZE, M_DEVBUF);
+               kfree(bpage, M_DEVBUF);
+       }
+       if (bz->total_bpages) {
+               kprintf("#%d bounce pages are still in use\n",
+                       bz->total_bpages);
+               print_backtrace(-1);
+       }
+
+       BZ_UNLOCK(bz);
+}
+
+static void
+free_bounce_zone(bus_dma_tag_t dmat)
+{
+       struct bounce_zone *bz = dmat->bounce_zone;
+
+       if (bz == NULL)
+               return;
+
+       if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0)
+               return;
+
+       free_bounce_pages_all(dmat);
+       dmat->bounce_zone = NULL;
+
+       sysctl_ctx_free(&bz->sysctl_ctx);
+       kfree(bz, M_DEVBUF);
+}
+
 /* Assume caller holds bounce zone spinlock */
 static int
 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
sys/platform/pc64/x86_64/busdma_machdep.c
index 9f3f557..fe62403 100644
@@ -144,6 +144,8 @@ static int busdma_zonecount;
 static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
        STAILQ_HEAD_INITIALIZER(bounce_zone_list);
 
+static int busdma_priv_zonecount = -1;
+
 int busdma_swi_pending;
 static int total_bounce_pages;
 static int max_bounce_pages = MAX_BPAGES;
@@ -173,6 +175,8 @@ static struct bus_dmamap nobounce_dmamap;
 
 static int             alloc_bounce_zone(bus_dma_tag_t);
 static int             alloc_bounce_pages(bus_dma_tag_t, u_int, int);
+static void            free_bounce_pages_all(bus_dma_tag_t);
+static void            free_bounce_zone(bus_dma_tag_t);
 static int             reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
 static void            return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
 static bus_addr_t      add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
@@ -332,7 +336,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                        goto back;
                bz = newtag->bounce_zone;
 
-               if (ptoa(bz->total_bpages) < maxsize) {
+               if ((newtag->flags & BUS_DMA_ALLOCALL) == 0 &&
+                   ptoa(bz->total_bpages) < maxsize) {
                        int pages;
 
                        if (flags & BUS_DMA_ONEBPAGE) {
@@ -372,6 +377,7 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
+                               free_bounce_zone(dmat);
                                if (dmat->segments != NULL)
                                        kfree(dmat->segments, M_DEVBUF);
                                kfree(dmat, M_DEVBUF);
@@ -433,15 +439,16 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
-               if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
+               if (dmat->flags & BUS_DMA_ALLOCALL) {
+                       maxpages = Maxmem - atop(dmat->lowaddr);
+               } else if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
                        maxpages = max_bounce_pages;
                } else {
                        maxpages = MIN(max_bounce_pages,
                                       Maxmem - atop(dmat->lowaddr));
                }
-               if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
-                || (dmat->map_count > 0
-                 && bz->total_bpages < maxpages)) {
+               if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
+                   (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
                        int pages;
 
                        if (flags & BUS_DMA_ONEBPAGE) {
@@ -455,7 +462,8 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
                                error = ENOMEM;
 
                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
-                               if (!error)
+                               if (!error &&
+                                   (dmat->flags & BUS_DMA_ALLOCALL) == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
@@ -824,6 +832,9 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                        segments, dmat->nsegments,
                        NULL, flags, &lastaddr, &nsegs, 1);
        if (error == EINPROGRESS) {
+               KKASSERT((dmat->flags &
+                         (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL)) !=
+                        (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL));
                bus_dma_tag_unlock(dmat);
                return error;
        }
@@ -1083,15 +1094,20 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
 
        lwkt_gettoken(&bounce_zone_tok);
 
-       /* Check to see if we already have a suitable zone */
-       STAILQ_FOREACH(bz, &bounce_zone_list, links) {
-               if (dmat->alignment <= bz->alignment &&
-                   dmat->lowaddr >= bz->lowaddr) {
-                       lwkt_reltoken(&bounce_zone_tok);
-
-                       dmat->bounce_zone = bz;
-                       kfree(new_bz, M_DEVBUF);
-                       return 0;
+       if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
+               /*
+                * For shared bounce zone, check to see
+                * if we already have a suitable zone
+                */
+               STAILQ_FOREACH(bz, &bounce_zone_list, links) {
+                       if (dmat->alignment <= bz->alignment &&
+                           dmat->lowaddr >= bz->lowaddr) {
+                               lwkt_reltoken(&bounce_zone_tok);
+
+                               dmat->bounce_zone = bz;
+                               kfree(new_bz, M_DEVBUF);
+                               return 0;
+                       }
                }
        }
        bz = new_bz;
@@ -1106,10 +1122,16 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = round_page(dmat->alignment);
-       ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
-       busdma_zonecount++;
        ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
-       STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
+
+       if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
+               ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
+               busdma_zonecount++;
+               STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
+       } else {
+               ksnprintf(bz->zoneid, 8, "zone%d", busdma_priv_zonecount);
+               busdma_priv_zonecount--;
+       }
 
        lwkt_reltoken(&bounce_zone_tok);
 
@@ -1201,6 +1223,56 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
        return count;
 }
 
+static void
+free_bounce_pages_all(bus_dma_tag_t dmat)
+{
+       struct bounce_zone *bz = dmat->bounce_zone;
+       struct bounce_page *bpage;
+
+       BZ_LOCK(bz);
+
+       while ((bpage = STAILQ_FIRST(&bz->bounce_page_list)) != NULL) {
+               STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
+
+               KKASSERT(total_bounce_pages > 0);
+               total_bounce_pages--;
+
+               KKASSERT(bz->total_bpages > 0);
+               bz->total_bpages--;
+
+               KKASSERT(bz->free_bpages > 0);
+               bz->free_bpages--;
+
+               contigfree((void *)bpage->vaddr, PAGE_SIZE, M_DEVBUF);
+               kfree(bpage, M_DEVBUF);
+       }
+       if (bz->total_bpages) {
+               kprintf("#%d bounce pages are still in use\n",
+                       bz->total_bpages);
+               print_backtrace(-1);
+       }
+
+       BZ_UNLOCK(bz);
+}
+
+static void
+free_bounce_zone(bus_dma_tag_t dmat)
+{
+       struct bounce_zone *bz = dmat->bounce_zone;
+
+       if (bz == NULL)
+               return;
+
+       if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0)
+               return;
+
+       free_bounce_pages_all(dmat);
+       dmat->bounce_zone = NULL;
+
+       sysctl_ctx_free(&bz->sysctl_ctx);
+       kfree(bz, M_DEVBUF);
+}
+
 /* Assume caller holds bounce zone spinlock */
 static int
 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
sys/sys/bus_dma.h
index 2c3a0fe..6acedd5 100644
@@ -96,6 +96,8 @@
 #define        BUS_DMA_ALIGNED         0x0200  /* no bpage should be allocated due to
                                         * alignment requirement; all to-be-
                                         * loaded memory is properly aligned */
+#define BUS_DMA_PRIVBZONE      0x0400  /* need private bounce zone */
+#define BUS_DMA_ALLOCALL       0x0800  /* allocate all needed resources */
 
 /* Forwards needed by prototypes below. */
 struct mbuf;