2 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * $FreeBSD: src/sys/dev/ata/ata-dma.c,v 1.147 2007/04/08 21:53:52 sos Exp $
27 * $DragonFly: src/sys/dev/disk/nata/ata-dma.c,v 1.5 2007/07/23 19:26:09 dillon Exp $
32 #include <sys/param.h>
34 #include <sys/bus_dma.h>
35 #include <sys/endian.h>
36 #include <sys/malloc.h>
38 #include <sys/resourcevar.h>
40 #include <machine/bus_dma.h>
/*
 * Local prototypes.  These are the method implementations that
 * ata_dmainit() installs into the channel's ch->dma vector; callers
 * always go through those function pointers rather than calling
 * these directly.
 */
static void ata_dmaalloc(device_t);
static void ata_dmafree(device_t);
static void ata_dmasetprd(void *, bus_dma_segment_t *, int, int);
static int ata_dmaload(device_t, caddr_t, int32_t, int, void *, int *);
static int ata_dmaunload(device_t);
54 static MALLOC_DEFINE(M_ATADMA, "ata_dma", "ATA driver DMA");
57 #define MAXTABSZ PAGE_SIZE
58 #define MAXWSPCSZ PAGE_SIZE*2
/*
 * Argument block for ata_dmasetupc_cb(): carries the mapped bus
 * address (maddr) and the busdma error code back from the callback
 * to the caller of bus_dmamap_load().
 * (Member declarations are outside this view — see ata_dmasetupc_cb(),
 * which writes cba->error and cba->maddr.)
 */
struct ata_dc_cb_args {
/*
 * Set up the channel's DMA support structure.  Allocates a zeroed
 * struct ata_dma (M_INTWAIT|M_ZERO), installs the method pointers
 * used by the rest of the driver, and records the default transfer
 * constraints that ata_dmaalloc() later feeds to bus_dma_tag_create().
 */
ata_dmainit(device_t dev)
    struct ata_channel *ch = device_get_softc(dev);

    ch->dma = kmalloc(sizeof(struct ata_dma), M_ATADMA, M_INTWAIT|M_ZERO);
    /* method vector consumed by the generic ATA code */
    ch->dma->alloc = ata_dmaalloc;
    ch->dma->free = ata_dmafree;
    ch->dma->setprd = ata_dmasetprd;
    ch->dma->load = ata_dmaload;
    ch->dma->unload = ata_dmaunload;
    /* buffers must be 2-byte aligned (ATA transfers are 16-bit words) */
    ch->dma->alignment = 2;
    /* 128 * DEV_BSIZE = 64KB: classic BM-DMA segment/boundary limit */
    ch->dma->boundary = 128 * DEV_BSIZE;
    ch->dma->segsize = 128 * DEV_BSIZE;
    ch->dma->max_iosize = 128 * DEV_BSIZE;
    /* legacy busmaster DMA engines can only address the low 4GB */
    ch->dma->max_address = BUS_SPACE_MAXADDR_32BIT;
/*
 * bus_dmamap_load() callback for single-segment control structures
 * (the sg table and the workspace buffer).  Both tags are created
 * with nsegments == 1, so only segs[0] is meaningful; on success the
 * bus address is handed back through the ata_dc_cb_args cookie.
 */
ata_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
    struct ata_dc_cb_args *cba = (struct ata_dc_cb_args *)xsc;

    /* record the error; only publish the address when load succeeded */
    if (!(cba->error = error))
        cba->maddr = segs[0].ds_addr;
/*
 * Allocate the busdma resources for a channel: a parent tag, a tag +
 * preloaded map for the scatter/gather (PRD) table, a tag + map for
 * data transfers, and a tag + preloaded map for the workspace buffer.
 * On any failure the function falls through to the warning at the
 * bottom, releases ch->dma and disables DMA for the channel.
 * (The intermediate `goto error` statements / labels are outside this
 * view — NOTE(review): confirm against the full source.)
 */
ata_dmaalloc(device_t dev)
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_dc_cb_args ccba;

    /* parent tag: carries the channel-wide alignment/addressing limits */
    if (bus_dma_tag_create(NULL, ch->dma->alignment, 0,
                           ch->dma->max_address, BUS_SPACE_MAXADDR,
                           NULL, NULL, ch->dma->max_iosize,
                           ATA_DMA_ENTRIES, ch->dma->segsize,
                           0, &ch->dma->dmatag))

    /* sg tag: one page-aligned segment holding the PRD table */
    if (bus_dma_tag_create(ch->dma->dmatag, PAGE_SIZE, PAGE_SIZE,
                           ch->dma->max_address, BUS_SPACE_MAXADDR,
                           NULL, NULL, MAXTABSZ, 1, MAXTABSZ,
                           0, &ch->dma->sg_tag))

    /* data tag: actual I/O buffers, bounded by the 64KB BM-DMA boundary */
    if (bus_dma_tag_create(ch->dma->dmatag,ch->dma->alignment,ch->dma->boundary,
                           ch->dma->max_address, BUS_SPACE_MAXADDR,
                           NULL, NULL, ch->dma->max_iosize,
                           ATA_DMA_ENTRIES, ch->dma->segsize,
                           0, &ch->dma->data_tag))

    if (bus_dmamem_alloc(ch->dma->sg_tag, (void **)&ch->dma->sg, 0,

    /* map the sg table; ata_dmasetupc_cb hands back its bus address */
    if (bus_dmamap_load(ch->dma->sg_tag, ch->dma->sg_map, ch->dma->sg,
                        MAXTABSZ, ata_dmasetupc_cb, &ccba, 0) || ccba.error) {
        bus_dmamem_free(ch->dma->sg_tag, ch->dma->sg, ch->dma->sg_map);
    ch->dma->sg_bus = ccba.maddr;

    if (bus_dmamap_create(ch->dma->data_tag, 0, &ch->dma->data_map))

    /* workspace tag: one page-aligned chunk, must not cross 64KB */
    if (bus_dma_tag_create(ch->dma->dmatag, PAGE_SIZE, 64 * 1024,
                           ch->dma->max_address, BUS_SPACE_MAXADDR,
                           NULL, NULL, MAXWSPCSZ, 1, MAXWSPCSZ,
                           0, &ch->dma->work_tag))

    if (bus_dmamem_alloc(ch->dma->work_tag, (void *)&ch->dma->work, 0,
                         (void *)&ch->dma->work_map))

    /* map the workspace buffer and remember its bus address */
    if (bus_dmamap_load(ch->dma->work_tag, ch->dma->work_map,ch->dma->work,
                        MAXWSPCSZ, ata_dmasetupc_cb, &ccba, 0) || ccba.error) {
        bus_dmamem_free(ch->dma->work_tag,ch->dma->work, ch->dma->work_map);
    ch->dma->work_bus = ccba.maddr;

    /* common failure path: DMA is disabled, channel falls back to PIO */
    device_printf(dev, "WARNING - DMA allocation failed, disabling DMA\n");
    kfree(ch->dma, M_ATADMA);
/*
 * Release everything ata_dmaalloc() set up, in reverse order:
 * unload/free the workspace and sg memory first, then destroy the
 * data map and all four tags, and finally free the struct ata_dma
 * itself.  Each step is guarded so this is safe to call after a
 * partially failed ata_dmaalloc().
 */
ata_dmafree(device_t dev)
    struct ata_channel *ch = device_get_softc(dev);

    if (ch->dma->work_bus) {
        bus_dmamap_unload(ch->dma->work_tag, ch->dma->work_map);
        bus_dmamem_free(ch->dma->work_tag, ch->dma->work, ch->dma->work_map);
        /* clear stale handles so a later free/alloc cycle starts clean */
        ch->dma->work_bus = 0;
        ch->dma->work_map = NULL;
        ch->dma->work = NULL;
    if (ch->dma->work_tag) {
        bus_dma_tag_destroy(ch->dma->work_tag);
        ch->dma->work_tag = NULL;
    if (ch->dma->sg_bus) {
        bus_dmamap_unload(ch->dma->sg_tag, ch->dma->sg_map);
        bus_dmamem_free(ch->dma->sg_tag, ch->dma->sg, ch->dma->sg_map);
        ch->dma->sg_map = NULL;
    if (ch->dma->data_map) {
        bus_dmamap_destroy(ch->dma->data_tag, ch->dma->data_map);
        ch->dma->data_map = NULL;
    if (ch->dma->sg_tag) {
        bus_dma_tag_destroy(ch->dma->sg_tag);
        ch->dma->sg_tag = NULL;
    if (ch->dma->data_tag) {
        bus_dma_tag_destroy(ch->dma->data_tag);
        ch->dma->data_tag = NULL;
    if (ch->dma->dmatag) {
        bus_dma_tag_destroy(ch->dma->dmatag);
        ch->dma->dmatag = NULL;
    kfree(ch->dma, M_ATADMA);
/*
 * bus_dmamap_load() callback that turns the segment list into a
 * Physical Region Descriptor (PRD) table as consumed by the legacy
 * ATA busmaster engine: 32-bit little-endian address/count pairs,
 * with the end-of-table bit set in the last entry.
 */
ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
    struct ata_dmasetprd_args *args = xsc;
    struct ata_dma_prdentry *prd = args->dmatab;

    /* stash the error for the caller; bail out on failure
     * (NOTE(review): the return after this test is outside this view) */
    if ((args->error = error))

    for (i = 0; i < nsegs; i++) {
        /* PRD entries are little-endian regardless of host byte order */
        prd[i].addr = htole32(segs[i].ds_addr);
        prd[i].count = htole32(segs[i].ds_len);
    /* mark the final descriptor so the engine stops there */
    prd[i - 1].count |= htole32(ATA_DMA_EOT);
/*
 * Load a data buffer for DMA.  Validates the request (not already
 * loaded, non-zero length, alignment, size limit), maps the buffer
 * through the data tag using the channel's setprd callback to build
 * the PRD table at `addr`, returns the segment count via `entries`,
 * syncs the maps for the upcoming transfer and records the transfer
 * state in ch->dma.  dir != 0 means a device->host (read) transfer.
 */
ata_dmaload(device_t dev, caddr_t data, int32_t count, int dir,
            void *addr, int *entries)
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_dmasetprd_args cba;
    /* rate-limit the oversized-transfer complaint to once a second */
    static struct krate krate_nata_ovdma = { .freq = 1 };

    if (ch->dma->flags & ATA_DMA_LOADED) {
        device_printf(dev, "FAILURE - already active DMA on this device\n");
        device_printf(dev, "FAILURE - zero length DMA transfer attempted\n");
        /* a zero-length request indicates a driver bug upstream */
        panic("zero length DMA transfer");

    /* both buffer address and length must honor the 2-byte alignment */
    if (((uintptr_t)data & (ch->dma->alignment - 1)) ||
        (count & (ch->dma->alignment - 1))) {
        device_printf(dev, "FAILURE - non aligned DMA transfer attempted\n");
    if (count > ch->dma->max_iosize) {
        krateprintf(&krate_nata_ovdma,
                    "%s: FAILURE - oversized DMA transfer "
                    device_get_nameunit(dev), count, ch->dma->max_iosize);

    /* map the buffer; ch->dma->setprd fills the PRD table via cba */
    if ((error = bus_dmamap_load(ch->dma->data_tag, ch->dma->data_map,
                                 data, count, ch->dma->setprd, &cba,
                                 BUS_DMA_NOWAIT)) || (error = cba.error))

    *entries = cba.nsegs;

    /* flush the freshly written PRD table toward the device */
    bus_dmamap_sync(ch->dma->sg_tag, ch->dma->sg_map, BUS_DMASYNC_PREWRITE);

    bus_dmamap_sync(ch->dma->data_tag, ch->dma->data_map,
                    dir ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

    ch->dma->cur_iosize = count;
    ch->dma->flags = dir ? (ATA_DMA_LOADED | ATA_DMA_READ) : ATA_DMA_LOADED;
/*
 * Tear down a mapping established by ata_dmaload(): post-sync the sg
 * and data maps (direction chosen from the recorded ATA_DMA_READ
 * flag), unload the data map and clear the per-transfer state.
 * A no-op when no transfer is currently loaded.
 */
ata_dmaunload(device_t dev)
    struct ata_channel *ch = device_get_softc(dev);

    if (ch->dma->flags & ATA_DMA_LOADED) {
        bus_dmamap_sync(ch->dma->sg_tag, ch->dma->sg_map,
                        BUS_DMASYNC_POSTWRITE);

        bus_dmamap_sync(ch->dma->data_tag, ch->dma->data_map,
                        (ch->dma->flags & ATA_DMA_READ) ?
                        BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ch->dma->data_tag, ch->dma->data_map);

        ch->dma->cur_iosize = 0;
        ch->dma->flags &= ~ATA_DMA_LOADED;