2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #ifndef _LINUX_SCATTERLIST_H_
31 #define _LINUX_SCATTERLIST_H_
 * If flags bit 0 is set, then the sg field contains a pointer to the next sg
 * table list; otherwise the next entry is at sg + 1.  Whether an entry is a
 * chain pointer can be determined using the sg_is_chain() function.
 *
 * If flags bit 1 is set, then this sg entry is the last element in a list,
 * which can be determined using the sg_is_last() function.
50 struct scatterlist *sg;
59 struct scatterlist *sgl; /* the list */
60 unsigned int nents; /* number of mapped entries */
61 unsigned int orig_nents; /* original size of list */
65 * Maximum number of entries that will be allocated in one piece, if
66 * a list larger than this is required then chaining will be utilized.
68 #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
70 #define sg_dma_address(sg) (sg)->address
71 #define sg_dma_len(sg) (sg)->length
72 #define sg_page(sg) (sg)->sl_un.page
73 #define sg_scatternext(sg) (sg)->sl_un.sg
79 sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
85 if (offset > PAGE_SIZE)
86 panic("sg_set_page: Invalid offset %d\n", offset);
91 sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
93 sg_set_page(sg, virt_to_page(buf), buflen,
94 ((uintptr_t)buf) & ~PAGE_MASK);
99 sg_init_table(struct scatterlist *sg, unsigned int nents)
101 bzero(sg, sizeof(*sg) * nents);
102 sg[nents - 1].flags = SG_END;
105 static inline struct scatterlist *
106 sg_next(struct scatterlist *sg)
108 if (sg->flags & SG_END)
111 if (sg->flags & SG_CHAIN)
112 sg = sg_scatternext(sg);
117 static inline vm_paddr_t
118 sg_phys(struct scatterlist *sg)
120 return sg_page(sg)->phys_addr + sg->offset;
125 * sg_chain - Chain two sglists together
126 * @prv: First scatterlist
127 * @prv_nents: Number of entries in prv
128 * @sgl: Second scatterlist
131 * Links @prv@ and @sgl@ together, to form a longer scatterlist.
135 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
136 struct scatterlist *sgl)
139 * offset and length are unused for chain entry. Clear them.
141 struct scatterlist *sg = &prv[prv_nents - 1];
147 * Indicate a link pointer, and set the link to the second list.
149 sg->flags = SG_CHAIN;
154 * sg_mark_end - Mark the end of the scatterlist
155 * @sg: SG entryScatterlist
158 * Marks the passed in sg entry as the termination point for the sg
159 * table. A call to sg_next() on this entry will return NULL.
162 static inline void sg_mark_end(struct scatterlist *sg)
168 * __sg_free_table - Free a previously mapped sg table
169 * @table: The sg table header to use
170 * @max_ents: The maximum number of entries per single scatterlist
173 * Free an sg table previously allocated and setup with
174 * __sg_alloc_table(). The @max_ents value must be identical to
175 * that previously used with __sg_alloc_table().
179 __sg_free_table(struct sg_table *table, unsigned int max_ents)
181 struct scatterlist *sgl, *next;
183 if (unlikely(!table->sgl))
187 while (table->orig_nents) {
188 unsigned int alloc_size = table->orig_nents;
189 unsigned int sg_size;
192 * If we have more than max_ents segments left,
193 * then assign 'next' to the sg table after the current one.
194 * sg_size is then one less than alloc size, since the last
195 * element is the chain pointer.
197 if (alloc_size > max_ents) {
198 next = sgl[max_ents - 1].sl_un.sg;
199 alloc_size = max_ents;
200 sg_size = alloc_size - 1;
202 sg_size = alloc_size;
206 table->orig_nents -= sg_size;
215 * sg_free_table - Free a previously allocated sg table
216 * @table: The mapped sg table header
220 sg_free_table(struct sg_table *table)
222 __sg_free_table(table, SG_MAX_SINGLE_ALLOC);
226 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
227 * @table: The sg table header to use
228 * @nents: Number of entries in sg list
229 * @max_ents: The maximum number of entries the allocator returns per call
230 * @gfp_mask: GFP allocation mask
233 * This function returns a @table @nents long. The allocator is
234 * defined to return scatterlist chunks of maximum size @max_ents.
235 * Thus if @nents is bigger than @max_ents, the scatterlists will be
236 * chained in units of @max_ents.
239 * If this function returns non-0 (eg failure), the caller must call
240 * __sg_free_table() to cleanup any leftover allocations.
244 __sg_alloc_table(struct sg_table *table, unsigned int nents,
245 unsigned int max_ents, gfp_t gfp_mask)
247 struct scatterlist *sg, *prv;
250 memset(table, 0, sizeof(*table));
257 unsigned int sg_size, alloc_size = left;
259 if (alloc_size > max_ents) {
260 alloc_size = max_ents;
261 sg_size = alloc_size - 1;
263 sg_size = alloc_size;
267 sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
270 * Adjust entry count to reflect that the last
271 * entry of the previous table won't be used for
272 * linkage. Without this, sg_kfree() may get
276 table->nents = ++table->orig_nents;
281 sg_init_table(sg, alloc_size);
282 table->nents = table->orig_nents += sg_size;
285 * If this is the first mapping, assign the sg table header.
286 * If this is not the first mapping, chain previous part.
289 sg_chain(prv, max_ents, sg);
294 * If no more entries after this one, mark the end
297 sg_mark_end(&sg[sg_size - 1]);
306 * sg_alloc_table - Allocate and initialize an sg table
307 * @table: The sg table header to use
308 * @nents: Number of entries in sg list
309 * @gfp_mask: GFP allocation mask
312 * Allocate and initialize an sg table. If @nents@ is larger than
313 * SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
318 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
322 ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
325 __sg_free_table(table, SG_MAX_SINGLE_ALLOC);
330 #define for_each_sg(sglist, sg, sgmax, _itr) \
331 for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
333 #endif /* _LINUX_SCATTERLIST_H_ */