/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
/*
 * ARC buffer data (ABD).
 *
 * ABDs are an abstract data structure for the ARC which can use two
 * different ways of storing the underlying data:
 *
 * (a) Linear buffer. In this case, all the data in the ABD is stored in one
 *     contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
 *
 *         +-------------------+
 *         | ABD (linear)      |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |     +--------------------------------+
 *         |   abd_buf ------------->| raw buffer of size abd_size    |
 *         +-------------------+     +--------------------------------+
 *
 * (b) Scattered buffer. In this case, the data in the ABD is split into
 *     equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
 *     to the chunks recorded in an array at the end of the ABD structure.
 *
 *         +-------------------+
 *         | ABD (scattered)   |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |
 *         |   abd_offset = 0  |                           +-----------+
 *         |   abd_chunks[0] ----------------------------->| chunk 0   |
 *         |   abd_chunks[1] ---------------------+        +-----------+
 *         |   ...             |                  |        +-----------+
 *         |   abd_chunks[N-1] ---------+         +------->| chunk 1   |
 *         +-------------------+        |                  +-----------+
 *                                      |                      ...
 *                                      |                  +-----------+
 *                                      +----------------->| chunk N-1 |
 *                                                         +-----------+
 *
 * Linear buffers act exactly like normal buffers and are always mapped into the
 * kernel's virtual memory space, while scattered ABD data chunks are allocated
 * as physical pages and then mapped in only while they are actually being
 * accessed through one of the abd_* library functions. Using scattered ABDs
 * provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations. Each chunk is mapped in
 *     individually, so even if we weren't using segkpm (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) Use of segkpm will avoid the need for map / unmap / TLB shootdown costs
 *     on each ABD access. (If segkpm isn't available then we use all linear
 *     ABDs to avoid this penalty.) See seg_kpm.c for more details.
 *
 * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
 * B_FALSE. However, it is not possible to use scattered ABDs if segkpm is not
 * available, which is the case on all 32-bit systems and any 64-bit systems
 * where kpm_enable is turned off.
 *
 * In addition to directly allocating a linear or scattered ABD, it is also
 * possible to create an ABD by requesting the "sub-ABD" starting at an offset
 * within an existing ABD. In linear buffers this is simple (set abd_buf of
 * the new ABD to the starting point within the original raw buffer), but
 * scattered ABDs are a little more complex. The new ABD makes a copy of the
 * relevant abd_chunks pointers (but not the underlying data). However, to
 * provide arbitrary rather than only chunk-aligned starting offsets, it also
 * tracks an abd_offset field which represents the starting point of the data
 * within the first chunk in abd_chunks. For both linear and scattered ABDs,
 * creating an offset ABD marks the original ABD as the offset's parent, and the
 * original ABD's abd_children refcount is incremented. This data allows us to
 * ensure the root ABD isn't deleted before its children.
 *
 * Most consumers should never need to know what type of ABD they're using --
 * the ABD public API ensures that it's possible to transparently switch from
 * using a linear ABD to a scattered one when doing so would be beneficial.
 *
 * If you need to use the data within an ABD directly, if you know it's linear
 * (because you allocated it) you can use abd_to_buf() to access the underlying
 * raw buffer. Otherwise, you should use one of the abd_borrow_buf* functions
 * which will allocate a raw buffer if necessary. Use the abd_return_buf*
 * functions to return any raw buffers that are no longer necessary when you're
 * done using them.
 *
 * There are a variety of ABD APIs that implement basic buffer operations:
 * compare, copy, read, write, and fill with zeroes. If you need a custom
 * function which progressively accesses the whole ABD, use the abd_iterate_*
 * functions.
 */
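/*
 * A minimal usage sketch (not itself part of this file's API surface;
 * fill_buffer() is a hypothetical caller-supplied helper). A consumer that
 * wants to fill a block with data without caring about the ABD's layout
 * might do:
 *
 *	abd_t *abd = abd_alloc(size, B_FALSE);
 *	void *buf = abd_borrow_buf(abd, size);
 *	fill_buffer(buf, size);
 *	abd_return_buf_copy(abd, buf, size);
 *	...
 *	abd_free(abd);
 */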
#include <sys/abd.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/scatterlist.h>
#include <linux/kmap_compat.h>
#else
#define	MAX_ORDER	1
#endif
typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;
static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order. These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N",			KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk",		KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
};
#define	ABDSTAT(stat)		(abd_stats.stat.value.ui64)
#define	ABDSTAT_INCR(stat, val) \
	atomic_add_64(&abd_stats.stat.value.ui64, (val))
#define	ABDSTAT_BUMP(stat)	ABDSTAT_INCR(stat, 1)
#define	ABDSTAT_BUMPDOWN(stat)	ABDSTAT_INCR(stat, -1)

#define	ABD_SCATTER(abd)	(abd->abd_u.abd_scatter)
#define	ABD_BUF(abd)		(abd->abd_u.abd_linear.abd_buf)
#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;
unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
static inline size_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
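/*
 * For example, with 4 KiB pages, abd_chunkcnt_for_bytes(5000) rounds 5000 up
 * to 8192 and returns 2; a page-aligned size passes through P2ROUNDUP
 * unchanged and wastes no space in its final chunk.
 */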
#ifdef _KERNEL
#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM		__GFP_WAIT
#endif

static unsigned long
abd_alloc_chunk(int nid, gfp_t gfp, unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(nid, gfp, order);
	if (!page)
		return (0);

	return ((unsigned long) page_address(page));
}
/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone. Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction. When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;
	int order;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned long paddr;
		unsigned chunk_pages;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		paddr = abd_alloc_chunk(nid, order ? gfp_comp : gfp, order);
		if (paddr == 0) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else {
				max_order = MAX(0, order - 1);
			}
			continue;
		}

		page = virt_to_page(paddr);
		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	if (chunks > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}
	}

	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = table.nents;
}
#else
/*
 * Allocate N individual pages to construct a scatter ABD. This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces. It's designed for maximum compatibility.
 */
static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	struct scatterlist *sg;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
	}

	if (nr_pages > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
	}
}
#endif /* !CONFIG_HIGHMEM */
static void
abd_free_pages(abd_t *abd)
{
	struct scatterlist *sg;
	struct sg_table table;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;
	size_t j;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		/* Free each (possibly compound) page in this sg entry */
		for (j = 0; j < sg->length; ) {
			page = nth_page(sg_page(sg), j >> PAGE_SHIFT);
			order = compound_order(page);
			__free_pages(page, order);
			j += (PAGESIZE << order);
			ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
		}
	}

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = nr_pages;
	sg_free_table(&table);
}
#else /* _KERNEL */

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif

#define	abd_alloc_chunk(o) \
	((struct page *)umem_alloc_aligned(PAGESIZE << (o), 64, KM_SLEEP))
#define	abd_free_chunk(chunk, o)	umem_free(chunk, PAGESIZE << (o))
#define	zfs_kmap_atomic(chunk, km)	((void *)chunk)
#define	zfs_kunmap_atomic(addr, km)	do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}
static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = abd_alloc_chunk(0);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}
static void
abd_free_pages(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;
	int j;

	abd_for_each_sg(abd, sg, n, i) {
		for (j = 0; j < sg->length; j += PAGESIZE) {
			struct page *p = nth_page(sg_page(sg), j>>PAGE_SHIFT);
			abd_free_chunk(p, 0);
		}
	}

	vmem_free(ABD_SCATTER(abd).abd_sgl, n * sizeof (struct scatterlist));
}

#endif /* _KERNEL */
void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		abd_ksp->ks_data = &abd_stats;
		kstat_install(abd_ksp);

		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
	}
}

void
abd_fini(void)
{
	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}
static inline void
abd_verify(abd_t *abd)
{
	ASSERT3U(abd->abd_size, >, 0);
	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
	    ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
	    ABD_FLAG_MULTI_CHUNK));
	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd)) {
		ASSERT3P(abd->abd_u.abd_linear.abd_buf, !=, NULL);
	} else {
		size_t n;
		int i = 0;
		struct scatterlist *sg;

		ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
		ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
		    ABD_SCATTER(abd).abd_sgl->length);
		n = ABD_SCATTER(abd).abd_nents;
		abd_for_each_sg(abd, sg, n, i) {
			ASSERT3P(sg_page(sg), !=, NULL);
		}
	}
}
static inline abd_t *
abd_alloc_struct(void)
{
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);

	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

static inline void
abd_free_struct(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
/*
 * Allocate an ABD, along with its own underlying data buffers. Use this if you
 * don't care whether the ABD is linear or not.
 */
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
	abd_t *abd;

	if (!zfs_abd_scatter_enabled || size <= PAGESIZE)
		return (abd_alloc_linear(size, is_metadata));

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd = abd_alloc_struct();
	abd->abd_flags = ABD_FLAG_OWNER;
	abd_alloc_pages(abd, size);

	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	abd->abd_u.abd_scatter.abd_offset = 0;

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    P2ROUNDUP(size, PAGESIZE) - size);

	return (abd);
}
static void
abd_free_scatter(abd_t *abd)
{
	abd_free_pages(abd);

	refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    abd->abd_size - P2ROUNDUP(abd->abd_size, PAGESIZE));

	abd_free_struct(abd);
}
/*
 * Allocate an ABD that must be linear, along with its own underlying data
 * buffer. Only use this when it would be very annoying to write your ABD
 * consumer with a scattered ABD.
 */
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
	abd_t *abd = abd_alloc_struct();

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
	abd->abd_flags = ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	if (is_metadata) {
		abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
	} else {
		abd->abd_u.abd_linear.abd_buf = zio_data_buf_alloc(size);
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, size);

	return (abd);
}
static void
abd_free_linear(abd_t *abd)
{
	if (abd->abd_flags & ABD_FLAG_META) {
		zio_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	} else {
		zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	}

	refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);

	abd_free_struct(abd);
}
/*
 * Free an ABD. Only use this on ABDs allocated with abd_alloc() or
 * abd_alloc_linear().
 */
void
abd_free(abd_t *abd)
{
	abd_verify(abd);
	ASSERT3P(abd->abd_parent, ==, NULL);
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd))
		abd_free_linear(abd);
	else
		abd_free_scatter(abd);
}
/*
 * Allocate an ABD of the same format (same metadata flag, same scatterize
 * setting) as another ABD.
 */
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
	boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
	if (abd_is_linear(sabd)) {
		return (abd_alloc_linear(size, is_metadata));
	} else {
		return (abd_alloc(size, is_metadata));
	}
}
/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * On Illumos this is linear ABDs; however, if ldi_strategy() can ever issue
 * I/Os using a scatter/gather list we should switch to that and replace this
 * call with vanilla abd_alloc().
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages, thereby eliminating
 * the copy. But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc_linear(size, is_metadata));
}
/*
 * Allocate a new ABD to point to offset off of sabd. It shares the underlying
 * buffer data with sabd. Use abd_put() to free. sabd must not be freed while
 * any derived ABDs exist.
 */
static inline abd_t *
abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
{
	abd_t *abd;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	if (abd_is_linear(sabd)) {
		abd = abd_alloc_struct();

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = ABD_FLAG_LINEAR;

		abd->abd_u.abd_linear.abd_buf =
		    (char *)sabd->abd_u.abd_linear.abd_buf + off;
	} else {
		int i = 0;
		struct scatterlist *sg;
		size_t new_offset = sabd->abd_u.abd_scatter.abd_offset + off;

		abd = abd_alloc_struct();

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = 0;

		abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
			if (new_offset < sg->length)
				break;
			new_offset -= sg->length;
		}

		ABD_SCATTER(abd).abd_sgl = sg;
		ABD_SCATTER(abd).abd_offset = new_offset;
		ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
	}

	abd->abd_size = size;
	abd->abd_parent = sabd;
	refcount_create(&abd->abd_children);
	(void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);

	return (abd);
}
abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
	size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;

	VERIFY3U(size, >, 0);

	return (abd_get_offset_impl(sabd, off, size));
}

abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
	ASSERT3U(off + size, <=, sabd->abd_size);

	return (abd_get_offset_impl(sabd, off, size));
}
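/*
 * A sketch of typical sub-ABD usage: to operate on only the second half of a
 * 128K block without copying it, a caller could do
 *
 *	abd_t *half = abd_get_offset_size(abd, 64 << 10, 64 << 10);
 *	... use half with the other abd_* APIs ...
 *	abd_put(half);
 *
 * The parent must outlive the child; abd_put() drops the child's hold on
 * the parent's abd_children refcount.
 */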
/*
 * Allocate a linear ABD structure for buf. You must free this with abd_put()
 * since the resulting ABD doesn't own its own buffer.
 */
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
	abd_t *abd = abd_alloc_struct();

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	/*
	 * Even if this buf is filesystem metadata, we only track that if we
	 * own the underlying data buffer, which is not true in this case.
	 * Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags = ABD_FLAG_LINEAR;
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	abd->abd_u.abd_linear.abd_buf = buf;

	return (abd);
}
/*
 * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
 * free the underlying scatterlist or buffer.
 */
void
abd_put(abd_t *abd)
{
	abd_verify(abd);
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));

	if (abd->abd_parent != NULL) {
		(void) refcount_remove_many(&abd->abd_parent->abd_children,
		    abd->abd_size, abd);
	}

	refcount_destroy(&abd->abd_children);
	abd_free_struct(abd);
}
/*
 * Get the raw buffer associated with a linear ABD.
 */
void *
abd_to_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	abd_verify(abd);
	return (abd->abd_u.abd_linear.abd_buf);
}
/*
 * Borrow a raw buffer from an ABD without copying the contents of the ABD
 * into the buffer. If the ABD is scattered, this will allocate a raw buffer
 * whose contents are undefined. To copy over the existing data in the ABD, use
 * abd_borrow_buf_copy() instead.
 */
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
	void *buf;
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		buf = abd_to_buf(abd);
	} else {
		buf = zio_buf_alloc(n);
	}
	(void) refcount_add_many(&abd->abd_children, n, buf);

	return (buf);
}

void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf(abd, n);
	if (!abd_is_linear(abd)) {
		abd_copy_to_buf(buf, abd, n);
	}
	return (buf);
}
/*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
 * not change the contents of the ABD and will ASSERT that you didn't modify
 * the buffer since it was borrowed. If you want any changes you made to buf to
 * be copied back to abd, use abd_return_buf_copy() instead.
 */
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		ASSERT3P(buf, ==, abd_to_buf(abd));
	} else {
		ASSERT0(abd_cmp_buf(abd, buf, n));
		zio_buf_free(buf, n);
	}
	(void) refcount_remove_many(&abd->abd_children, n, buf);
}

void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
	if (!abd_is_linear(abd)) {
		abd_copy_from_buf(abd, buf, n);
	}
	abd_return_buf(abd, buf, n);
}
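/*
 * A borrow/return sketch (transform_in_place() is a hypothetical helper that
 * rewrites the buffer contents):
 *
 *	void *buf = abd_borrow_buf_copy(abd, abd->abd_size);
 *	transform_in_place(buf, abd->abd_size);
 *	abd_return_buf_copy(abd, buf, abd->abd_size);
 *
 * If the borrowed buffer is only read, use abd_return_buf() instead; for a
 * scattered ABD it ASSERTs that the copy was left unmodified.
 */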
/*
 * Give this ABD ownership of the buffer that it's storing. Can only be used on
 * linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
 * with abd_alloc_linear() which subsequently released ownership of their buf
 * with abd_release_ownership_of_buf().
 */
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
	abd_verify(abd);

	abd->abd_flags |= ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
}

void
abd_release_ownership_of_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	abd_verify(abd);

	abd->abd_flags &= ~ABD_FLAG_OWNER;
	/* Disable this flag since we no longer own the data buffer */
	abd->abd_flags &= ~ABD_FLAG_META;

	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}
#ifndef HAVE_1ARG_KMAP_ATOMIC
#define	NR_KM_TYPE (6)
#ifdef _KERNEL
int km_table[NR_KM_TYPE] = {
	KM_USER0,
	KM_USER1,
	KM_BIO_SRC_IRQ,
	KM_BIO_DST_IRQ,
	KM_PTE0,
	KM_PTE1,
};
#endif
#endif

struct abd_iter {
	/* public interface */
	void		*iter_mapaddr;	/* addr corresponding to iter_pos */
	size_t		iter_mapsize;	/* length of data valid at mapaddr */

	/* private */
	abd_t		*iter_abd;	/* ABD being iterated through */
	size_t		iter_pos;	/* position within the ABD */
	size_t		iter_offset;	/* offset in current sg/abd_buf, */
					/* abd_offset included */
	struct scatterlist *iter_sg;	/* current sg */
#ifndef HAVE_1ARG_KMAP_ATOMIC
	int		iter_km;	/* KM_* for kmap_atomic */
#endif
};
/*
 * Initialize the abd_iter.
 */
static void
abd_iter_init(struct abd_iter *aiter, abd_t *abd, int km_type)
{
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;
	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
#ifndef HAVE_1ARG_KMAP_ATOMIC
	ASSERT3U(km_type, <, NR_KM_TYPE);
	aiter->iter_km = km_type;
#endif
}
/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already been exhausted,
 * in which case this does nothing.
 */
static void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}
/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
static void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = aiter->iter_abd->abd_u.abd_linear.abd_buf;
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg),
		    km_table[aiter->iter_km]);
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}
/*
 * Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
static void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset,
		    km_table[aiter->iter_km]);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}
int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
    abd_iter_func_t *func, void *private)
{
	int ret = 0;
	struct abd_iter aiter;

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	abd_iter_init(&aiter, abd, 0);
	abd_iter_advance(&aiter, off);

	while (size > 0) {
		size_t len;
		abd_iter_map(&aiter);

		len = MIN(aiter.iter_mapsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_mapaddr, len, private);

		abd_iter_unmap(&aiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (ret);
}
struct buf_arg {
	void	*arg_buf;
};

static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(ba_ptr->arg_buf, buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy abd to buf. (off is the offset in abd.)
 */
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
	    &ba_ptr);
}

static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
	int ret;
	struct buf_arg *ba_ptr = private;

	ret = memcmp(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (ret);
}

/*
 * Compare the contents of abd to buf. (off is the offset in abd.)
 */
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}

static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy from buf to abd. (off is the offset in abd.)
 */
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
	    &ba_ptr);
}

static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
	(void) memset(buf, 0, size);
	return (0);
}

/*
 * Zero out the abd from a particular offset to the end.
 */
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
	(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}
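/*
 * The callbacks above illustrate the pattern for any custom whole-ABD
 * operation. A minimal sketch of a consumer-side callback that counts zero
 * bytes (names hypothetical):
 *
 *	static int
 *	count_zeroes_cb(void *buf, size_t size, void *private)
 *	{
 *		uint64_t *count = private;
 *		size_t i;
 *
 *		for (i = 0; i < size; i++)
 *			*count += (((char *)buf)[i] == 0);
 *		return (0);
 *	}
 *
 *	uint64_t zeroes = 0;
 *	(void) abd_iterate_func(abd, 0, abd->abd_size,
 *	    count_zeroes_cb, &zeroes);
 *
 * A non-zero return value from the callback terminates the iteration early
 * and is passed back to the abd_iterate_func() caller.
 */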
/*
 * Iterate over two ABDs and call func incrementally on the two ABDs' data in
 * equal-sized chunks (passed to func as raw buffers). func could be called many
 * times during this iteration.
 */
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
    size_t size, abd_iter_func2_t *func, void *private)
{
	int ret = 0;
	struct abd_iter daiter, saiter;

	abd_verify(dabd);
	abd_verify(sabd);

	ASSERT3U(doff + size, <=, dabd->abd_size);
	ASSERT3U(soff + size, <=, sabd->abd_size);

	abd_iter_init(&daiter, dabd, 0);
	abd_iter_init(&saiter, sabd, 1);
	abd_iter_advance(&daiter, doff);
	abd_iter_advance(&saiter, soff);

	while (size > 0) {
		size_t dlen, slen, len;
		abd_iter_map(&daiter);
		abd_iter_map(&saiter);

		dlen = MIN(daiter.iter_mapsize, size);
		slen = MIN(saiter.iter_mapsize, size);
		len = MIN(dlen, slen);
		ASSERT(dlen > 0 || slen > 0);

		ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
		    private);

		abd_iter_unmap(&saiter);
		abd_iter_unmap(&daiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&daiter, len);
		abd_iter_advance(&saiter, len);
	}

	return (ret);
}

static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	(void) memcpy(dbuf, sbuf, size);
	return (0);
}

/*
 * Copy from sabd to dabd starting from soff and doff.
 */
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
	(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
	    abd_copy_off_cb, NULL);
}

static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
	return (memcmp(bufa, bufb, size));
}

/*
 * Compares the contents of two ABDs.
 */
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
	ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
	return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
	    abd_cmp_cb, NULL));
}
/*
 * Iterate over code ABDs and a data ABD and call @func_raidz_gen.
 *
 * @cabds          parity ABDs, must have equal size
 * @dabd           data ABD. Can be NULL (in this case @dsize = 0)
 * @func_raidz_gen should be implemented so that its behaviour
 *                 is the same when taking linear and when taking scatter
 */
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
    ssize_t csize, ssize_t dsize, const unsigned parity,
    void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
	int i;
	ssize_t len, dlen;
	struct abd_iter caiters[3];
	struct abd_iter daiter = {0};
	void *caddrs[3];
	unsigned long flags;

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++)
		abd_iter_init(&caiters[i], cabds[i], i);

	if (dabd)
		abd_iter_init(&daiter, dabd, i);

	ASSERT3S(dsize, >=, 0);

	local_irq_save(flags);
	while (csize > 0) {
		len = csize;

		if (dabd && dsize > 0)
			abd_iter_map(&daiter);

		for (i = 0; i < parity; i++) {
			abd_iter_map(&caiters[i]);
			caddrs[i] = caiters[i].iter_mapaddr;
		}

		switch (parity) {
			case 3:
				len = MIN(caiters[2].iter_mapsize, len);
			case 2:
				len = MIN(caiters[1].iter_mapsize, len);
			case 1:
				len = MIN(caiters[0].iter_mapsize, len);
		}

		/* must be progressive */
		ASSERT3S(len, >, 0);

		if (dabd && dsize > 0) {
			/* this needs precise iter.length */
			len = MIN(daiter.iter_mapsize, len);
			dlen = len;
		} else
			dlen = 0;

		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512 (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);

		for (i = parity-1; i >= 0; i--) {
			abd_iter_unmap(&caiters[i]);
			abd_iter_advance(&caiters[i], len);
		}

		if (dabd && dsize > 0) {
			abd_iter_unmap(&daiter);
			abd_iter_advance(&daiter, dlen);
			dsize -= dlen;
		}

		csize -= len;

		ASSERT3S(dsize, >=, 0);
		ASSERT3S(csize, >=, 0);
	}
	local_irq_restore(flags);
}
/*
 * Iterate over code ABDs and data reconstruction target ABDs and call
 * @func_raidz_rec. Function maps at most 6 pages atomically.
 *
 * @cabds           parity ABDs, must have equal size
 * @tabds           rec target ABDs, at most 3
 * @tsize           size of data target columns
 * @func_raidz_rec  expects syndrome data in target columns. Function
 *                  reconstructs data and overwrites target columns.
 */
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
    ssize_t tsize, const unsigned parity,
    void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
    const unsigned *mul),
    const unsigned *mul)
{
	int i;
	ssize_t len;
	struct abd_iter citers[3];
	struct abd_iter xiters[3];
	void *caddrs[3], *xaddrs[3];
	unsigned long flags;

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++) {
		abd_iter_init(&citers[i], cabds[i], 2*i);
		abd_iter_init(&xiters[i], tabds[i], 2*i+1);
	}

	local_irq_save(flags);
	while (tsize > 0) {
		for (i = 0; i < parity; i++) {
			abd_iter_map(&citers[i]);
			abd_iter_map(&xiters[i]);
			caddrs[i] = citers[i].iter_mapaddr;
			xaddrs[i] = xiters[i].iter_mapaddr;
		}

		len = tsize;
		switch (parity) {
			case 3:
				len = MIN(xiters[2].iter_mapsize, len);
				len = MIN(citers[2].iter_mapsize, len);
			case 2:
				len = MIN(xiters[1].iter_mapsize, len);
				len = MIN(citers[1].iter_mapsize, len);
			case 1:
				len = MIN(xiters[0].iter_mapsize, len);
				len = MIN(citers[0].iter_mapsize, len);
		}
		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512 (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_rec(xaddrs, len, caddrs, mul);

		for (i = parity-1; i >= 0; i--) {
			abd_iter_unmap(&xiters[i]);
			abd_iter_unmap(&citers[i]);
			abd_iter_advance(&xiters[i], len);
			abd_iter_advance(&citers[i], len);
		}

		tsize -= len;
		ASSERT3S(tsize, >=, 0);
	}
	local_irq_restore(flags);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = abd->abd_u.abd_scatter.abd_offset + off;

	return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT);
}
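/*
 * For example, with 4K pages a 9000-byte I/O whose starting position works
 * out to 100 spans ((100 + 9000 + 4095) >> 12) - (100 >> 12) = 3 pages.
 */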
/*
 * bio_map for scatter ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	int i;
	struct abd_iter aiter;

	ASSERT(!abd_is_linear(abd));
	ASSERT3U(io_size, <=, abd->abd_size - off);

	abd_iter_init(&aiter, abd, 0);
	abd_iter_advance(&aiter, off);

	for (i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}
/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");

module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");

#endif /* _KERNEL && HAVE_SPL */