// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#define PIPE_PARANOIA /* for now */
/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}
29 /* covers iovec and kvec alike */
30 #define iterate_iovec(i, n, base, len, off, __p, STEP) { \
32 size_t skip = i->iov_offset; \
34 len = min(n, __p->iov_len - skip); \
36 base = __p->iov_base + skip; \
41 if (skip < __p->iov_len) \
47 i->iov_offset = skip; \
#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}
#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}
#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base;			\
			size_t len;				\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}
static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
					   unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = pipe_buf(pipe, i_head);
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif /* PIPE_PARANOIA */
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;
	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	buf->flags = 0;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}
/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
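
/*
 * Usage sketch (hypothetical caller, not part of this file): write paths
 * commonly pair fault_in_iov_iter_readable() with a copy done under a lock
 * that must not be held across a page fault.  "my_copy_locked" is an
 * assumed helper that fails with -EFAULT on an unresolved fault.
 *
 *	ssize_t my_write(struct my_ctx *ctx, struct iov_iter *from)
 *	{
 *		size_t bytes = iov_iter_count(from);
 *		ssize_t ret = my_copy_locked(ctx, from);
 *
 *		while (ret == -EFAULT) {
 *			// Fault the pages in with no locks held.  The return
 *			// value is the number of bytes NOT faulted in, so a
 *			// return of @bytes means no progress is possible.
 *			if (fault_in_iov_iter_readable(from, bytes) == bytes)
 *				break;
 *			ret = my_copy_locked(ctx, from);
 *		}
 *		return ret;
 *	}
 */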
/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
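
/*
 * Usage sketch (hypothetical, assumes @ubuf is a valid user pointer): wrap
 * a single user segment and copy kernel data out to it.  Real callers
 * usually obtain their iovec array via import_iovec() instead.
 *
 *	struct iovec v = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &v, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);  // READ: iter is the destination
 */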
static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(pipe_buf(i->pipe, iter_head)) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		/* try to use the tail of the last buffer first */
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->flags = 0;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int i_head;
	size_t r;

	if (!sanity(i))
		return 0;

	bytes = push_pipe(i, bytes, &i_head, &r);
	while (bytes) {
		size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		r = 0;
		i_head++;
		bytes -= chunk;
		off += chunk;
	}
	*sump = sum;
	i->count -= off;
	return off;
}
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
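
/*
 * Usage note (sketch, hypothetical caller): copy_to_iter() advances the
 * iterator by however much was actually copied, so a short copy can be
 * detected without recomputing positions:
 *
 *	size_t copied = copy_to_iter(buf, n, iter);
 *	if (copied != n)
 *		return -EFAULT;	// user memory went away mid-copy
 */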
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}
static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	unsigned int valid = pipe->head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	n = push_pipe(i, bytes, &i_head, &off);
	while (n) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		unsigned long rem;
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		if (chunk) {
			i->head = i_head;
			i->iov_offset = off + chunk;
			xfer += chunk;
			valid = i_head + 1;
		}
		if (rem) {
			pipe->bufs[i_head & p_mask].len -= rem;
			pipe_discard_from(pipe, valid);
			break;
		}
		n -= chunk;
		off = 0;
		i_head++;
	}
	i->count -= xfer;
	return xfer;
}
/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
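
/*
 * Usage sketch (hypothetical dax/pmem-style read): a machine check
 * truncates the transfer instead of crashing, and the caller turns a
 * fully-failed copy into -EIO:
 *
 *	size_t done = _copy_mc_to_iter(pmem_addr, len, iter);
 *	if (!done && len)
 *		return -EIO;	// poison right at the start
 *	return done;		// possibly short, never past the poison
 */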
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}
static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
				struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	} else {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
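
/*
 * Usage sketch (hypothetical page-cache-style read loop): copy page after
 * page into the destination iterator; "my_lookup_page" is an assumed
 * helper returning the cached page for @index.
 *
 *	while (bytes) {
 *		struct page *page = my_lookup_page(mapping, index);
 *		size_t n = min_t(size_t, bytes, PAGE_SIZE - offset);
 *
 *		n = copy_page_to_iter(page, offset, n, to);
 *		if (!n)
 *			return -EFAULT;	// destination faulted
 *		bytes -= n;
 *		offset = 0;
 *		index++;
 *	}
 */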
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (page_copy_sane(page, offset, bytes)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}
		pipe->head = p_head;
	}
}
static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}
static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
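
/*
 * Usage sketch (hypothetical): a caller that hands the iterator to an
 * operation which may partially consume it can undo that consumption on
 * failure; "my_op" is an assumed helper that advances @iter as it goes.
 *
 *	size_t before = iov_iter_count(iter);
 *	int ret = my_op(iter);
 *	if (ret < 0)		// give back whatever my_op() consumed
 *		iov_iter_revert(iter, before - iov_iter_count(iter));
 */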
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);
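
/*
 * Usage sketch (hypothetical): expose a kernel buffer through the common
 * iterator interface so the same copy helpers work for kernel and user
 * destinations alike:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);  // iter is the destination
 *	n = copy_to_iter(src, len, &iter);
 */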
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);
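
/*
 * Usage sketch (hypothetical): iterate over a page the caller already
 * owns, described by a bio_vec, and treat it as the data source:
 *
 *	struct bio_vec bv = {
 *		.bv_page	= page,
 *		.bv_len		= len,
 *		.bv_offset	= offset,
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, WRITE, &bv, 1, len);  // iter is the source
 *	n = copy_from_iter(dst, len, &iter);
 */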
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);
/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);
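
/*
 * Usage sketch (hypothetical): throw away @count bytes from a producer
 * that can only emit data through an iterator, e.g. to skip the unwanted
 * head of a message; "my_receive" is an assumed helper filling the iter.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, count);
 *	n = my_receive(src, &iter);	// data lands nowhere, iter advances
 */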
static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}
static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}
/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_pipe(i)) {
		unsigned int p_mask = i->pipe->ring_size - 1;
		size_t size = i->count;

		if (size & len_mask)
			return false;
		if (size && allocated(&i->pipe->bufs[i->head & p_mask])) {
			if (i->iov_offset & addr_mask)
				return false;
		}
		return true;
	}

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
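
/*
 * Usage sketch (hypothetical direct-I/O path): require 512-byte aligned
 * addresses and lengths before attempting zero-copy; the masks are the
 * low bits that must not be set:
 *
 *	if (!iov_iter_is_aligned(iter, 511, 511))
 *		return -EINVAL;		// or fall back to buffered I/O
 */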
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}
static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size && i->iov_offset && allocated(pipe_buf(i->pipe, i->head)))
			return size | i->iov_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}
static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}
/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)i->iov[k].iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}
/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		for (int k = 0; k < n; k++)
			get_page(*pages++ = page++);
		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);
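
/*
 * Usage sketch (hypothetical zero-copy path): pin up to 16 pages of the
 * iterator; @start receives the offset into the first page, and the
 * caller owns one reference per returned page:
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t bytes = iov_iter_get_pages(iter, pages, want, 16, &start);
 *
 *	if (bytes <= 0)
 *		return bytes ? bytes : -EFAULT;
 *	n = DIV_ROUND_UP(bytes + start, PAGE_SIZE);
 *	my_map_for_dma(pages, n, start, bytes);	// assumed helper
 *	for (k = 0; k < n; k++)
 *		put_page(pages[k]);
 *	iov_iter_advance(iter, bytes);		// mark the bytes consumed
 */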
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}
static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {
			kvfree(p);
			*pages = NULL;
			return res;
		}
		*pages = p;
		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		for (int k = 0; k < n; k++)
			get_page(*p++ = page++);
		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}
static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);
static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}
static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}
struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
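
/*
 * Usage sketch (hypothetical readv-style handler): pull the user iovec
 * array in, use the iterator, then free whatever may have been allocated;
 * kfree(iov) is always safe here, even when the fast array was used.
 * "my_do_read" is an assumed helper that fills the iterator.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = my_do_read(&iter);
 *	kfree(iov);
 *	return ret;
 */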
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations performed
 * in between may have advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
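
/*
 * Usage sketch (hypothetical): retry an operation that may have partially
 * consumed the iterator, e.g. an io_uring-style resubmission; "my_try_rw"
 * and "my_retry_rw_blocking" are assumed helpers that advance @iter.
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = my_try_rw(iter);
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);	// back to the saved position
 *		ret = my_retry_rw_blocking(iter);
 *	}
 */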