/*
 * Copyright (c) 2004 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.5 2004/05/13 17:40:15 dillon Exp $
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */
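/*
 * Illustrative usage sketch (an addition, not part of the original file):
 * the typical XIO lifecycle is init, hand the XIO to a consumer, then
 * release the held pages.  Only the xio_* interfaces implemented below are
 * assumed, along with xio_t pointing at a struct xio; the buffer names are
 * hypothetical.
 *
 *	struct xio xio;
 *	int error;
 *
 *	error = xio_init_kbuf(&xio, src, len);
 *	if (error == 0) {
 *		error = xio_copy_xtok(&xio, dst, xio.xio_bytes);
 *		xio_release(&xio);
 *	}
 */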
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>
#include <sys/sfbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>
/*
 * Initialize an XIO given a userspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes.
 *
 * Note that you cannot legally access a previously cached linmap with
 * a newly initialized xio until after calling xio_linmap().
 */
xio_init_ubuf(xio_t xio, void *ubase, size_t ubytes, int flags)
    addr = trunc_page((vm_offset_t)ubase);
    xio->xio_flags = flags;
    vmprot = (flags & XIOF_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
    xio->xio_offset = (vm_offset_t)ubase & PAGE_MASK;
    xio->xio_pages = xio->xio_internal_pages;
    if ((n = PAGE_SIZE - xio->xio_offset) > ubytes)
	n = ubytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if (vm_fault_quick((caddr_t)addr, vmprot) < 0)
	    break;
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	xio->xio_pages[i] = m;
	if ((n = ubytes) > PAGE_SIZE)
	    n = PAGE_SIZE;

    /*
     * If a failure occurred, clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio->xio_error = EFAULT;
    return(xio->xio_error);
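/*
 * Illustrative sketch (an addition, not original code): wiring down a user
 * buffer so a thread outside the originating vmspace can fill it.  The
 * pointer and length are hypothetical; XIOF_WRITE is requested because the
 * pages will be written to.
 *
 *	error = xio_init_ubuf(&xio, uptr, ulen, XIOF_WRITE);
 *	if (error == 0) {
 *		(pass &xio to the producing thread, wait for completion)
 *		xio_release(&xio);
 *	}
 */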
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes.
 *
 * Note that you cannot legally access a previously cached linmap with
 * a newly initialized xio until after calling xio_linmap().
 */
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_pages = xio->xio_internal_pages;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	xio->xio_pages[i] = m;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;

    /*
     * If a failure occurred, clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio->xio_error = EFAULT;
    return(xio->xio_error);
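/*
 * Illustrative sketch (an addition, not original code): xio_bytes may be
 * smaller than the requested length when the buffer spans more pages than
 * XIO_INTERNAL_PAGES, so callers should check it rather than assume the
 * whole buffer was captured.  Names are hypothetical.
 *
 *	error = xio_init_kbuf(&xio, kbuf, klen);
 *	if (error == 0 && xio.xio_bytes < klen)
 *		(only the first xio.xio_bytes bytes are covered by the XIO)
 */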
/*
 * Cleanup an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.  If a linear mapping buffer is active, it will be
 * unlocked but the mappings will be left intact for optimal reconstitution
 * in a later xio_linmap() call.
 *
 * Note that you cannot legally access the linmap on a released XIO.
 */
xio_release(xio_t xio)
    for (i = 0; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	vm_page_unhold(m);
    }
    if (xio->xio_flags & XIOF_LINMAP) {
	xio->xio_flags &= ~XIOF_LINMAP;
    xio->xio_error = ENOBUFS;
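/*
 * Illustrative sketch (an addition, not original code): a released XIO
 * reports ENOBUFS until it is reinitialized, which can serve as a cheap
 * sanity check.  Names are hypothetical.
 *
 *	xio_release(&xio);
 *	KKASSERT(xio.xio_error == ENOBUFS);
 *	error = xio_init_kbuf(&xio, kbuf, klen);
 */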
/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.  Both the UIO and the XIO are
 * modified, but the XIO's pages are not released when exhausted.
 *
 * UIO_READ		xio -> uio
 * UIO_WRITE		uio -> xio
 */
xio_uio_copy(xio_t xio, struct uio *uio, int *sizep)
    if ((bytes = xio->xio_bytes) > uio->uio_resid)
	bytes = uio->uio_resid;
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset, bytes, uio);
    xio->xio_bytes -= bytes;
    xio->xio_offset += bytes;
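/*
 * Illustrative sketch (an addition, not original code): draining an XIO
 * into a caller-supplied uio, as a read(2)-style path might do.  The uio
 * is assumed to already describe the destination with uio_rw == UIO_READ;
 * the helper name is hypothetical.
 *
 *	static int
 *	xio_read_sketch(xio_t xio, struct uio *uio)
 *	{
 *		int copied;
 *
 *		return (xio_uio_copy(xio, uio, &copied));
 *	}
 */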
/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * The XIO is modified, but the XIO's pages are not released when exhausted.
 */
xio_copy_xtou(xio_t xio, void *uptr, int bytes)
    if (bytes > xio->xio_bytes)
	return(EFAULT);
    offset = xio->xio_offset & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;
    for (i = xio->xio_offset >> PAGE_SHIFT; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	sf = sf_buf_alloc(m, SFBA_QUICK);
	error = copyout((char *)sf_buf_kva(sf) + offset, uptr, n);
	xio->xio_offset += n;
	uptr = (char *)uptr + n;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
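/*
 * Illustrative sketch (an addition, not original code): copying part of an
 * XIO out to a user address.  The copy consumes xio_offset/xio_bytes but
 * leaves the pages held, so further copies or an xio_release() may follow.
 * Names are hypothetical.
 *
 *	if ((error = xio_copy_xtou(xio, udst, nbytes)) != 0)
 *		return (error);
 */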
/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * The XIO is modified, but the XIO's pages are not released when exhausted.
 */
xio_copy_xtok(xio_t xio, void *kptr, int bytes)
    if (bytes > xio->xio_bytes)
	return(EFAULT);
    offset = xio->xio_offset & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;
    for (i = xio->xio_offset >> PAGE_SHIFT; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	sf = sf_buf_alloc(m, SFBA_QUICK);
	bcopy((char *)sf_buf_kva(sf) + offset, kptr, n);
	xio->xio_offset += n;
	kptr = (char *)kptr + n;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
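/*
 * Illustrative sketch (an addition, not original code): because the pages
 * are not released when the copy exhausts them, an XIO can be drained in
 * several calls; each call advances xio_offset and shrinks xio_bytes.
 * Names are hypothetical.
 *
 *	error = xio_copy_xtok(&xio, hdr, sizeof(hdr));
 *	if (error == 0)
 *		error = xio_copy_xtok(&xio, body, xio.xio_bytes);
 *	xio_release(&xio);
 */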