kernel - MPSAFE work - Finish tokenizing vm_page.c
[dragonfly.git] / sys / platform / vkernel64 / platform / copyio.c
CommitLineData
da673940
JG
1/*
2 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
da673940
JG
33 */
34
35#include <sys/types.h>
36#include <sys/systm.h>
0e6594a8 37#include <cpu/lwbuf.h>
da673940
JG
38#include <vm/vm_page.h>
39#include <vm/vm_extern.h>
40#include <assert.h>
41
42#include <sys/stat.h>
43#include <sys/mman.h>
44
0e6594a8
SW
45#include <sys/mplock2.h>
46
da673940
JG
/*
 * A bcopy that works during low level boot, before FP is working.
 *
 * On the vkernel this simply forwards to bcopy (which handles
 * overlapping regions); a separate symbol is retained for early-boot
 * callers.
 */
void
ovbcopy(const void *src, void *dst, size_t len)
{
	bcopy(src, dst, len);
}
55
/*
 * Integer-register bcopy variant.
 *
 * NOTE(review): on real hardware bcopyi presumably avoids FP/SSE
 * registers; on the vkernel it is a plain bcopy forward.
 */
void
bcopyi(const void *src, void *dst, size_t len)
{
	bcopy(src, dst, len);
}
61
/*
 * Copy a NUL-terminated string from one kernel address to another,
 * moving at most len bytes.  On success *lencopied (when non-NULL)
 * receives the byte count including the terminator.
 *
 * Returns 0 on success, ENAMETOOLONG if no terminator was found
 * within len bytes (*lencopied is left untouched in that case).
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
{
	const char *src = kfaddr;
	char *dst = kdaddr;
	size_t n;

	for (n = 0; n < len; ++n) {
		dst[n] = src[n];
		if (src[n] == 0) {
			if (lencopied)
				*lencopied = n + 1;
			return (0);
		}
	}
	return (ENAMETOOLONG);
}
76
77/*
78 * Copies a NUL-terminated string from user space to kernel space.
79 * The number of bytes copied, including the terminator, is returned in
80 * (*res).
81 *
82 * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
83 */
84int
85copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
86{
87 int error;
88 size_t n;
89 const char *uptr = udaddr;
90 char *kptr = kaddr;
91
92 if (res)
93 *res = 0;
94 while (len) {
95 n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
96 if (n > 32)
97 n = 32;
98 if (n > len)
99 n = len;
100 if ((error = copyin(uptr, kptr, n)) != 0)
101 return(error);
102 while (n) {
103 if (res)
104 ++*res;
105 if (*kptr == 0)
106 return(0);
107 ++kptr;
108 ++uptr;
109 --n;
110 --len;
111 }
112
113 }
114 return(ENAMETOOLONG);
115}
116
/*
 * Copy a binary buffer from user space to kernel space.
 *
 * NOTE: on a real system copyin/copyout are MP safe, but the current
 * implementation on a vkernel is not so we get the mp lock.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	vm_page_t m;
	int error;
	size_t n;

	error = 0;
	get_mplock();
	while (len) {
		/*
		 * Fault in the user page backing udaddr; the page comes
		 * back held (released via vm_page_unhold below).
		 * vm_fault_page() sets error on failure.
		 */
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ,
				  VM_FAULT_NORMAL, &error);
		if (error)
			break;
		/*
		 * Copy at most up to the end of the current page.
		 */
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		/*
		 * Map the held page via a lwbuf and copy from it.
		 */
		lwb = lwbuf_alloc(m);
		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
		      kaddr, n);
		len -= n;
		udaddr = (const char *)udaddr + n;
		kaddr = (char *)kaddr + n;
		/*
		 * Release the temporary mapping before dropping the
		 * page hold.
		 */
		lwbuf_free(lwb);
		vm_page_unhold(m);
	}
	rel_mplock();
	return (error);
}
157
/*
 * Copy a binary buffer from kernel space to user space.
 *
 * Like copyin, serialized under the mp lock on the vkernel.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	vm_page_t m;
	int error;
	size_t n;

	error = 0;
	get_mplock();
	while (len) {
		/*
		 * Fault in the target user page with write permission;
		 * the page comes back held.  vm_fault_page() sets error
		 * on failure.
		 */
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ|VM_PROT_WRITE,
				  VM_FAULT_NORMAL, &error);
		if (error)
			break;
		/*
		 * Copy at most up to the end of the current page.
		 */
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		/*
		 * Map the held page via a lwbuf and copy into it.
		 */
		lwb = lwbuf_alloc(m);
		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
		      ((vm_offset_t)udaddr & PAGE_MASK), n);
		len -= n;
		udaddr = (char *)udaddr + n;
		kaddr = (const char *)kaddr + n;
		/*
		 * Mark the page dirty while we still hold it, then
		 * release the mapping before dropping the hold.
		 */
		vm_page_dirty(m);
		lwbuf_free(lwb);
		vm_page_unhold(m);
	}
	rel_mplock();
	return (error);
}
196
/*
 * Fetch the byte at the specified user address.
 *
 * Returns the byte value (0-255) on success, -1 on failure.
 */
int
fubyte(const void *base)
{
	unsigned char c;

	if (copyin(base, &c, 1) != 0)
		return (-1);
	return ((int)c);
}
209
/*
 * Store a byte at the specified user address.
 *
 * Returns 0 on success, -1 on failure.
 */
int
subyte(void *base, int byte)
{
	unsigned char c = byte;

	return ((copyout(&c, base, 1) == 0) ? 0 : -1);
}
222
/*
 * Fetch a machine word from user space.  Despite the historical
 * "32 bit" naming this transfers a long (64 bits on vkernel64).
 *
 * Returns the value on success, -1 on failure.
 */
long
fuword(const void *base)
{
	long v;

	if (copyin(base, &v, sizeof(v)) != 0)
		return (-1);
	return (v);
}
235
/*
 * Store a machine word (long) to user space.
 *
 * Returns 0 on success, -1 on failure.
 */
int
suword(void *base, long word)
{
	return ((copyout(&word, base, sizeof(word)) == 0) ? 0 : -1);
}
246
/*
 * Fetch a short word (16 bits) from user space.
 *
 * Returns the value (0-65535) on success, -1 on failure.
 */
int
fusword(void *base)
{
	unsigned short sw;

	if (copyin(base, &sw, sizeof(sw)) != 0)
		return (-1);
	return ((int)sw);
}
259
/*
 * Store a short word (16 bits) to user space.
 *
 * Returns 0 on success, -1 on failure.
 */
int
susword(void *base, int word)
{
	unsigned short sw = word;

	return ((copyout(&sw, base, sizeof(sw)) == 0) ? 0 : -1);
}