/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD: src/sys/vm/vm_mmap.c,v 1.108.2.6 2002/07/02 20:06:19 dillon Exp $
 * $DragonFly: src/sys/vm/vm_mmap.c,v 1.30 2006/06/05 07:26:11 dillon Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <sys/thread2.h>

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough that multi-threaded processes are not unduly inconvenienced.
 */

static void vmmapentry_rsrc_init (void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(void *dummy)
{
	max_proc_mmap = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
			sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}
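
/*
 * Worked example of the sizing above (illustrative figures only; the
 * real numbers depend on the platform):  with a 1 GB kernel virtual
 * address range and a vm_map_entry of roughly 64 bytes,
 *
 *	(1024 * 1024 * 1024 / 64) / 100  ~=  167,000
 *
 * map entries may exist per process before mmap() starts failing with
 * ENOMEM (see the check in kern_mmap() below).
 */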

/* ARGSUSED */
int
sys_sbrk(struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * sstk_args(int incr)
 */
/* ARGSUSED */
int
sys_sstk(struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * mmap_args(void *addr, size_t len, int prot, int flags, int fd,
 *	     long pad, off_t pos)
 *
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 */
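
/*
 * Illustration of the alignment rule above (hypothetical userland
 * call; assumes a 4096-byte PAGE_SIZE and a valid descriptor fd):
 *
 *	void *p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 4100);
 *
 * Here pageoff = 4100 & PAGE_MASK = 4, so the mapping actually begins
 * at file offset 4096 and p is returned pointing 4 bytes into the
 * first page, i.e. at the byte corresponding to file offset 4100.
 */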

int
kern_mmap(caddr_t uaddr, size_t ulen, int uprot, int uflags, int fd,
	  off_t upos, void **res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	int disablexworkaround;
	off_t pos;
	struct vmspace *vms = p->p_vmspace;
	vm_object_t obj;

	KKASSERT(p);

	addr = (vm_offset_t) uaddr;
	size = ulen;
	prot = uprot & VM_PROT_ALL;
	flags = uflags;
	pos = upos;

	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) ulen < 0 ||
	    ((flags & MAP_ANON) && fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
			return (EINVAL);
#ifndef i386
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
#endif
		if (addr + size < addr)
			return (EINVAL);
	}
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr == 0 ||
	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
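
	/*
	 * Example of the placement above (illustrative value): with a
	 * maxdsiz of 512 MB, a mmap(NULL, ...) call whose hint falls in
	 * the heap range is placed at or beyond vm_daddr + 512 MB, so
	 * later brk()/sbrk() growth of the data segment cannot collide
	 * with the new mapping.
	 */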

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		fp = holdfp(p->p_fd, fd, -1);
		if (fp == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_NOSYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
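
		/*
		 * Sketch of that path from userland (hypothetical names):
		 *
		 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
		 *	ftruncate(fd, 8192);
		 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
		 *		       MAP_SHARED, fd, 0);
		 *
		 * shm_open() sets FPOSIXSHM on the descriptor, so the
		 * check above ORs MAP_NOSYNC into the flags automatically.
		 */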
		vp = (struct vnode *) fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type == VREG) {
			/*
			 * Get the proper underlying object
			 */
			if ((obj = vp->v_object) == NULL) {
				error = EINVAL;
				goto done;
			}
			KKASSERT(vp == (struct vnode *)obj->handle);
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 *
			 * However, for the XIG X server to continue to work,
			 * we should allow the superuser to do it anyway.
			 * We only allow it at securelevel < 1.
			 * (Because the XIG X server writes directly to video
			 * memory via /dev/mem, it should never work at any
			 * other securelevel.)
			 * XXX this will have to go
			 */
			if (securelevel >= 1)
				disablexworkaround = 1;
			else
				disablexworkaround = suser(td);
			if (vp->v_type == VCHR && disablexworkaround &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 ||
			    (vp->v_type == VCHR && disablexworkaround)) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error = VOP_GETATTR(vp, &va))) {
						goto done;
					}
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}
			handle = (void *)vp;
		}
	}
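
	/*
	 * Consequence of the checks above, from the user's point of view
	 * (hypothetical userland code):
	 *
	 *	int fd = open("file", O_RDONLY);
	 *	mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd, 0);
	 *					// fails with EACCES
	 *	mmap(NULL, len, PROT_WRITE, MAP_PRIVATE, fd, 0);
	 *					// succeeds, copy-on-write
	 *
	 * A shared mapping can never grant more access than the descriptor
	 * was opened with; private writes only ever touch anonymous copies.
	 */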

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
			flags, handle, pos);
	if (error == 0)
		*res = (void *)(addr + pageoff);
done:
	if (fp)
		fdrop(fp);
	return (error);
}

int
sys_mmap(struct mmap_args *uap)
{
	int error;

	error = kern_mmap(uap->addr, uap->len, uap->prot, uap->flags,
			  uap->fd, uap->pos, &uap->sysmsg_resultp);

	return (error);
}

/*
 * msync_args(void *addr, size_t len, int flags)
 */
int
sys_msync(struct msync_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
			  (flags & MS_INVALIDATE) != 0);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);
}
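
/*
 * Userland sketch (hypothetical) of the rules enforced above:
 *
 *	msync(p, len, MS_SYNC);			 // synchronous flush
 *	msync(p, len, MS_ASYNC | MS_INVALIDATE); // rejected, EINVAL
 *	msync(p, 0, MS_SYNC);			 // flushes the entire map
 *						 // entry containing p
 */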

/*
 * munmap_args(void *addr, size_t len)
 */
int
sys_munmap(struct munmap_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	map = &p->p_vmspace->vm_map;
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_remove(map, addr, addr + size);
	return (0);
}

/*
 * mprotect_args(const void *addr, size_t len, int prot)
 */
int
sys_mprotect(struct mprotect_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
			       FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
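
/*
 * Typical userland use of the entry point above (hypothetical buffer):
 *
 *	mprotect(buf, len, PROT_READ | PROT_EXEC);
 *
 * Note that on platforms defining VM_PROT_READ_IS_EXEC, requesting
 * PROT_READ implicitly grants execute permission as well, as the
 * translation above shows.
 */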

/*
 * minherit_args(void *addr, size_t len, int inherit)
 */
int
sys_minherit(struct minherit_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
			       inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

/*
 * madvise_args(void *addr, size_t len, int behav)
 */
/* ARGSUSED */
int
sys_madvise(struct madvise_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 &&
	    ((vm_offset_t) uap->addr + uap->len) > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && (vm_offset_t) uap->addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(&p->p_vmspace->vm_map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}

/*
 * mincore_args(const void *addr, size_t len, char *vec)
 */
/* ARGSUSED */
int
sys_mincore(struct mincore_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (VM_MAXUSER_ADDRESS > 0 && end > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
	pmap = vmspace_pmap(p->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;

				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);

				/*
				 * if the page is resident, then gather
				 * information about it.  spl protection is
				 * required to maintain the object
				 * association.  And XXX what if the page is
				 * busy?  What's the deal with that?
				 */
				crit_enter();
				m = vm_page_lookup(current->object.vm_object,
						   pindex);
				if (m && m->valid) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				crit_exit();
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex + 1, 0);
				if (error) {
					return (EFAULT);
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				return (EFAULT);
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex + 1, 0);
		if (error) {
			return (EFAULT);
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);

	return (0);
}
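
/*
 * Hypothetical userland use of the interface above: one status byte
 * is reported per page in the range.
 *
 *	char vec[npages];
 *
 *	if (mincore(base, npages * PAGE_SIZE, vec) == 0 &&
 *	    (vec[0] & MINCORE_INCORE))
 *		;	// first page is resident
 */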

/*
 * mlock_args(const void *addr, size_t len)
 */
int
sys_mlock(struct mlock_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;
	struct proc *p = curproc;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + vmstats.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser_cred(p->p_ucred, 0);
	if (error)
		return (error);
#endif

	/* new_pageable == FALSE: wire the range down */
	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * mlockall_args(int how)
 */
int
sys_mlockall(struct mlockall_args *uap)
{
	/* Not implemented; currently a no-op that reports success. */
	return 0;
}

/*
 * munlockall_args(void)
 */
int
sys_munlockall(struct munlockall_args *uap)
{
	/* Not implemented; currently a no-op that reports success. */
	return 0;
}

/*
 * munlock_args(const void *addr, size_t len)
 */
int
sys_munlock(struct munlock_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	error = suser(td);
	if (error)
		return (error);
#endif

	/* new_pageable == TRUE: make the range pageable again */
	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
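
/*
 * Illustrative kernel-internal call (a sketch, not lifted from a real
 * caller; "vms" stands in for some struct vmspace *): establishing an
 * anonymous zero-fill mapping in a target map:
 *
 *	vm_offset_t addr = 0;
 *	error = vm_mmap(&vms->vm_map, &addr, round_page(len),
 *			VM_PROT_ALL, VM_PROT_ALL,
 *			MAP_ANON | MAP_PRIVATE, NULL, 0);
 */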
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	objtype_t type;
	int rv = KERN_SUCCESS;
	off_t objsize;
	int docow;
	struct thread *td = curthread;	/* XXX */
	struct proc *p = td->td_proc;

	KKASSERT(p);

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	if (p->p_vmspace->vm_map.size + size >
	    p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		return (ENOMEM);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		type = OBJT_DEFAULT;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == NULL)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		if (vp->v_type == VCHR) {
			type = OBJT_DEVICE;
			handle = (void *)(intptr_t)vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat);
			if (error)
				return (error);
			objsize = vat.va_size;
			type = OBJT_VNODE;
			/*
			 * if it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}

	if (handle == NULL) {
		object = NULL;
		docow = 0;
	} else {
		object = vm_pager_allocate(type, handle, objsize, prot, foff);
		if (object == NULL)
			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		docow = MAP_PREFAULT_PARTIAL;
	}

	/*
	 * Force device mappings to be shared.
	 */
	if (type == OBJT_DEVICE || type == OBJT_PHYS) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	}

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;
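
	/*
	 * For example, a typical private file mapping -- i.e.
	 * mmap(..., PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0) -- has
	 * neither MAP_ANON nor MAP_SHARED set when it arrives here, so
	 * docow ends up MAP_PREFAULT_PARTIAL | MAP_COPY_ON_WRITE.
	 */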

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	if (flags & MAP_STACK) {
		rv = vm_map_stack(map, *addr, size, prot,
				  maxprot, docow);
	} else {
		rv = vm_map_find(map, object, foff, addr, size, fitit,
				 prot, maxprot, docow);
	}

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}
out:
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}