2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $DragonFly: src/sys/vm/Attic/vm_copy.c,v 1.1 2004/01/18 12:29:50 dillon Exp $
29 #include <sys/param.h>
30 #include <sys/systm.h>
32 #include <sys/vnode.h>
33 #include <sys/resourcevar.h>
34 #include <sys/vmmeter.h>
37 #include <vm/vm_param.h>
40 #include <vm/vm_map.h>
41 #include <vm/vm_object.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_pageout.h>
44 #include <vm/vm_kern.h>
45 #include <vm/vm_pager.h>
46 #include <vm/vnode_pager.h>
47 #include <vm/vm_extern.h>
48 #include <vm/vm_page2.h>
51 * Perform a generic copy between two vm_map's.
53 * This code is intended to eventually replace vm_uiomove() and is already
56 * XXX do COW page optimizations if possible when allowed by page alignment
57 * and maxbytes. maxbytes - bytes represents slop space in the target
58 * buffer that can be junked (or invalidated) by the copy.
/*
 * vmspace_copy: copy up to one page-fragment of data from address saddr in
 * source vmspace 'svm' to address daddr in destination vmspace 'dvm'.
 *
 * NOTE(review): this view of the file is fragmented (lines are missing
 * between several of the statements below), so the surrounding control
 * flow — loop structure, error returns, and how the wired path relates to
 * the vm_fault path — cannot be fully confirmed from here.
 *
 * Parameters (from the visible signature):
 *	svm/saddr   - source vmspace and virtual address
 *	dvm/daddr   - destination vmspace and virtual address
 *	bytes       - number of bytes to copy (signed)
 *	maxbytes    - per the header comment above, maxbytes - bytes is slop
 *	              space in the target that may be junked by the copy
 */
61 vmspace_copy(struct vmspace *svm, vm_offset_t saddr,
62 struct vmspace *dvm, vm_offset_t daddr,
63 ssize_t bytes, ssize_t maxbytes)
/*
 * Clamp the chunk size 'n' so the copy never crosses a page boundary in
 * either the source or the destination mapping; pmap_copy_page_frag below
 * operates within single physical pages.
 */
82 if (n > PAGE_SIZE - (saddr & PAGE_MASK))
83 n = PAGE_SIZE - (saddr & PAGE_MASK);
84 if (n > PAGE_SIZE - (daddr & PAGE_MASK))
85 n = PAGE_SIZE - (daddr & PAGE_MASK);
88 * Wire and copy on a page-by-page basis. There are more efficient
89 * ways of doing this, but this is 'safe'.
/*
 * Wire the source range first; if wiring the destination then fails,
 * undo the source wiring before bailing out (partial cleanup visible
 * on the failure branch below).
 */
92 rv = vm_fault_wire(&svm->vm_map, saddr, saddr + n);
93 if (rv != KERN_SUCCESS)
95 rv = vm_fault_wire(&dvm->vm_map, daddr, daddr + n);
96 if (rv != KERN_SUCCESS) {
97 vm_fault_unwire(&svm->vm_map, saddr, saddr + n);
/*
 * Both ranges are wired and thus resident: translate the virtual
 * addresses to physical, copy the n-byte fragment physically, then
 * release both wirings.
 */
100 pa1 = pmap_extract(&svm->vm_pmap, saddr);
101 pa2 = pmap_extract(&dvm->vm_pmap, daddr);
102 pmap_copy_page_frag(pa1, pa2, n);
103 vm_fault_unwire(&svm->vm_map, saddr, saddr + n);
104 vm_fault_unwire(&dvm->vm_map, daddr, daddr + n);
/*
 * Alternate path (relationship to the wired path above is hidden by the
 * missing lines — presumably a second strategy or retry; verify against
 * the full source): look up the backing vm_page directly, faulting it in
 * with the required protection if the pmap lookup misses.
 */
107 m1 = pmap_extract_vmpage(&svm->vm_pmap, saddr, VM_PROT_READ);
109 rv = vm_fault(&svm->vm_map, saddr, VM_PROT_READ, VM_FAULT_NORMAL);
110 if (rv != KERN_SUCCESS)
114 m2 = pmap_extract_vmpage(&dvm->vm_pmap, daddr, VM_PROT_WRITE);
116 rv = vm_fault(&dvm->vm_map, daddr, VM_PROT_WRITE, VM_FAULT_NORMAL);
117 if (rv != KERN_SUCCESS)
/*
 * Copy the fragment between the two resolved pages; the page-offset bits
 * of each virtual address are OR'd onto the page's physical base address.
 */
124 pmap_copy_page_frag(m1->phys_addr | (saddr & PAGE_MASK),
125 m2->phys_addr | (daddr & PAGE_MASK), n);