kernel - Additional fixes for vm_prefault issue
[dragonfly.git] / sys / vm / device_pager.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)device_pager.c      8.1 (Berkeley) 6/11/93
 * $FreeBSD: src/sys/vm/device_pager.c,v 1.46.2.1 2000/08/02 21:54:37 peter Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <sys/mutex2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

static void dev_pager_dealloc (vm_object_t);
static int dev_pager_getpage (vm_object_t, vm_page_t *, int);
static void dev_pager_putpages (vm_object_t, vm_page_t *, int,
                boolean_t, int *);
static boolean_t dev_pager_haspage (vm_object_t, vm_pindex_t);
/* freelist of fictitious pages available for reuse by the device pager */
static TAILQ_HEAD(, vm_page) dev_freepages_list =
                TAILQ_HEAD_INITIALIZER(dev_freepages_list);
static MALLOC_DEFINE(M_FICTITIOUS_PAGES, "device-mapped pages",
                "Device mapped pages");

static vm_page_t dev_pager_getfake (vm_paddr_t);
static void dev_pager_putfake (vm_page_t);

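/*
 * Pager operations vector for OBJT_DEVICE objects
 * (dealloc, getpage, putpages, haspage).
 */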
struct pagerops devicepagerops = {
        dev_pager_dealloc,
        dev_pager_getpage,
        dev_pager_putpages,
        dev_pager_haspage
};

static struct mtx dev_pager_mtx = MTX_INITIALIZER;

/*
 * No requirements.
 */
vm_object_t
dev_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
{
        cdev_t dev;
        vm_object_t object;
        unsigned int npages;
        vm_offset_t off;

        /*
         * Make sure this device can be mapped.
         */
        dev = handle;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        size = round_page64(size);

        /*
         * Check that the specified range of the device allows the desired
         * protection.
         *
         * XXX assumes VM_PROT_* == PROT_*
         */
        npages = OFF_TO_IDX(size);
        for (off = foff; npages--; off += PAGE_SIZE) {
                if (dev_dmmap(dev, off, (int)prot) == -1)
                        return (NULL);
        }

        /*
         * Look up pager, creating as necessary.
         */
        mtx_lock(&dev_pager_mtx);
        object = dev->si_object;
        if (object == NULL) {
                /*
                 * Allocate object and associate it with the pager.
                 */
                object = vm_object_allocate(OBJT_DEVICE,
                                            OFF_TO_IDX(foff + size));
                object->handle = handle;
                TAILQ_INIT(&object->un_pager.devp.devp_pglist);
                dev->si_object = object;
        } else {
                /*
                 * Gain a reference to the object.
                 */
                vm_object_reference(object);
                if (OFF_TO_IDX(foff + size) > object->size)
                        object->size = OFF_TO_IDX(foff + size);
        }
        mtx_unlock(&dev_pager_mtx);

        return (object);
}

/*
 * No requirements.
 */
static void
dev_pager_dealloc(vm_object_t object)
{
        vm_page_t m;
        cdev_t dev;

        mtx_lock(&dev_pager_mtx);

        if ((dev = object->handle) != NULL) {
                KKASSERT(dev->si_object);
                dev->si_object = NULL;
        }
        KKASSERT(object->swblock_count == 0);

        /*
         * Free up our fake pages.
         */
        while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != 0) {
                TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
                dev_pager_putfake(m);
        }
        mtx_unlock(&dev_pager_mtx);
}

/*
 * No requirements.
 */
static int
dev_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
        vm_offset_t offset;
        vm_paddr_t paddr;
        vm_page_t page;
        cdev_t dev;
        int prot;

        mtx_lock(&dev_pager_mtx);

        page = *mpp;
        dev = object->handle;
        offset = page->pindex;
        prot = PROT_READ;       /* XXX should pass in? */

        paddr = pmap_phys_address(
                    dev_dmmap(dev, (vm_offset_t)offset << PAGE_SHIFT, prot));
        KASSERT(paddr != -1, ("dev_pager_getpage: map function returns error"));

        if (page->flags & PG_FICTITIOUS) {
                /*
                 * If the passed in reqpage page is a fake page, update it
                 * with the new physical address.
                 */
                page->phys_addr = paddr;
                page->valid = VM_PAGE_BITS_ALL;
        } else {
                /*
                 * Replace the passed in reqpage page with our own fake page
                 * and free up all the original pages.
                 */
                page = dev_pager_getfake(paddr);
                TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
                                  page, pageq);
                lwkt_gettoken(&vm_token);
                vm_object_hold(object);
                crit_enter();
                vm_page_free(*mpp);
                vm_page_insert(page, object, offset);
                crit_exit();
                vm_object_drop(object);
                lwkt_reltoken(&vm_token);
        }
        mtx_unlock(&dev_pager_mtx);
        return (VM_PAGER_OK);
}

/*
 * No requirements.
 */
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m,
                   int count, boolean_t sync, int *rtvals)
{
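        /*
         * Fictitious device pages are never paged out, so this entry
         * point is not expected to be reached.
         */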
        panic("dev_pager_putpage called");
}

/*
 * No requirements.
 */
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
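        /*
         * Any offset within the device object is considered resident;
         * the actual page is synthesized on demand by dev_pager_getpage().
         */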
        return (TRUE);
}

/*
 * The caller must hold dev_pager_mtx
 */
static vm_page_t
dev_pager_getfake(vm_paddr_t paddr)
{
        vm_page_t m;

        if ((m = TAILQ_FIRST(&dev_freepages_list)) != NULL) {
                TAILQ_REMOVE(&dev_freepages_list, m, pageq);
        } else {
                m = kmalloc(sizeof(*m), M_FICTITIOUS_PAGES, M_WAITOK);
        }
        bzero(m, sizeof(*m));

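        /*
         * Initialize the fictitious page: busy, wired, fully valid, not on
         * any page queue and not associated with any object, with the
         * physical address supplied by the caller.
         */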
        m->flags = PG_BUSY | PG_FICTITIOUS;
        m->valid = VM_PAGE_BITS_ALL;
        m->dirty = 0;
        m->busy = 0;
        m->queue = PQ_NONE;
        m->object = NULL;

        m->wire_count = 1;
        m->hold_count = 0;
        m->phys_addr = paddr;

        return (m);
}

/*
 * Synthesized VM pages must be structurally stable for lockless lookups to
 * work properly.
 *
 * The caller must hold dev_pager_mtx
 */
static void
dev_pager_putfake(vm_page_t m)
{
        if (!(m->flags & PG_FICTITIOUS))
                panic("dev_pager_putfake: bad page");
        KKASSERT(m->object == NULL);
        TAILQ_INSERT_HEAD(&dev_freepages_list, m, pageq);
}