/*
 * (MPSAFE)
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)device_pager.c      8.1 (Berkeley) 6/11/93
 * $FreeBSD: src/sys/vm/device_pager.c,v 1.46.2.1 2000/08/02 21:54:37 peter Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <sys/mutex2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

static void dev_pager_dealloc (vm_object_t);
static int dev_pager_getpage (vm_object_t, vm_page_t *, int);
static void dev_pager_putpages (vm_object_t, vm_page_t *, int,
                boolean_t, int *);
static boolean_t dev_pager_haspage (vm_object_t, vm_pindex_t);

/* freelist of fictitious pages backing device mappings */
static TAILQ_HEAD(, vm_page) dev_freepages_list =
                TAILQ_HEAD_INITIALIZER(dev_freepages_list);
static MALLOC_DEFINE(M_FICTITIOUS_PAGES, "device-mapped pages",
                "Device mapped pages");

static vm_page_t dev_pager_getfake (vm_paddr_t);
static void dev_pager_putfake (vm_page_t);

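/*
 * Pager operations vector for OBJT_DEVICE objects.  The VM pager layer
 * dispatches dealloc, getpage, putpages, and haspage requests for
 * device-backed objects through this table.
 */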
struct pagerops devicepagerops = {
        dev_pager_dealloc,
        dev_pager_getpage,
        dev_pager_putpages,
        dev_pager_haspage
};

static struct mtx dev_pager_mtx = MTX_INITIALIZER;

/*
 * No requirements.
 */
vm_object_t
dev_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
{
        cdev_t dev;
        vm_object_t object;
        unsigned int npages;
        vm_offset_t off;

        /*
         * Make sure this device can be mapped.
         */
        dev = handle;

        /*
         * The offset must be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        size = round_page64(size);

        /*
         * Check that the specified range of the device allows the desired
         * protection.
         *
         * XXX assumes VM_PROT_* == PROT_*
         */
        npages = OFF_TO_IDX(size);
        for (off = foff; npages--; off += PAGE_SIZE) {
                if (dev_dmmap(dev, off, (int)prot) == -1)
                        return (NULL);
        }

        /*
         * Look up the pager object for this device, creating it as
         * necessary.
         */
        mtx_lock(&dev_pager_mtx);
        object = dev->si_object;
        if (object == NULL) {
                /*
                 * Allocate an object and associate it with the pager.
                 */
                object = vm_object_allocate(OBJT_DEVICE,
                                            OFF_TO_IDX(foff + size));
                object->handle = handle;
                TAILQ_INIT(&object->un_pager.devp.devp_pglist);
                dev->si_object = object;
        } else {
                /*
                 * Gain a reference to the existing object and, if
                 * necessary, extend it to cover the requested mapping.
                 */
                vm_object_hold(object);
                vm_object_reference_locked(object);
                if (OFF_TO_IDX(foff + size) > object->size)
                        object->size = OFF_TO_IDX(foff + size);
                vm_object_drop(object);
        }
        mtx_unlock(&dev_pager_mtx);

        return (object);
}
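
/*
 * Usage sketch: dev_pager_alloc() is normally reached from the kernel's
 * device mmap path rather than called by drivers directly.  Roughly:
 *
 *      vm_object_t obj;
 *
 *      obj = dev_pager_alloc(dev, size, VM_PROT_READ, foff);
 *      if (obj == NULL)
 *              error = EINVAL;         (offset unaligned or range refused)
 *
 * The device only has to implement a d_mmap method; dev_pager_alloc()
 * probes it via dev_dmmap() for every page in [foff, foff + size) and
 * refuses the mapping if the device rejects any page.  The exact call
 * path depends on the platform's mmap glue, so treat this as
 * illustrative only.
 */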

/*
 * No requirements.
 */
static void
dev_pager_dealloc(vm_object_t object)
{
        vm_page_t m;
        cdev_t dev;

        mtx_lock(&dev_pager_mtx);

        if ((dev = object->handle) != NULL) {
                KKASSERT(dev->si_object);
                dev->si_object = NULL;
        }
        KKASSERT(object->swblock_count == 0);

        /*
         * Free up our fake pages.
         */
        while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != NULL) {
                TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
                dev_pager_putfake(m);
        }
        mtx_unlock(&dev_pager_mtx);
}

/*
 * No requirements.
 */
static int
dev_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
        vm_offset_t offset;
        vm_paddr_t paddr;
        vm_page_t page;
        cdev_t dev;
        int prot;

        mtx_lock(&dev_pager_mtx);

        page = *mpp;
        dev = object->handle;
        offset = page->pindex;
        prot = PROT_READ;       /* XXX should pass in? */

        paddr = pmap_phys_address(
                    dev_dmmap(dev, (vm_offset_t)offset << PAGE_SHIFT, prot));
        KASSERT(paddr != -1, ("dev_pager_getpage: map function returns error"));

        if (page->flags & PG_FICTITIOUS) {
                /*
                 * If the passed-in page is already one of our fake pages,
                 * just update it with the new physical address.
                 */
                page->phys_addr = paddr;
                page->valid = VM_PAGE_BITS_ALL;
        } else {
                /*
                 * Replace the passed-in page with our own fake page and
                 * free the original.
                 */
                page = dev_pager_getfake(paddr);
                TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
                                  page, pageq);
                vm_object_hold(object);
                vm_page_free(*mpp);
                if (vm_page_insert(page, object, offset) == FALSE) {
                        panic("dev_pager_getpage: page (%p,%ld) exists",
                              object, offset);
                }
                vm_object_drop(object);
        }
        mtx_unlock(&dev_pager_mtx);
        return (VM_PAGER_OK);
}
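
/*
 * Lifecycle note: the page ultimately mapped is always a fictitious page
 * whose phys_addr points at device memory.  The first fault on a given
 * index frees the real VM page passed in via *mpp and inserts a fake
 * page from dev_pager_getfake() in its place; subsequent faults find
 * that fake page and only refresh its physical address.  All fake pages
 * are handed back to the freelist in dev_pager_dealloc().
 */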

/*
 * No requirements.
 *
 * Device memory is never paged out, so this should never be reached.
 */
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m,
                   int count, boolean_t sync, int *rtvals)
{
        panic("dev_pager_putpages called");
}

/*
 * No requirements.
 */
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
        return (TRUE);
}

/*
 * The caller must hold dev_pager_mtx.
 */
static vm_page_t
dev_pager_getfake(vm_paddr_t paddr)
{
        vm_page_t m;

        if ((m = TAILQ_FIRST(&dev_freepages_list)) != NULL) {
                TAILQ_REMOVE(&dev_freepages_list, m, pageq);
        } else {
                m = kmalloc(sizeof(*m), M_FICTITIOUS_PAGES, M_WAITOK);
        }
        bzero(m, sizeof(*m));

        m->flags = PG_BUSY | PG_FICTITIOUS;
        m->valid = VM_PAGE_BITS_ALL;
        m->dirty = 0;
        m->busy = 0;
        m->queue = PQ_NONE;
        m->object = NULL;

        m->wire_count = 1;
        m->hold_count = 0;
        m->phys_addr = paddr;

        return (m);
}

/*
 * Synthesized VM pages must be structurally stable for lockless lookups to
 * work properly.
 *
 * The caller must hold dev_pager_mtx.
 */
static void
dev_pager_putfake(vm_page_t m)
{
        if (!(m->flags & PG_FICTITIOUS))
                panic("dev_pager_putfake: bad page");
        KKASSERT(m->object == NULL);
        TAILQ_INSERT_HEAD(&dev_freepages_list, m, pageq);
}
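
/*
 * Design note: retired fake pages are kept on dev_freepages_list rather
 * than kfree()'d.  This is what provides the structural stability noted
 * above: a racing lockless lookup may still be dereferencing the vm_page
 * after it is released here, so its storage must continue to look like
 * a valid vm_page rather than being returned to the allocator.
 */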