kernel - MPSAFE work - Finish tokenizing vm_page.c
[dragonfly.git] / sys / vm / device_pager.c
CommitLineData
984263bc 1/*
99ad9bc4 2 * (MPSAFE)
8e7c4729 3 *
984263bc
MD
4 * Copyright (c) 1990 University of Utah.
5 * Copyright (c) 1991, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)device_pager.c 8.1 (Berkeley) 6/11/93
41 * $FreeBSD: src/sys/vm/device_pager.c,v 1.46.2.1 2000/08/02 21:54:37 peter Exp $
42 */
43
44#include <sys/param.h>
45#include <sys/systm.h>
27db6797 46#include <sys/kernel.h>
984263bc
MD
47#include <sys/conf.h>
48#include <sys/mman.h>
335dda38 49#include <sys/device.h>
27db6797
MD
50#include <sys/queue.h>
51#include <sys/malloc.h>
2f1821ca 52#include <sys/thread2.h>
8e7c4729 53#include <sys/mutex2.h>
984263bc
MD
54
55#include <vm/vm.h>
56#include <vm/vm_object.h>
57#include <vm/vm_page.h>
58#include <vm/vm_pager.h>
59#include <vm/vm_zone.h>
60
1388df65 61static void dev_pager_dealloc (vm_object_t);
1b9d3514 62static int dev_pager_getpage (vm_object_t, vm_page_t *, int);
1388df65
RG
63static void dev_pager_putpages (vm_object_t, vm_page_t *, int,
64 boolean_t, int *);
1b9d3514 65static boolean_t dev_pager_haspage (vm_object_t, vm_pindex_t);
984263bc
MD
66
67/* list of device pager objects */
107e9bcc
MD
68static TAILQ_HEAD(, vm_page) dev_freepages_list =
69 TAILQ_HEAD_INITIALIZER(dev_freepages_list);
8e7c4729
MD
70static MALLOC_DEFINE(M_FICTITIOUS_PAGES, "device-mapped pages",
71 "Device mapped pages");
984263bc 72
6ef943a3 73static vm_page_t dev_pager_getfake (vm_paddr_t);
1388df65 74static void dev_pager_putfake (vm_page_t);
984263bc 75
984263bc 76struct pagerops devicepagerops = {
984263bc 77 dev_pager_dealloc,
1b9d3514 78 dev_pager_getpage,
984263bc 79 dev_pager_putpages,
107e9bcc 80 dev_pager_haspage
984263bc
MD
81};
82
8e7c4729
MD
83static struct mtx dev_pager_mtx = MTX_INITIALIZER;
84
85/*
86 * No requirements.
87 */
5a648714 88vm_object_t
57f7b636 89dev_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
984263bc 90{
b13267a5 91 cdev_t dev;
984263bc
MD
92 vm_object_t object;
93 unsigned int npages;
94 vm_offset_t off;
95
96 /*
97 * Make sure this device can be mapped.
98 */
99 dev = handle;
984263bc
MD
100
101 /*
102 * Offset should be page aligned.
103 */
104 if (foff & PAGE_MASK)
105 return (NULL);
106
aecf2182 107 size = round_page64(size);
984263bc
MD
108
109 /*
110 * Check that the specified range of the device allows the desired
111 * protection.
112 *
113 * XXX assumes VM_PROT_* == PROT_*
114 */
115 npages = OFF_TO_IDX(size);
335dda38
MD
116 for (off = foff; npages--; off += PAGE_SIZE) {
117 if (dev_dmmap(dev, off, (int)prot) == -1)
984263bc 118 return (NULL);
335dda38 119 }
984263bc
MD
120
121 /*
984263bc
MD
122 * Look up pager, creating as necessary.
123 */
8e7c4729 124 mtx_lock(&dev_pager_mtx);
d28e1355 125 object = dev->si_object;
984263bc
MD
126 if (object == NULL) {
127 /*
128 * Allocate object and associate it with the pager.
129 */
130 object = vm_object_allocate(OBJT_DEVICE,
8e7c4729 131 OFF_TO_IDX(foff + size));
984263bc
MD
132 object->handle = handle;
133 TAILQ_INIT(&object->un_pager.devp.devp_pglist);
d28e1355 134 dev->si_object = object;
984263bc
MD
135 } else {
136 /*
137 * Gain a reference to the object.
138 */
139 vm_object_reference(object);
140 if (OFF_TO_IDX(foff + size) > object->size)
141 object->size = OFF_TO_IDX(foff + size);
142 }
8e7c4729 143 mtx_unlock(&dev_pager_mtx);
984263bc
MD
144
145 return (object);
146}
147
8e7c4729
MD
/*
 * Tear down a device pager object: sever the device's cached pointer to
 * it (so a later dev_pager_alloc() creates a fresh object) and return
 * all fictitious pages it instantiated to the global free list.
 *
 * No requirements (acquires dev_pager_mtx internally).
 */
static void
dev_pager_dealloc(vm_object_t object)
{
	vm_page_t m;
	cdev_t dev;

	mtx_lock(&dev_pager_mtx);

	if ((dev = object->handle) != NULL) {
		KKASSERT(dev->si_object);
		dev->si_object = NULL;
	}
	/* OBJT_DEVICE objects never carry swap blocks */
	KKASSERT(object->swblock_count == 0);

	/*
	 * Free up our fake pages.  dev_pager_putfake() requires
	 * dev_pager_mtx, which we already hold.
	 */
	while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != 0) {
		TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
		dev_pager_putfake(m);
	}
	mtx_unlock(&dev_pager_mtx);
}
174
8e7c4729
MD
/*
 * Fault in one page of a device-backed object.  The device's d_mmap
 * routine translates the page index into a physical address; the result
 * is published either by converting the caller's page in place (if it is
 * already fictitious) or by substituting a newly synthesized fictitious
 * page for it and freeing the original.
 *
 * Always returns VM_PAGER_OK; an unmappable offset panics via the
 * KASSERT below.
 *
 * No requirements (acquires dev_pager_mtx internally).
 */
static int
dev_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	vm_offset_t offset;
	vm_paddr_t paddr;
	vm_page_t page;
	cdev_t dev;
	int prot;

	mtx_lock(&dev_pager_mtx);

	page = *mpp;
	dev = object->handle;
	offset = page->pindex;
	prot = PROT_READ;	/* XXX should pass in? */

	/* Ask the device to map this page index to a physical address */
	paddr = pmap_phys_address(
		    dev_dmmap(dev, (vm_offset_t)offset << PAGE_SHIFT, prot));
	KASSERT(paddr != -1,("dev_pager_getpage: map function returns error"));

	if (page->flags & PG_FICTITIOUS) {
		/*
		 * If the passed in reqpage page is a fake page, update it
		 * with the new physical address.
		 */
		page->phys_addr = paddr;
		page->valid = VM_PAGE_BITS_ALL;
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all the original pages.  The page replacement
		 * in the object is done under vm_token inside a critical
		 * section.
		 */
		page = dev_pager_getfake(paddr);
		TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
				  page, pageq);
		lwkt_gettoken(&vm_token);
		crit_enter();
		vm_page_free(*mpp);
		vm_page_insert(page, object, offset);
		crit_exit();
		lwkt_reltoken(&vm_token);
	}
	mtx_unlock(&dev_pager_mtx);
	return (VM_PAGER_OK);
}
223
8e7c4729
MD
/*
 * Device pages are fictitious and are never paged out, so any attempt
 * to flush them indicates a bug elsewhere in the VM system.
 *
 * No requirements.
 */
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m,
		   int count, boolean_t sync, int *rtvals)
{
	panic("dev_pager_putpage called");
}
233
8e7c4729
MD
/*
 * Device objects nominally back every page index; the real validity
 * check is deferred to the device's d_mmap routine at getpage time.
 *
 * No requirements.
 */
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return (TRUE);
}
242
8e7c4729
MD
243/*
244 * The caller must hold dev_pager_mtx
245 */
984263bc 246static vm_page_t
57e43348 247dev_pager_getfake(vm_paddr_t paddr)
984263bc
MD
248{
249 vm_page_t m;
250
27db6797
MD
251 if ((m = TAILQ_FIRST(&dev_freepages_list)) != NULL) {
252 TAILQ_REMOVE(&dev_freepages_list, m, pageq);
253 } else {
254 m = kmalloc(sizeof(*m), M_FICTITIOUS_PAGES, M_WAITOK);
255 }
256 bzero(m, sizeof(*m));
984263bc
MD
257
258 m->flags = PG_BUSY | PG_FICTITIOUS;
259 m->valid = VM_PAGE_BITS_ALL;
260 m->dirty = 0;
261 m->busy = 0;
262 m->queue = PQ_NONE;
263 m->object = NULL;
264
265 m->wire_count = 1;
266 m->hold_count = 0;
267 m->phys_addr = paddr;
268
269 return (m);
270}
271
27db6797
MD
272/*
273 * Synthesized VM pages must be structurally stable for lockless lookups to
274 * work properly.
8e7c4729
MD
275 *
276 * The caller must hold dev_pager_mtx
27db6797 277 */
984263bc 278static void
57e43348 279dev_pager_putfake(vm_page_t m)
984263bc
MD
280{
281 if (!(m->flags & PG_FICTITIOUS))
282 panic("dev_pager_putfake: bad page");
fad57d0e 283 KKASSERT(m->object == NULL);
27db6797 284 TAILQ_INSERT_HEAD(&dev_freepages_list, m, pageq);
984263bc 285}
fad57d0e 286