/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 * $FreeBSD: src/sys/vm/vm_swap.c,v 1.96.2.2 2001/10/14 18:46:47 iedowse Exp $
 * $DragonFly: src/sys/vm/vm_swap.c,v 1.12 2004/05/19 22:53:06 dillon Exp $
 */

#include "opt_swap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/dmap.h>		/* XXX */
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/blist.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

/*
 * Indirect driver for multi-controller paging.
 */

#ifndef NSWAPDEV
#define NSWAPDEV	4
#endif
static struct swdevt should_be_malloced[NSWAPDEV];
static struct swdevt *swdevt = should_be_malloced;
static int nswap;		/* first block after the interleaved devs */
static int nswdev = NSWAPDEV;
int vm_swap_size;

static int swapdev_strategy (struct vop_strategy_args *ap);
struct vnode *swapdev_vp;

/*
 * swapdev_strategy:
 *
 *	VOP_STRATEGY() for swapdev_vp.
 *	Perform swap strategy interleave device selection.
 *
 *	The bp is expected to be locked and *not* B_DONE on call.
 */

static int
swapdev_strategy(struct vop_strategy_args /* {
	struct vnode *a_vp;
	struct buf *a_bp;
} */ *ap)
{
	int s, sz, off, seg, index;
	struct swdevt *sp;
	struct vnode *vp;
	struct buf *bp;

	bp = ap->a_bp;
	sz = howmany(bp->b_bcount, PAGE_SIZE);

	/*
	 * Convert interleaved swap into per-device swap.  Note that
	 * the block size is left in PAGE_SIZE'd chunks (for the newswap)
	 * here.
	 */
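	/*
	 * Illustration with hypothetical values (nswdev == 2, stripe size
	 * dmmax == 4096 pages): global block 10000 yields off = 1808,
	 * seg = 2, index = 0, so the request is rewritten to block 5904
	 * on swap device 0.
	 */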
	if (nswdev > 1) {
		off = bp->b_blkno % dmmax;
		if (off + sz > dmmax) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
			biodone(bp);
			return 0;
		}
		seg = bp->b_blkno / dmmax;
		index = seg % nswdev;
		seg /= nswdev;
		bp->b_blkno = seg * dmmax + off;
	} else {
		index = 0;
	}
	sp = &swdevt[index];
	if (bp->b_blkno + sz > sp->sw_nblks) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return 0;
	}
	bp->b_dev = sp->sw_device;
	if (sp->sw_vp == NULL) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return 0;
	}

	/*
	 * Convert from PAGE_SIZE'd to DEV_BSIZE'd chunks for the actual I/O
	 */
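	/*
	 * (For example, on a configuration with 4K pages and 512-byte
	 * DEV_BSIZE sectors, ctodb(n) == n * 8.)
	 */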
	bp->b_blkno = ctodb(bp->b_blkno);

	vhold(sp->sw_vp);
	s = splvm();
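	/*
	 * For writes, move the pending-output accounting from the
	 * originating vnode to the swap device vnode: drop the original
	 * vnode's v_numoutput (waking any VBWAIT sleeper) and bump the
	 * swap vnode's count before pbreassignbuf() requeues the buffer.
	 */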
	if ((bp->b_flags & B_READ) == 0) {
		vp = bp->b_vp;
		if (vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				vp->v_flag &= ~VBWAIT;
				wakeup(&vp->v_numoutput);
			}
		}
		sp->sw_vp->v_numoutput++;
	}
	pbreassignbuf(bp, sp->sw_vp);
	splx(s);
	VOP_STRATEGY(bp->b_vp, bp);
	return 0;
}

/*
 * Create a special vnode op vector for swapdev_vp - we only use
 * VOP_STRATEGY(), everything else returns an error.
 */
vop_t **swapdev_vnodeop_p;
static struct vnodeopv_entry_desc swapdev_vnodeop_entries[] = {
	{ &vop_default_desc, (vop_t *) vop_defaultop },
	{ &vop_strategy_desc, (vop_t *) swapdev_strategy },
	{ NULL, NULL }
};
static struct vnodeopv_desc swapdev_vnodeop_opv_desc =
	{ &swapdev_vnodeop_p, swapdev_vnodeop_entries };

VNODEOP_SET(swapdev_vnodeop_opv_desc);

/*
 * swapon_args(char *name)
 *
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 */
/* ARGSUSED */
int
swapon(struct swapon_args *uap)
{
	struct thread *td = curthread;
	struct vattr attr;
	struct vnode *vp;
	struct nameidata nd;
	int error;
	struct ucred *cred;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	error = suser(td);
	if (error)
		return (error);

	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->name, td);
	error = namei(&nd);
	if (error)
		return (error);

	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	if (vn_isdisk(vp, &error))
		error = swaponvp(td, vp, 0);
	else if (vp->v_type == VREG && vp->v_tag == VT_NFS &&
	    (error = VOP_GETATTR(vp, &attr, td)) == 0) {
		/*
		 * Allow direct swapping to NFS regular files in the same
		 * way that nfs_mountroot() sets up diskless swapping.
		 */
		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
	}

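	/*
	 * namei() returned vp with a reference held.  On failure drop it
	 * here; on success the reference is retained by the swap
	 * configuration (sp->sw_vp in swaponvp()).
	 */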
	if (error)
		vrele(vp);

	return (error);
}

/*
 * swaponvp() enables swapping on the given vnode, installing it as the
 * index'th portion of the swap map.  Each of the nswdev devices provides
 * 1/nswdev'th of the swap space, which is laid out with blocks of dmmax
 * pages circularly among the devices.
 *
 * The new swap code uses page-sized blocks.  The old swap code used
 * DEV_BSIZE'd chunks.
 *
 * XXX locking when multiple swapon's run in parallel
 */
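/*
 * Interleave illustration with hypothetical values: with nswdev == 2 and
 * a stripe size of D == dmmax pages, device 0 backs global swap blocks
 * [0,D), [2D,3D), ... and device 1 backs [D,2D), [3D,4D), ..., matching
 * the mapping performed by swapdev_strategy() above.
 */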
int
swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
{
	u_long aligned_nblks;
	struct ucred *cred;
	struct swdevt *sp;
	swblk_t vsbase;
	swblk_t dvbase;
	dev_t dev;
	int index;
	int error;
	long blk;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	if (!swapdev_vp) {
		error = getnewvnode(VT_NON, NULL, swapdev_vnodeop_p,
				&swapdev_vp);
		if (error)
			panic("Cannot get vnode for swapdev");
		swapdev_vp->v_type = VNON;	/* Untyped */
	}

	ASSERT_VOP_UNLOCKED(vp, "swaponvp");
	for (sp = swdevt, index = 0; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp)
			return EBUSY;
		if (!sp->sw_vp)
			goto found;
	}
	return EINVAL;
found:
	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_OPEN(vp, FREAD | FWRITE, cred, td);
	VOP_UNLOCK(vp, NULL, 0, td);
	if (error)
		return (error);

	/*
	 * v_rdev is not valid until after the VOP_OPEN() call.  dev_psize()
	 * must be supported if a character device has been specified.
	 */
	if (vp->v_type == VCHR)
		dev = vp->v_rdev;
	else
		dev = NODEV;

	if (nblks == 0 && dev != NODEV && ((nblks = dev_dpsize(dev)) == -1)) {
		(void) VOP_CLOSE(vp, FREAD | FWRITE, td);
		return (ENXIO);
	}
	if (nblks == 0) {
		(void) VOP_CLOSE(vp, FREAD | FWRITE, td);
		return (ENXIO);
	}

	/*
	 * If we go beyond this, we get overflows in the radix
	 * tree bitmap code.
	 */
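	/*
	 * (Illustration, assuming BLIST_META_RADIX is 16 and the default
	 * nswdev of 4: the limit is 0x40000000 / 16 / 4 == 16M blocks per
	 * swap unit; nblks is still in DEV_BSIZE'd units at this point.)
	 */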
	if (nblks > 0x40000000 / BLIST_META_RADIX / nswdev) {
		printf("exceeded maximum of %d blocks per swap unit\n",
			0x40000000 / BLIST_META_RADIX / nswdev);
		(void) VOP_CLOSE(vp, FREAD | FWRITE, td);
		return (ENXIO);
	}
	/*
	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
	 * First chop nblks off to page-align it, then convert.
	 *
	 * sw->sw_nblks is in page-sized chunks now too.
	 */
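	/*
	 * (For example, with 4K pages and 512-byte DEV_BSIZE, ctodb(1) == 8:
	 * the mask below discards any trailing partial page and dbtoc()
	 * then divides by 8 to yield whole pages.)
	 */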
	nblks &= ~(ctodb(1) - 1);
	nblks = dbtoc(nblks);

	sp->sw_vp = vp;
	sp->sw_dev = dev2udev(dev);
	sp->sw_device = dev;
	sp->sw_flags |= SW_FREED;
	sp->sw_nblks = nblks;

	/*
	 * nblks, nswap, and dmmax are PAGE_SIZE'd parameters now, not
	 * DEV_BSIZE'd.  aligned_nblks is used to calculate the
	 * size of the swap bitmap, taking into account the stripe size.
	 */
	aligned_nblks = (nblks + (dmmax - 1)) & ~(u_long)(dmmax - 1);

	if (aligned_nblks * nswdev > nswap)
		nswap = aligned_nblks * nswdev;

	if (swapblist == NULL)
		swapblist = blist_create(nswap);
	else
		blist_resize(&swapblist, nswap, 0);

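	/*
	 * Release this device's space into the global blist one dmmax-sized
	 * stripe at a time, translating device-relative stripe offsets to
	 * interleaved global block numbers.  dvbase starts at dmmax, so the
	 * first stripe of each device is never marked free; the reason is
	 * not stated here, but BSD swap code has traditionally avoided the
	 * very start of a device so as not to clobber a disklabel.
	 */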
	for (dvbase = dmmax; dvbase < nblks; dvbase += dmmax) {
		blk = min(nblks - dvbase, dmmax);
		vsbase = index * dmmax + dvbase * nswdev;
		blist_free(swapblist, vsbase, blk);
		vm_swap_size += blk;
	}

	return (0);
}