/*
 * (from commit log) proc->thread stage 4: rework the VFS and DEVICE
 * subsystems to take thread pointers.
 */
1/*
2 * Copyright (c) 1992, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software donated to Berkeley by
6 * the UCLA Ficus project.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)umap_vnops.c 8.6 (Berkeley) 5/22/95
37 * $FreeBSD: src/sys/miscfs/umapfs/umap_vnops.c,v 1.30 1999/08/30 07:08:04 bde Exp $
 * $DragonFly: src/sys/vfs/umapfs/Attic/umap_vnops.c,v 1.3 2003/06/25 03:56:01 dillon Exp $
39 */
40
41/*
42 * Umap Layer
43 */
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/kernel.h>
48#include <sys/sysctl.h>
49#include <sys/vnode.h>
50#include <sys/mount.h>
#include <sys/proc.h>
52#include <sys/namei.h>
53#include <sys/malloc.h>
54#include <sys/buf.h>
55#include <miscfs/umapfs/umap.h>
56#include <miscfs/nullfs/null.h>
57
58static int umap_bug_bypass = 0; /* for debugging: enables bypass printf'ing */
59SYSCTL_INT(_debug, OID_AUTO, umapfs_bug_bypass, CTLFLAG_RW,
60 &umap_bug_bypass, 0, "");
61
62static int umap_bypass __P((struct vop_generic_args *ap));
63static int umap_getattr __P((struct vop_getattr_args *ap));
64static int umap_inactive __P((struct vop_inactive_args *ap));
65static int umap_lock __P((struct vop_lock_args *ap));
66static int umap_print __P((struct vop_print_args *ap));
67static int umap_reclaim __P((struct vop_reclaim_args *ap));
68static int umap_rename __P((struct vop_rename_args *ap));
69static int umap_unlock __P((struct vop_unlock_args *ap));
70
71/*
72 * This is the 10-Apr-92 bypass routine.
73 * See null_vnops.c:null_bypass for more details.
74 */
75static int
76umap_bypass(ap)
77 struct vop_generic_args /* {
78 struct vnodeop_desc *a_desc;
79 <other random data follows, presumably>
80 } */ *ap;
81{
82 struct ucred **credpp = 0, *credp = 0;
83 struct ucred *savecredp = 0, *savecompcredp = 0;
84 struct ucred *compcredp = 0;
85 struct vnode **this_vp_p;
86 int error;
87 struct vnode *old_vps[VDESC_MAX_VPS];
88 struct vnode *vp1 = 0;
89 struct vnode **vps_p[VDESC_MAX_VPS];
90 struct vnode ***vppp;
91 struct vnodeop_desc *descp = ap->a_desc;
92 int reles, i;
93 struct componentname **compnamepp = 0;
94
95 if (umap_bug_bypass)
96 printf ("umap_bypass: %s\n", descp->vdesc_name);
97
98#ifdef DIAGNOSTIC
99 /*
100 * We require at least one vp.
101 */
102 if (descp->vdesc_vp_offsets == NULL ||
103 descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
104 panic ("umap_bypass: no vp's in map");
105#endif
106
107 /*
108 * Map the vnodes going in.
109 * Later, we'll invoke the operation based on
110 * the first mapped vnode's operation vector.
111 */
112 reles = descp->vdesc_flags;
113 for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
114 if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
115 break; /* bail out at end of list */
116 vps_p[i] = this_vp_p =
117 VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap);
118
119 if (i == 0) {
120 vp1 = *vps_p[0];
121 }
122
123 /*
124 * We're not guaranteed that any but the first vnode
125 * are of our type. Check for and don't map any
126 * that aren't. (Must map first vp or vclean fails.)
127 */
128
129 if (i && (*this_vp_p)->v_op != umap_vnodeop_p) {
130 old_vps[i] = NULL;
131 } else {
132 old_vps[i] = *this_vp_p;
133 *(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
134 if (reles & 1)
135 VREF(*this_vp_p);
136 }
137
138 }
139
140 /*
141 * Fix the credentials. (That's the purpose of this layer.)
142 */
143
144 if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
145
146 credpp = VOPARG_OFFSETTO(struct ucred**,
147 descp->vdesc_cred_offset, ap);
148
149 /* Save old values */
150
151 savecredp = (*credpp);
152 if (savecredp != NOCRED)
153 (*credpp) = crdup(savecredp);
154 credp = *credpp;
155
156 if (umap_bug_bypass && credp->cr_uid != 0)
157 printf("umap_bypass: user was %lu, group %lu\n",
158 (u_long)credp->cr_uid, (u_long)credp->cr_gid);
159
160 /* Map all ids in the credential structure. */
161
162 umap_mapids(vp1->v_mount, credp);
163
164 if (umap_bug_bypass && credp->cr_uid != 0)
165 printf("umap_bypass: user now %lu, group %lu\n",
166 (u_long)credp->cr_uid, (u_long)credp->cr_gid);
167 }
168
169 /* BSD often keeps a credential in the componentname structure
170 * for speed. If there is one, it better get mapped, too.
171 */
172
173 if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
174
175 compnamepp = VOPARG_OFFSETTO(struct componentname**,
176 descp->vdesc_componentname_offset, ap);
177
178 compcredp = (*compnamepp)->cn_cred;
179 savecompcredp = compcredp;
180 if (savecompcredp != NOCRED)
181 (*compnamepp)->cn_cred = crdup(savecompcredp);
182 compcredp = (*compnamepp)->cn_cred;
183
184 if (umap_bug_bypass && compcredp->cr_uid != 0)
185 printf(
186 "umap_bypass: component credit user was %lu, group %lu\n",
187 (u_long)compcredp->cr_uid,
188 (u_long)compcredp->cr_gid);
189
190 /* Map all ids in the credential structure. */
191
192 umap_mapids(vp1->v_mount, compcredp);
193
194 if (umap_bug_bypass && compcredp->cr_uid != 0)
195 printf(
196 "umap_bypass: component credit user now %lu, group %lu\n",
197 (u_long)compcredp->cr_uid,
198 (u_long)compcredp->cr_gid);
199 }
200
201 /*
202 * Call the operation on the lower layer
203 * with the modified argument structure.
204 */
205 error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);
206
207 /*
208 * Maintain the illusion of call-by-value
209 * by restoring vnodes in the argument structure
210 * to their original value.
211 */
212 reles = descp->vdesc_flags;
213 for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
214 if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
215 break; /* bail out at end of list */
216 if (old_vps[i]) {
217 *(vps_p[i]) = old_vps[i];
218 if (reles & 1)
219 vrele(*(vps_p[i]));
220 };
221 };
222
223 /*
224 * Map the possible out-going vpp
225 * (Assumes that the lower layer always returns
226 * a VREF'ed vpp unless it gets an error.)
227 */
228 if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
229 !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
230 !error) {
231 if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
232 goto out;
233 vppp = VOPARG_OFFSETTO(struct vnode***,
234 descp->vdesc_vpp_offset, ap);
235 if (*vppp)
236 error = umap_node_create(old_vps[0]->v_mount, **vppp, *vppp);
237 };
238
239 out:
240 /*
241 * Free duplicate cred structure and restore old one.
242 */
243 if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
244 if (umap_bug_bypass && credp && credp->cr_uid != 0)
245 printf("umap_bypass: returning-user was %lu\n",
246 (u_long)credp->cr_uid);
247
248 if (savecredp != NOCRED) {
249 crfree(credp);
250 (*credpp) = savecredp;
251 if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
252 printf(
253 "umap_bypass: returning-user now %lu\n\n",
254 (u_long)(*credpp)->cr_uid);
255 }
256 }
257
258 if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
259 if (umap_bug_bypass && compcredp && compcredp->cr_uid != 0)
260 printf(
261 "umap_bypass: returning-component-user was %lu\n",
262 (u_long)compcredp->cr_uid);
263
264 if (savecompcredp != NOCRED) {
265 crfree(compcredp);
266 (*compnamepp)->cn_cred = savecompcredp;
267 if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
268 printf(
269 "umap_bypass: returning-component-user now %lu\n",
270 (u_long)compcredp->cr_uid);
271 }
272 }
273
274 return (error);
275}
276
277
278/*
279 * We handle getattr to change the fsid.
280 */
281static int
282umap_getattr(ap)
283 struct vop_getattr_args /* {
284 struct vnode *a_vp;
285 struct vattr *a_vap;
286 struct ucred *a_cred;
dadab5e9 287 struct thread *a_td;
984263bc
MD
288 } */ *ap;
289{
290 short uid, gid;
291 int error, tmpid, nentries, gnentries;
292 u_long (*mapdata)[2], (*gmapdata)[2];
293 struct vnode **vp1p;
294 struct vnodeop_desc *descp = ap->a_desc;
295
296 error = umap_bypass((struct vop_generic_args *)ap);
297 if (error)
298 return (error);
299
300 /*
301 * Umap needs to map the uid and gid returned by a stat
302 * into the proper values for this site. This involves
303 * finding the returned uid in the mapping information,
304 * translating it into the uid on the other end,
305 * and filling in the proper field in the vattr
306 * structure pointed to by ap->a_vap. The group
307 * is easier, since currently all groups will be
308 * translate to the NULLGROUP.
309 */
310
311 /* Find entry in map */
312
313 uid = ap->a_vap->va_uid;
314 gid = ap->a_vap->va_gid;
315 if (umap_bug_bypass)
316 printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
317 gid);
318
319 vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
320 nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
321 mapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
322 gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
323 gmapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);
324
325 /* Reverse map the uid for the vnode. Since it's a reverse
326 map, we can't use umap_mapids() to do it. */
327
328 tmpid = umap_reverse_findid(uid, mapdata, nentries);
329
330 if (tmpid != -1) {
331
332 ap->a_vap->va_uid = (uid_t) tmpid;
333 if (umap_bug_bypass)
334 printf("umap_getattr: original uid = %d\n", uid);
335 } else
336 ap->a_vap->va_uid = (uid_t) NOBODY;
337
338 /* Reverse map the gid for the vnode. */
339
340 tmpid = umap_reverse_findid(gid, gmapdata, gnentries);
341
342 if (tmpid != -1) {
343
344 ap->a_vap->va_gid = (gid_t) tmpid;
345 if (umap_bug_bypass)
346 printf("umap_getattr: original gid = %d\n", gid);
347 } else
348 ap->a_vap->va_gid = (gid_t) NULLGROUP;
349
350 return (0);
351}
352
/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
umap_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{

	/* Account for the lock on this layer's vnode without blocking. */
	vop_nolock(ap);
	/* LK_DRAIN is handled entirely at this level; don't pass it down. */
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	ap->a_flags &= ~LK_INTERLOCK;
	/*
	 * Hand the (interlock-stripped) request to the lower vnode.
	 * NOTE(review): this deliberately uses nullfs's bypass, not
	 * umap_bypass -- lock ops carry no credentials to map.
	 */
	return (null_bypass((struct vop_generic_args *)ap));
}
373
374/*
375 * We need to process our own vnode unlock and then clear the
376 * interlock flag as it applies only to our vnode, not the
377 * vnodes below us on the stack.
378 */
379int
380umap_unlock(ap)
381 struct vop_unlock_args /* {
382 struct vnode *a_vp;
383 int a_flags;
dadab5e9 384 struct thread *a_td;
984263bc
MD
385 } */ *ap;
386{
387 vop_nounlock(ap);
388 ap->a_flags &= ~LK_INTERLOCK;
389 return (null_bypass((struct vop_generic_args *)ap));
390}
391
static int
umap_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct umap_node *xp = VTOUMAP(vp);
	struct vnode *lowervp = xp->umap_lowervp;
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our umap_node is in the
	 * cache and reusable.
	 *
	 * NOTE(review): despite "do nothing" above, the code does pass
	 * the inactive notification straight to the lower vnode; only
	 * the vrele is deferred to umap_reclaim().
	 */
	VOP_INACTIVE(lowervp, ap->a_td);
	/* VOP_INACTIVE is entered locked; drop our layer's lock on return. */
	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}
413
414static int
415umap_reclaim(ap)
416 struct vop_reclaim_args /* {
417 struct vnode *a_vp;
418 } */ *ap;
419{
420 struct vnode *vp = ap->a_vp;
421 struct umap_node *xp = VTOUMAP(vp);
422 struct vnode *lowervp = xp->umap_lowervp;
423
424 /* After this assignment, this node will not be re-used. */
425 xp->umap_lowervp = NULL;
426 LIST_REMOVE(xp, umap_hash);
427 FREE(vp->v_data, M_TEMP);
428 vp->v_data = NULL;
429 vrele(lowervp);
430 return (0);
431}
432
433static int
434umap_print(ap)
435 struct vop_print_args /* {
436 struct vnode *a_vp;
437 } */ *ap;
438{
439 struct vnode *vp = ap->a_vp;
440 printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp, UMAPVPTOLOWERVP(vp));
441 return (0);
442}
443
444static int
445umap_rename(ap)
446 struct vop_rename_args /* {
447 struct vnode *a_fdvp;
448 struct vnode *a_fvp;
449 struct componentname *a_fcnp;
450 struct vnode *a_tdvp;
451 struct vnode *a_tvp;
452 struct componentname *a_tcnp;
453 } */ *ap;
454{
455 int error;
456 struct componentname *compnamep;
457 struct ucred *compcredp, *savecompcredp;
458 struct vnode *vp;
459
460 /*
461 * Rename is irregular, having two componentname structures.
462 * We need to map the cre in the second structure,
463 * and then bypass takes care of the rest.
464 */
465
466 vp = ap->a_fdvp;
467 compnamep = ap->a_tcnp;
468 compcredp = compnamep->cn_cred;
469
470 savecompcredp = compcredp;
471 compcredp = compnamep->cn_cred = crdup(savecompcredp);
472
473 if (umap_bug_bypass && compcredp->cr_uid != 0)
474 printf(
475 "umap_rename: rename component credit user was %lu, group %lu\n",
476 (u_long)compcredp->cr_uid, (u_long)compcredp->cr_gid);
477
478 /* Map all ids in the credential structure. */
479
480 umap_mapids(vp->v_mount, compcredp);
481
482 if (umap_bug_bypass && compcredp->cr_uid != 0)
483 printf(
484 "umap_rename: rename component credit user now %lu, group %lu\n",
485 (u_long)compcredp->cr_uid, (u_long)compcredp->cr_gid);
486
487 error = umap_bypass((struct vop_generic_args *)ap);
488
489 /* Restore the additional mapped componentname cred structure. */
490
491 crfree(compcredp);
492 compnamep->cn_cred = savecompcredp;
493
494 return error;
495}
496
497/*
498 * Global vfs data structures
499 */
500/*
501 * XXX - strategy, bwrite are hand coded currently. They should
502 * go away with a merged buffer/block cache.
503 *
504 */
/* Operation vector for umapfs vnodes; filled in by vfs_add_vnodeops(). */
vop_t **umap_vnodeop_p;
/*
 * Everything not listed here falls through to umap_bypass, which maps
 * vnodes and credentials and calls down to the lower layer.
 */
static struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) umap_bypass },
	{ &vop_getattr_desc,		(vop_t *) umap_getattr },
	{ &vop_inactive_desc,		(vop_t *) umap_inactive },
	{ &vop_lock_desc,		(vop_t *) umap_lock },
	{ &vop_print_desc,		(vop_t *) umap_print },
	{ &vop_reclaim_desc,		(vop_t *) umap_reclaim },
	{ &vop_rename_desc,		(vop_t *) umap_rename },
	{ &vop_unlock_desc,		(vop_t *) umap_unlock },
	{ NULL, NULL }
};
static struct vnodeopv_desc umap_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };

VNODEOP_SET(umap_vnodeop_opv_desc);