1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $FreeBSD: src/sys/nfs/nfs_vnops.c,v 1.150.2.5 2001/12/20 19:56:28 dillon Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_vnops.c,v 1.20 2004/04/08 22:32:14 dillon Exp $
39 */
40
41
42/*
43 * vnode op calls for Sun NFS version 2 and 3
44 */
45
46#include "opt_inet.h"
47
48#include <sys/param.h>
49#include <sys/kernel.h>
50#include <sys/systm.h>
51#include <sys/resourcevar.h>
52#include <sys/proc.h>
53#include <sys/mount.h>
54#include <sys/buf.h>
55#include <sys/malloc.h>
56#include <sys/mbuf.h>
57#include <sys/namei.h>
58#include <sys/socket.h>
59#include <sys/vnode.h>
60#include <sys/dirent.h>
61#include <sys/fcntl.h>
62#include <sys/lockf.h>
63#include <sys/stat.h>
64#include <sys/sysctl.h>
65#include <sys/conf.h>
66
67#include <vm/vm.h>
68#include <vm/vm_extern.h>
69#include <vm/vm_zone.h>
70
71#include <sys/buf2.h>
72
73#include <vfs/fifofs/fifo.h>
74
75#include "rpcv2.h"
76#include "nfsproto.h"
77#include "nfs.h"
78#include "nfsmount.h"
79#include "nfsnode.h"
80#include "xdr_subs.h"
81#include "nfsm_subs.h"
82#include "nqnfs.h"
83
84#include <net/if.h>
85#include <netinet/in.h>
86#include <netinet/in_var.h>
87
88/* Defs */
89#define TRUE 1
90#define FALSE 0
91
92/*
93 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
94 * calls are not in getblk() and brelse() so that they would not be necessary
95 * here.
96 */
97#ifndef B_VMIO
98#define vfs_busy_pages(bp, f)
99#endif
100
101static int nfsspec_read (struct vop_read_args *);
102static int nfsspec_write (struct vop_write_args *);
103static int nfsfifo_read (struct vop_read_args *);
104static int nfsfifo_write (struct vop_write_args *);
105static int nfsspec_close (struct vop_close_args *);
106static int nfsfifo_close (struct vop_close_args *);
107#define nfs_poll vop_nopoll
108static int nfs_flush (struct vnode *,int,struct thread *,int);
109static int nfs_setattrrpc (struct vnode *,struct vattr *,struct ucred *,struct thread *);
110static int nfs_lookup (struct vop_lookup_args *);
111static int nfs_create (struct vop_create_args *);
112static int nfs_mknod (struct vop_mknod_args *);
113static int nfs_open (struct vop_open_args *);
114static int nfs_close (struct vop_close_args *);
115static int nfs_access (struct vop_access_args *);
116static int nfs_getattr (struct vop_getattr_args *);
117static int nfs_setattr (struct vop_setattr_args *);
118static int nfs_read (struct vop_read_args *);
119static int nfs_mmap (struct vop_mmap_args *);
120static int nfs_fsync (struct vop_fsync_args *);
121static int nfs_remove (struct vop_remove_args *);
122static int nfs_link (struct vop_link_args *);
123static int nfs_rename (struct vop_rename_args *);
124static int nfs_mkdir (struct vop_mkdir_args *);
125static int nfs_rmdir (struct vop_rmdir_args *);
126static int nfs_symlink (struct vop_symlink_args *);
127static int nfs_readdir (struct vop_readdir_args *);
128static int nfs_bmap (struct vop_bmap_args *);
129static int nfs_strategy (struct vop_strategy_args *);
130static int nfs_lookitup (struct vnode *, const char *, int,
131 struct ucred *, struct thread *, struct nfsnode **);
132static int nfs_sillyrename (struct vnode *,struct vnode *,struct componentname *);
133static int nfsspec_access (struct vop_access_args *);
134static int nfs_readlink (struct vop_readlink_args *);
135static int nfs_print (struct vop_print_args *);
136static int nfs_advlock (struct vop_advlock_args *);
137static int nfs_bwrite (struct vop_bwrite_args *);
138/*
139 * Global vfs data structures for nfs
140 */
141vop_t **nfsv2_vnodeop_p;
142static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
143 { &vop_default_desc, (vop_t *) vop_defaultop },
144 { &vop_access_desc, (vop_t *) nfs_access },
145 { &vop_advlock_desc, (vop_t *) nfs_advlock },
146 { &vop_bmap_desc, (vop_t *) nfs_bmap },
147 { &vop_bwrite_desc, (vop_t *) nfs_bwrite },
148 { &vop_close_desc, (vop_t *) nfs_close },
149 { &vop_create_desc, (vop_t *) nfs_create },
150 { &vop_fsync_desc, (vop_t *) nfs_fsync },
151 { &vop_getattr_desc, (vop_t *) nfs_getattr },
152 { &vop_getpages_desc, (vop_t *) nfs_getpages },
153 { &vop_putpages_desc, (vop_t *) nfs_putpages },
154 { &vop_inactive_desc, (vop_t *) nfs_inactive },
155 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
156 { &vop_lease_desc, (vop_t *) vop_null },
157 { &vop_link_desc, (vop_t *) nfs_link },
158 { &vop_lock_desc, (vop_t *) vop_sharedlock },
159 { &vop_lookup_desc, (vop_t *) nfs_lookup },
160 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
161 { &vop_mknod_desc, (vop_t *) nfs_mknod },
162 { &vop_mmap_desc, (vop_t *) nfs_mmap },
163 { &vop_open_desc, (vop_t *) nfs_open },
164 { &vop_poll_desc, (vop_t *) nfs_poll },
165 { &vop_print_desc, (vop_t *) nfs_print },
166 { &vop_read_desc, (vop_t *) nfs_read },
167 { &vop_readdir_desc, (vop_t *) nfs_readdir },
168 { &vop_readlink_desc, (vop_t *) nfs_readlink },
169 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
170 { &vop_remove_desc, (vop_t *) nfs_remove },
171 { &vop_rename_desc, (vop_t *) nfs_rename },
172 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
173 { &vop_setattr_desc, (vop_t *) nfs_setattr },
174 { &vop_strategy_desc, (vop_t *) nfs_strategy },
175 { &vop_symlink_desc, (vop_t *) nfs_symlink },
176 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
177 { &vop_write_desc, (vop_t *) nfs_write },
178 { NULL, NULL }
179};
180static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
181 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
182VNODEOP_SET(nfsv2_vnodeop_opv_desc);
183
184/*
185 * Special device vnode ops
186 */
187vop_t **spec_nfsv2nodeop_p;
188static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
189 { &vop_default_desc, (vop_t *) spec_vnoperate },
190 { &vop_access_desc, (vop_t *) nfsspec_access },
191 { &vop_close_desc, (vop_t *) nfsspec_close },
192 { &vop_fsync_desc, (vop_t *) nfs_fsync },
193 { &vop_getattr_desc, (vop_t *) nfs_getattr },
194 { &vop_inactive_desc, (vop_t *) nfs_inactive },
195 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
196 { &vop_lock_desc, (vop_t *) vop_sharedlock },
197 { &vop_print_desc, (vop_t *) nfs_print },
198 { &vop_read_desc, (vop_t *) nfsspec_read },
199 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
200 { &vop_setattr_desc, (vop_t *) nfs_setattr },
201 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
202 { &vop_write_desc, (vop_t *) nfsspec_write },
203 { NULL, NULL }
204};
205static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
206 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
207VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
208
209vop_t **fifo_nfsv2nodeop_p;
210static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
211 { &vop_default_desc, (vop_t *) fifo_vnoperate },
212 { &vop_access_desc, (vop_t *) nfsspec_access },
213 { &vop_close_desc, (vop_t *) nfsfifo_close },
214 { &vop_fsync_desc, (vop_t *) nfs_fsync },
215 { &vop_getattr_desc, (vop_t *) nfs_getattr },
216 { &vop_inactive_desc, (vop_t *) nfs_inactive },
217 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
218 { &vop_lock_desc, (vop_t *) vop_sharedlock },
219 { &vop_print_desc, (vop_t *) nfs_print },
220 { &vop_read_desc, (vop_t *) nfsfifo_read },
221 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
222 { &vop_setattr_desc, (vop_t *) nfs_setattr },
223 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
224 { &vop_write_desc, (vop_t *) nfsfifo_write },
225 { NULL, NULL }
226};
227static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
228 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
229VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
230
231static int nfs_mknodrpc (struct vnode *dvp, struct vnode **vpp,
232 struct componentname *cnp,
233 struct vattr *vap);
234static int nfs_removerpc (struct vnode *dvp, const char *name,
235 int namelen,
236 struct ucred *cred, struct thread *td);
237static int nfs_renamerpc (struct vnode *fdvp, const char *fnameptr,
238 int fnamelen, struct vnode *tdvp,
239 const char *tnameptr, int tnamelen,
240 struct ucred *cred, struct thread *td);
241static int nfs_renameit (struct vnode *sdvp,
242 struct componentname *scnp,
243 struct sillyrename *sp);
244
245/*
246 * Global variables
247 */
248extern u_int32_t nfs_true, nfs_false;
249extern u_int32_t nfs_xdrneg1;
250extern struct nfsstats nfsstats;
251extern nfstype nfsv3_type[9];
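/* Bookkeeping for the nfsiod async I/O daemons: idle daemon threads, the mount each serves, and the running count. */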
252struct thread *nfs_iodwant[NFS_MAXASYNCDAEMON];
253struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
254int nfs_numasync = 0;
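/* Fixed-size portion of a struct dirent, i.e. everything but the name buffer. */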
255#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
256
257SYSCTL_DECL(_vfs_nfs);
258
259static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
260SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
261 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
262
263static int nfsneg_cache_timeout = NFS_MINATTRTIMO;
264SYSCTL_INT(_vfs_nfs, OID_AUTO, neg_cache_timeout, CTLFLAG_RW,
265 &nfsneg_cache_timeout, 0, "NFS NEGATIVE ACCESS cache timeout");
266
267static int nfsv3_commit_on_close = 0;
268SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
269 &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
270#if 0
271SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
272 &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
273
274SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
275 &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
276#endif
277
278#define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
279 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
280 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
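/*
 * Issue an NFSv3 ACCESS RPC for the requested access bits and cache the
 * mode returned by the server (along with the credential uid and a
 * timestamp) in the nfsnode for later nfs_access() checks.
 */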
281static int
282nfs3_access_otw(struct vnode *vp, int wmode,
283 struct thread *td, struct ucred *cred)
284{
285 const int v3 = 1;
286 u_int32_t *tl;
287 int error = 0, attrflag;
288
289 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
290 caddr_t bpos, dpos, cp2;
291 int32_t t1, t2;
292 caddr_t cp;
293 u_int32_t rmode;
294 struct nfsnode *np = VTONFS(vp);
295
296 nfsstats.rpccnt[NFSPROC_ACCESS]++;
297 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
298 nfsm_fhtom(vp, v3);
299 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
300 *tl = txdr_unsigned(wmode);
301 nfsm_request(vp, NFSPROC_ACCESS, td, cred);
302 nfsm_postop_attr(vp, attrflag);
303 if (!error) {
304 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
305 rmode = fxdr_unsigned(u_int32_t, *tl);
306 np->n_mode = rmode;
307 np->n_modeuid = cred->cr_uid;
308 np->n_modestamp = mycpu->gd_time_seconds;
309 }
310 m_freem(mrep);
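/* Error exits from the nfsm_* RPC macros above branch to this label. */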
311nfsmout:
312 return error;
313}
314
315/*
316 * nfs access vnode op.
317 * For nfs version 2, just return ok. File accesses may fail later.
318 * For nfs version 3, use the access rpc to check accessibility. If file modes
319 * are changed on the server, accesses might still fail later.
320 */
321static int
322nfs_access(ap)
323 struct vop_access_args /* {
324 struct vnode *a_vp;
325 int a_mode;
326 struct ucred *a_cred;
327 struct thread *a_td;
328 } */ *ap;
329{
330 struct vnode *vp = ap->a_vp;
331 int error = 0;
332 u_int32_t mode, wmode;
333 int v3 = NFS_ISV3(vp);
334 struct nfsnode *np = VTONFS(vp);
335
336 /*
337 * Disallow write attempts on filesystems mounted read-only;
338 * unless the file is a socket, fifo, or a block or character
339 * device resident on the filesystem.
340 */
341 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
342 switch (vp->v_type) {
343 case VREG:
344 case VDIR:
345 case VLNK:
346 return (EROFS);
347 default:
348 break;
349 }
350 }
351 /*
352 * For nfs v3, check to see if we have done this recently, and if
353 * so return our cached result instead of making an ACCESS call.
354 * If not, do an access rpc, otherwise you are stuck emulating
355 * ufs_access() locally using the vattr. This may not be correct,
356 * since the server may apply other access criteria such as
357 * client uid-->server uid mapping that we do not know about.
358 */
359 if (v3) {
360 if (ap->a_mode & VREAD)
361 mode = NFSV3ACCESS_READ;
362 else
363 mode = 0;
364 if (vp->v_type != VDIR) {
365 if (ap->a_mode & VWRITE)
366 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
367 if (ap->a_mode & VEXEC)
368 mode |= NFSV3ACCESS_EXECUTE;
369 } else {
370 if (ap->a_mode & VWRITE)
371 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
372 NFSV3ACCESS_DELETE);
373 if (ap->a_mode & VEXEC)
374 mode |= NFSV3ACCESS_LOOKUP;
375 }
376 /* XXX safety belt, only make blanket request if caching */
377 if (nfsaccess_cache_timeout > 0) {
378 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
379 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
380 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
381 } else {
382 wmode = mode;
383 }
384
385 /*
386 * Does our cached result allow us to give a definite yes to
387 * this request?
388 */
389 if ((mycpu->gd_time_seconds < (np->n_modestamp + nfsaccess_cache_timeout)) &&
390 (ap->a_cred->cr_uid == np->n_modeuid) &&
391 ((np->n_mode & mode) == mode)) {
392 nfsstats.accesscache_hits++;
393 } else {
394 /*
395 * Either a no, or a don't know. Go to the wire.
396 */
397 nfsstats.accesscache_misses++;
398 error = nfs3_access_otw(vp, wmode, ap->a_td,ap->a_cred);
399 if (!error) {
400 if ((np->n_mode & mode) != mode) {
401 error = EACCES;
402 }
403 }
404 }
405 } else {
406 if ((error = nfsspec_access(ap)) != 0)
407 return (error);
408
409 /*
410 * Attempt to prevent a mapped root from accessing a file
411 * which it shouldn't. We try to read a byte from the file
412 * if the user is root and the file is not zero length.
413 * After calling nfsspec_access, we should have the correct
414 * file size cached.
415 */
416 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
417 && VTONFS(vp)->n_size > 0) {
418 struct iovec aiov;
419 struct uio auio;
420 char buf[1];
421
422 aiov.iov_base = buf;
423 aiov.iov_len = 1;
424 auio.uio_iov = &aiov;
425 auio.uio_iovcnt = 1;
426 auio.uio_offset = 0;
427 auio.uio_resid = 1;
428 auio.uio_segflg = UIO_SYSSPACE;
429 auio.uio_rw = UIO_READ;
430 auio.uio_td = ap->a_td;
431
432 if (vp->v_type == VREG) {
433 error = nfs_readrpc(vp, &auio);
434 } else if (vp->v_type == VDIR) {
435 char* bp;
436 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
437 aiov.iov_base = bp;
438 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
439 error = nfs_readdirrpc(vp, &auio);
440 free(bp, M_TEMP);
441 } else if (vp->v_type == VLNK) {
442 error = nfs_readlinkrpc(vp, &auio);
443 } else {
444 error = EACCES;
445 }
446 }
447 }
448 /*
449 * [re]record creds for reading and/or writing if access
450 * was granted. Assume the NFS server will grant read access
451 * for execute requests.
452 */
453 if (error == 0) {
454 if ((ap->a_mode & (VREAD|VEXEC)) && ap->a_cred != np->n_rucred) {
455 crhold(ap->a_cred);
456 if (np->n_rucred)
457 crfree(np->n_rucred);
458 np->n_rucred = ap->a_cred;
459 }
460 if ((ap->a_mode & VWRITE) && ap->a_cred != np->n_wucred) {
461 crhold(ap->a_cred);
462 if (np->n_wucred)
463 crfree(np->n_wucred);
464 np->n_wucred = ap->a_cred;
465 }
466 }
467 return(error);
468}
469
470/*
471 * nfs open vnode op
472 * Check to see if the type is ok
473 * and that deletion is not in progress.
474 * For paged in text files, you will need to flush the page cache
475 * if consistency is lost.
476 */
477/* ARGSUSED */
478static int
479nfs_open(ap)
480 struct vop_open_args /* {
481 struct vnode *a_vp;
482 int a_mode;
483 struct ucred *a_cred;
484 struct thread *a_td;
485 } */ *ap;
486{
487 struct vnode *vp = ap->a_vp;
488 struct nfsnode *np = VTONFS(vp);
489 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
490 struct vattr vattr;
491 int error;
492
493 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
494#ifdef DIAGNOSTIC
495 printf("open eacces vtyp=%d\n",vp->v_type);
496#endif
497 return (EOPNOTSUPP);
498 }
499 /*
500 * Get a valid lease. If cached data is stale, flush it.
501 */
502 if (nmp->nm_flag & NFSMNT_NQNFS) {
503 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
504 do {
505 error = nqnfs_getlease(vp, ND_READ, ap->a_td);
506 } while (error == NQNFS_EXPIRED);
507 if (error)
508 return (error);
509 if (np->n_lrev != np->n_brev ||
510 (np->n_flag & NQNFSNONCACHE)) {
511 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1))
512 == EINTR) {
513 return (error);
514 }
515 np->n_brev = np->n_lrev;
516 }
517 }
518 } else {
519 if (np->n_flag & NMODIFIED) {
520 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1))
521 == EINTR) {
522 return (error);
523 }
524 np->n_attrstamp = 0;
525 if (vp->v_type == VDIR)
526 np->n_direofoffset = 0;
527 error = VOP_GETATTR(vp, &vattr, ap->a_td);
528 if (error)
529 return (error);
530 np->n_mtime = vattr.va_mtime.tv_sec;
531 } else {
532 error = VOP_GETATTR(vp, &vattr, ap->a_td);
533 if (error)
534 return (error);
535 if (np->n_mtime != vattr.va_mtime.tv_sec) {
536 if (vp->v_type == VDIR)
537 np->n_direofoffset = 0;
538 if ((error = nfs_vinvalbuf(vp, V_SAVE,
539 ap->a_td, 1)) == EINTR) {
540 return (error);
541 }
542 np->n_mtime = vattr.va_mtime.tv_sec;
543 }
544 }
545 }
546 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
547 np->n_attrstamp = 0; /* For Open/Close consistency */
548 return (0);
549}
550
551/*
552 * nfs close vnode op
553 * What an NFS client should do upon close after writing is a debatable issue.
554 * Most NFS clients push delayed writes to the server upon close, basically for
555 * two reasons:
556 * 1 - So that any write errors may be reported back to the client process
557 * doing the close system call. By far the two most likely errors are
558 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
559 * 2 - To put a worst case upper bound on cache inconsistency between
560 * multiple clients for the file.
561 * There is also a consistency problem for Version 2 of the protocol w.r.t.
562 * not being able to tell if other clients are writing a file concurrently,
563 * since there is no way of knowing if the changed modify time in the reply
564 * is only due to the write for this client.
565 * (NFS Version 3 provides weak cache consistency data in the reply that
566 * should be sufficient to detect and handle this case.)
567 *
568 * The current code does the following:
569 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
570 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
571 * or commit them (this satisfies 1 and 2 except for the
572 * case where the server crashes after this close but
573 * before the commit RPC, which is felt to be "good
574 * enough". Changing the last argument to nfs_flush() to
575 * a 1 would force a commit operation, if it is felt a
576 * commit is necessary now.
577 * for NQNFS - do nothing now, since 2 is dealt with via leases and
578 * 1 should be dealt with via an fsync() system call for
579 * cases where write errors are important.
580 */
581/* ARGSUSED */
582static int
583nfs_close(ap)
584 struct vop_close_args /* {
585 struct vnodeop_desc *a_desc;
586 struct vnode *a_vp;
587 int a_fflag;
588 struct ucred *a_cred;
589 struct thread *a_td;
590 } */ *ap;
591{
592 struct vnode *vp = ap->a_vp;
593 struct nfsnode *np = VTONFS(vp);
594 int error = 0;
595
596 if (vp->v_type == VREG) {
597 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
598 (np->n_flag & NMODIFIED)) {
599 if (NFS_ISV3(vp)) {
600 /*
601 * Under NFSv3 we have dirty buffers to dispose of. We
602 * must flush them to the NFS server. We have the option
603 * of waiting all the way through the commit rpc or just
604 * waiting for the initial write. The default is to only
605 * wait through the initial write so the data is in the
606 * server's cache, which is roughly similar to the state
607 * a standard disk subsystem leaves the file in on close().
608 *
609 * We cannot clear the NMODIFIED bit in np->n_flag due to
610 * potential races with other processes, and certainly
611 * cannot clear it if we don't commit.
612 */
613 int cm = nfsv3_commit_on_close ? 1 : 0;
614 error = nfs_flush(vp, MNT_WAIT, ap->a_td, cm);
615 /* np->n_flag &= ~NMODIFIED; */
616 } else {
617 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
618 }
619 np->n_attrstamp = 0;
620 }
621 if (np->n_flag & NWRITEERR) {
622 np->n_flag &= ~NWRITEERR;
623 error = np->n_error;
624 }
625 }
626 return (error);
627}
628
629/*
630 * nfs getattr call from vfs.
631 */
632static int
633nfs_getattr(ap)
634 struct vop_getattr_args /* {
635 struct vnode *a_vp;
636 struct vattr *a_vap;
637 struct ucred *a_cred;
638 struct thread *a_td;
639 } */ *ap;
640{
641 struct vnode *vp = ap->a_vp;
642 struct nfsnode *np = VTONFS(vp);
643 caddr_t cp;
644 u_int32_t *tl;
645 int32_t t1, t2;
646 caddr_t bpos, dpos;
647 int error = 0;
648 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
649 int v3 = NFS_ISV3(vp);
650
651 /*
652 * Update local times for special files.
653 */
654 if (np->n_flag & (NACC | NUPD))
655 np->n_flag |= NCHG;
656 /*
657 * First look in the cache.
658 */
659 if (nfs_getattrcache(vp, ap->a_vap) == 0)
660 return (0);
661
662 if (v3 && nfsaccess_cache_timeout > 0) {
663 nfsstats.accesscache_misses++;
664 nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_td, nfs_vpcred(vp, ND_CHECK));
665 if (nfs_getattrcache(vp, ap->a_vap) == 0)
666 return (0);
667 }
668
669 nfsstats.rpccnt[NFSPROC_GETATTR]++;
670 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
671 nfsm_fhtom(vp, v3);
672 nfsm_request(vp, NFSPROC_GETATTR, ap->a_td, nfs_vpcred(vp, ND_CHECK));
673 if (!error) {
674 nfsm_loadattr(vp, ap->a_vap);
675 }
676 m_freem(mrep);
677nfsmout:
678 return (error);
679}
680
681/*
682 * nfs setattr call.
683 */
684static int
685nfs_setattr(ap)
686 struct vop_setattr_args /* {
687 struct vnodeop_desc *a_desc;
688 struct vnode *a_vp;
689 struct vattr *a_vap;
690 struct ucred *a_cred;
691 struct thread *a_td;
692 } */ *ap;
693{
694 struct vnode *vp = ap->a_vp;
695 struct nfsnode *np = VTONFS(vp);
696 struct vattr *vap = ap->a_vap;
697 int error = 0;
698 u_quad_t tsize;
699
700#ifndef nolint
701 tsize = (u_quad_t)0;
702#endif
703
704 /*
705 * Setting of flags is not supported.
706 */
707 if (vap->va_flags != VNOVAL)
708 return (EOPNOTSUPP);
709
710 /*
711 * Disallow write attempts if the filesystem is mounted read-only.
712 */
713 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
714 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
715 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
716 (vp->v_mount->mnt_flag & MNT_RDONLY))
717 return (EROFS);
718 if (vap->va_size != VNOVAL) {
719 switch (vp->v_type) {
720 case VDIR:
721 return (EISDIR);
722 case VCHR:
723 case VBLK:
724 case VSOCK:
725 case VFIFO:
726 if (vap->va_mtime.tv_sec == VNOVAL &&
727 vap->va_atime.tv_sec == VNOVAL &&
728 vap->va_mode == (mode_t)VNOVAL &&
729 vap->va_uid == (uid_t)VNOVAL &&
730 vap->va_gid == (gid_t)VNOVAL)
731 return (0);
732 vap->va_size = VNOVAL;
733 break;
734 default:
735 /*
736 * Disallow write attempts if the filesystem is
737 * mounted read-only.
738 */
739 if (vp->v_mount->mnt_flag & MNT_RDONLY)
740 return (EROFS);
741
742 /*
743 * We run vnode_pager_setsize() early (why?),
744 * we must set np->n_size now to avoid vinvalbuf
745 * V_SAVE races that might setsize a lower
746 * value.
747 */
748
749 tsize = np->n_size;
750 error = nfs_meta_setsize(vp, ap->a_td, vap->va_size);
751
752 if (np->n_flag & NMODIFIED) {
753 if (vap->va_size == 0)
754 error = nfs_vinvalbuf(vp, 0, ap->a_td, 1);
755 else
756 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
757 if (error) {
758 np->n_size = tsize;
759 vnode_pager_setsize(vp, np->n_size);
760 return (error);
761 }
762 }
763 /* np->n_size has already been set to vap->va_size
764 * in nfs_meta_setsize(). We must set it again since
765 * nfs_loadattrcache() could be called through
766 * nfs_meta_setsize() and could modify np->n_size.
767 */
768 np->n_vattr.va_size = np->n_size = vap->va_size;
769 };
770 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
771 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
772 vp->v_type == VREG &&
773 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1)) == EINTR)
774 return (error);
775 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_td);
776 if (error && vap->va_size != VNOVAL) {
777 np->n_size = np->n_vattr.va_size = tsize;
778 vnode_pager_setsize(vp, np->n_size);
779 }
780 return (error);
781}
782
783/*
784 * Do an nfs setattr rpc.
785 */
786static int
787nfs_setattrrpc(struct vnode *vp, struct vattr *vap,
788 struct ucred *cred, struct thread *td)
789{
790 struct nfsv2_sattr *sp;
791 caddr_t cp;
792 int32_t t1, t2;
793 caddr_t bpos, dpos, cp2;
794 u_int32_t *tl;
795 int error = 0, wccflag = NFSV3_WCCRATTR;
796 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
797 int v3 = NFS_ISV3(vp);
798
799 nfsstats.rpccnt[NFSPROC_SETATTR]++;
800 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
801 nfsm_fhtom(vp, v3);
802 if (v3) {
803 nfsm_v3attrbuild(vap, TRUE);
804 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
805 *tl = nfs_false;
806 } else {
807 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
808 if (vap->va_mode == (mode_t)VNOVAL)
809 sp->sa_mode = nfs_xdrneg1;
810 else
811 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
812 if (vap->va_uid == (uid_t)VNOVAL)
813 sp->sa_uid = nfs_xdrneg1;
814 else
815 sp->sa_uid = txdr_unsigned(vap->va_uid);
816 if (vap->va_gid == (gid_t)VNOVAL)
817 sp->sa_gid = nfs_xdrneg1;
818 else
819 sp->sa_gid = txdr_unsigned(vap->va_gid);
820 sp->sa_size = txdr_unsigned(vap->va_size);
821 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
822 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
823 }
824 nfsm_request(vp, NFSPROC_SETATTR, td, cred);
825 if (v3) {
826 nfsm_wcc_data(vp, wccflag);
827 } else
828 nfsm_loadattr(vp, (struct vattr *)0);
829 m_freem(mrep);
830nfsmout:
831 return (error);
832}
833
834/*
835 * 'cached' nfs directory lookup
836 */
837static int
838nfs_lookup(ap)
839 struct vop_lookup_args /* {
840 struct vnodeop_desc *a_desc;
841 struct vnode *a_dvp;
842 struct vnode **a_vpp;
843 struct componentname *a_cnp;
844 } */ *ap;
845{
846 struct componentname *cnp = ap->a_cnp;
847 struct vnode *dvp = ap->a_dvp;
848 struct vnode **vpp = ap->a_vpp;
849 int flags = cnp->cn_flags;
850 struct vnode *newvp;
851 u_int32_t *tl;
852 caddr_t cp;
853 int32_t t1, t2;
854 struct nfsmount *nmp;
855 caddr_t bpos, dpos, cp2;
856 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
857 long len;
858 nfsfh_t *fhp;
859 struct nfsnode *np;
860 int lockparent, wantparent, error = 0, attrflag, fhsize;
861 int v3 = NFS_ISV3(dvp);
862 struct thread *td = cnp->cn_td;
863
864 /*
865 * Read-only mount check and directory check.
866 */
867 *vpp = NULLVP;
868 if ((flags & CNP_ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
869 (cnp->cn_nameiop == NAMEI_DELETE || cnp->cn_nameiop == NAMEI_RENAME))
870 return (EROFS);
871
872 if (dvp->v_type != VDIR)
873 return (ENOTDIR);
874
875 /*
876 * Look it up in the cache. Note that ENOENT is only returned if we
877 * previously entered a negative hit (see later on). The additional
878 * nfsneg_cache_timeout check causes previously cached results to
879 * be instantly ignored if the negative caching is turned off.
880 */
881 lockparent = flags & CNP_LOCKPARENT;
882 wantparent = flags & (CNP_LOCKPARENT|CNP_WANTPARENT);
883 nmp = VFSTONFS(dvp->v_mount);
884 np = VTONFS(dvp);
885 error = cache_lookup(dvp, NCPNULL, vpp, NCPPNULL, cnp);
886 if (error != 0) {
887 struct vattr vattr;
888 int vpid;
889
890 if (error == ENOENT && nfsneg_cache_timeout) {
891 *vpp = NULLVP;
892 return (error);
893 }
894 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) {
895 *vpp = NULLVP;
896 return (error);
897 }
898
899 newvp = *vpp;
900 vpid = newvp->v_id;
901 /*
902 * See the comment starting `Step through' in ufs/ufs_lookup.c
903 * for an explanation of the locking protocol
904 */
905 if (dvp == newvp) {
906 VREF(newvp);
907 error = 0;
908 } else if (flags & CNP_ISDOTDOT) {
909 VOP_UNLOCK(dvp, NULL, 0, td);
910 error = vget(newvp, NULL, LK_EXCLUSIVE, td);
911 if (!error && lockparent && (flags & CNP_ISLASTCN))
912 error = vn_lock(dvp, NULL, LK_EXCLUSIVE, td);
913 } else {
914 error = vget(newvp, NULL, LK_EXCLUSIVE, td);
915 if (!lockparent || error || !(flags & CNP_ISLASTCN))
916 VOP_UNLOCK(dvp, NULL, 0, td);
917 }
918 if (!error) {
919 if (vpid == newvp->v_id) {
920 if (!VOP_GETATTR(newvp, &vattr, td)
921 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
922 nfsstats.lookupcache_hits++;
923 if (cnp->cn_nameiop != NAMEI_LOOKUP &&
924 (flags & CNP_ISLASTCN))
925 cnp->cn_flags |= CNP_SAVENAME;
926 return (0);
927 }
928 cache_purge(newvp);
929 }
930 vput(newvp);
931 if (lockparent && dvp != newvp && (flags & CNP_ISLASTCN))
932 VOP_UNLOCK(dvp, NULL, 0, td);
933 }
934 error = vn_lock(dvp, NULL, LK_EXCLUSIVE, td);
935 *vpp = NULLVP;
936 if (error)
937 return (error);
938 }
939
940 /*
941 * Cache miss, go the wire.
942 */
984263bc
MD
943 error = 0;
944 newvp = NULLVP;
945 nfsstats.lookupcache_misses++;
946 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
947 len = cnp->cn_namelen;
948 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
949 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
950 nfsm_fhtom(dvp, v3);
951 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
952 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_td, cnp->cn_cred);
953 if (error) {
954 /*
955 * Cache negative lookups to reduce NFS traffic, but use
956 * a fast timeout.
957 */
958 if (error == ENOENT &&
959 (cnp->cn_flags & CNP_MAKEENTRY) &&
960 cnp->cn_nameiop == NAMEI_LOOKUP &&
961 nfsneg_cache_timeout) {
962 int toval = nfsneg_cache_timeout * hz;
963 if (cnp->cn_flags & CNP_CACHETIMEOUT) {
964 if (cnp->cn_timeout > toval)
965 cnp->cn_timeout = toval;
966 } else {
967 cnp->cn_flags |= CNP_CACHETIMEOUT;
968 cnp->cn_timeout = toval;
969 }
970 cache_enter(dvp, NCPNULL, NULL, cnp);
971 }
972 nfsm_postop_attr(dvp, attrflag);
973 m_freem(mrep);
974 goto nfsmout;
975 }
976 nfsm_getfh(fhp, fhsize, v3);
977
978 /*
979 * Handle RENAME case...
980 */
981 if (cnp->cn_nameiop == NAMEI_RENAME && wantparent && (flags & CNP_ISLASTCN)) {
982 if (NFS_CMPFH(np, fhp, fhsize)) {
983 m_freem(mrep);
984 return (EISDIR);
985 }
986 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
987 if (error) {
988 m_freem(mrep);
989 return (error);
990 }
991 newvp = NFSTOV(np);
992 if (v3) {
993 nfsm_postop_attr(newvp, attrflag);
994 nfsm_postop_attr(dvp, attrflag);
995 } else
996 nfsm_loadattr(newvp, (struct vattr *)0);
997 *vpp = newvp;
998 m_freem(mrep);
999 cnp->cn_flags |= CNP_SAVENAME;
1000 if (!lockparent)
1001 VOP_UNLOCK(dvp, NULL, 0, td);
1002 return (0);
1003 }
1004
1005 if (flags & CNP_ISDOTDOT) {
1006 VOP_UNLOCK(dvp, NULL, 0, td);
1007 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1008 if (error) {
1009 vn_lock(dvp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1010 return (error);
1011 }
1012 newvp = NFSTOV(np);
1013 if (lockparent && (flags & CNP_ISLASTCN) &&
1014 (error = vn_lock(dvp, NULL, LK_EXCLUSIVE, td))) {
1015 vput(newvp);
1016 return (error);
1017 }
1018 } else if (NFS_CMPFH(np, fhp, fhsize)) {
1019 VREF(dvp);
1020 newvp = dvp;
1021 } else {
1022 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1023 if (error) {
1024 m_freem(mrep);
1025 return (error);
1026 }
1027 if (!lockparent || !(flags & CNP_ISLASTCN))
1028 VOP_UNLOCK(dvp, NULL, 0, td);
1029 newvp = NFSTOV(np);
1030 }
1031 if (v3) {
1032 nfsm_postop_attr(newvp, attrflag);
1033 nfsm_postop_attr(dvp, attrflag);
1034 } else
1035 nfsm_loadattr(newvp, (struct vattr *)0);
1036 if (cnp->cn_nameiop != NAMEI_LOOKUP && (flags & CNP_ISLASTCN))
1037 cnp->cn_flags |= CNP_SAVENAME;
1038 if ((cnp->cn_flags & CNP_MAKEENTRY) &&
1039 (cnp->cn_nameiop != NAMEI_DELETE || !(flags & CNP_ISLASTCN))) {
1040 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
1041 cache_enter(dvp, NCPNULL, newvp, cnp);
1042 }
1043 *vpp = newvp;
1044 m_freem(mrep);
1045nfsmout:
1046 if (error) {
1047 if (newvp != NULLVP) {
1048 vrele(newvp);
1049 *vpp = NULLVP;
1050 }
1051 if ((cnp->cn_nameiop == NAMEI_CREATE || cnp->cn_nameiop == NAMEI_RENAME) &&
1052 (flags & CNP_ISLASTCN) && error == ENOENT) {
1053 if (!lockparent)
1054 VOP_UNLOCK(dvp, NULL, 0, td);
1055 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
1056 error = EROFS;
1057 else
1058 error = EJUSTRETURN;
1059 }
1060 if (cnp->cn_nameiop != NAMEI_LOOKUP && (flags & CNP_ISLASTCN))
1061 cnp->cn_flags |= CNP_SAVENAME;
1062 }
1063 return (error);
1064}
1065
1066/*
1067 * nfs read call.
1068 * Just call nfs_bioread() to do the work.
1069 */
1070static int
1071nfs_read(ap)
1072 struct vop_read_args /* {
1073 struct vnode *a_vp;
1074 struct uio *a_uio;
1075 int a_ioflag;
1076 struct ucred *a_cred;
1077 } */ *ap;
1078{
1079 struct vnode *vp = ap->a_vp;
1080
1081 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag));
1082 switch (vp->v_type) {
1083 case VREG:
1084 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag));
1085 case VDIR:
1086 return (EISDIR);
1087 default:
1088 return EOPNOTSUPP;
1089 }
1090}
1091
1092/*
1093 * nfs readlink call
1094 */
1095static int
1096nfs_readlink(ap)
1097 struct vop_readlink_args /* {
1098 struct vnode *a_vp;
1099 struct uio *a_uio;
1100 struct ucred *a_cred;
1101 } */ *ap;
1102{
1103 struct vnode *vp = ap->a_vp;
1104
1105 if (vp->v_type != VLNK)
1106 return (EINVAL);
1107 return (nfs_bioread(vp, ap->a_uio, 0));
1108}
1109
1110/*
1111 * Do a readlink rpc.
1112 * Called by nfs_doio() from below the buffer cache.
1113 */
1114int
1115nfs_readlinkrpc(struct vnode *vp, struct uio *uiop)
1116{
1117 u_int32_t *tl;
1118 caddr_t cp;
1119 int32_t t1, t2;
1120 caddr_t bpos, dpos, cp2;
1121 int error = 0, len, attrflag;
1122 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1123 int v3 = NFS_ISV3(vp);
1124
1125 nfsstats.rpccnt[NFSPROC_READLINK]++;
1126 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1127 nfsm_fhtom(vp, v3);
1128 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, nfs_vpcred(vp, ND_CHECK));
1129 if (v3)
1130 nfsm_postop_attr(vp, attrflag);
1131 if (!error) {
1132 nfsm_strsiz(len, NFS_MAXPATHLEN);
1133 if (len == NFS_MAXPATHLEN) {
1134 struct nfsnode *np = VTONFS(vp);
1135 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
1136 len = np->n_size;
1137 }
1138 nfsm_mtouio(uiop, len);
1139 }
1140 m_freem(mrep);
1141nfsmout:
1142 return (error);
1143}
1144
1145/*
1146 * nfs read rpc call
1147 * Ditto above
1148 */
1149int
1150nfs_readrpc(struct vnode *vp, struct uio *uiop)
1151{
1152 u_int32_t *tl;
1153 caddr_t cp;
1154 int32_t t1, t2;
1155 caddr_t bpos, dpos, cp2;
1156 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1157 struct nfsmount *nmp;
1158 int error = 0, len, retlen, tsiz, eof, attrflag;
1159 int v3 = NFS_ISV3(vp);
1160
1161#ifndef nolint
1162 eof = 0;
1163#endif
1164 nmp = VFSTONFS(vp->v_mount);
1165 tsiz = uiop->uio_resid;
1166 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1167 return (EFBIG);
1168 while (tsiz > 0) {
1169 nfsstats.rpccnt[NFSPROC_READ]++;
1170 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1171 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1172 nfsm_fhtom(vp, v3);
1173 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1174 if (v3) {
1175 txdr_hyper(uiop->uio_offset, tl);
1176 *(tl + 2) = txdr_unsigned(len);
1177 } else {
1178 *tl++ = txdr_unsigned(uiop->uio_offset);
1179 *tl++ = txdr_unsigned(len);
1180 *tl = 0;
1181 }
1182 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, nfs_vpcred(vp, ND_READ));
1183 if (v3) {
1184 nfsm_postop_attr(vp, attrflag);
1185 if (error) {
1186 m_freem(mrep);
1187 goto nfsmout;
1188 }
1189 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1190 eof = fxdr_unsigned(int, *(tl + 1));
1191 } else
1192 nfsm_loadattr(vp, (struct vattr *)0);
1193 nfsm_strsiz(retlen, nmp->nm_rsize);
1194 nfsm_mtouio(uiop, retlen);
1195 m_freem(mrep);
1196 tsiz -= retlen;
1197 if (v3) {
1198 if (eof || retlen == 0) {
1199 tsiz = 0;
1200 }
1201 } else if (retlen < len) {
1202 tsiz = 0;
1203 }
1204 }
1205nfsmout:
1206 return (error);
1207}
1208
1209/*
1210 * nfs write call
1211 */
1212int
1213nfs_writerpc(vp, uiop, iomode, must_commit)
1214 struct vnode *vp;
1215 struct uio *uiop;
1216 int *iomode, *must_commit;
1217{
1218 u_int32_t *tl;
1219 caddr_t cp;
1220 int32_t t1, t2, backup;
1221 caddr_t bpos, dpos, cp2;
1222 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1223 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1224 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1225 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1226
1227#ifndef DIAGNOSTIC
1228 if (uiop->uio_iovcnt != 1)
1229 panic("nfs: writerpc iovcnt > 1");
1230#endif
1231 *must_commit = 0;
1232 tsiz = uiop->uio_resid;
1233 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1234 return (EFBIG);
1235 while (tsiz > 0) {
1236 nfsstats.rpccnt[NFSPROC_WRITE]++;
1237 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1238 nfsm_reqhead(vp, NFSPROC_WRITE,
1239 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1240 nfsm_fhtom(vp, v3);
1241 if (v3) {
1242 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1243 txdr_hyper(uiop->uio_offset, tl);
1244 tl += 2;
1245 *tl++ = txdr_unsigned(len);
1246 *tl++ = txdr_unsigned(*iomode);
1247 *tl = txdr_unsigned(len);
1248 } else {
1249 u_int32_t x;
1250
1251 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1252 /* Set both "begin" and "current" to non-garbage. */
1253 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1254 *tl++ = x; /* "begin offset" */
1255 *tl++ = x; /* "current offset" */
1256 x = txdr_unsigned(len);
1257 *tl++ = x; /* total to this offset */
1258 *tl = x; /* size of this write */
1259 }
1260 nfsm_uiotom(uiop, len);
1261 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, nfs_vpcred(vp, ND_WRITE));
1262 if (v3) {
1263 wccflag = NFSV3_WCCCHK;
1264 nfsm_wcc_data(vp, wccflag);
1265 if (!error) {
1266 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1267 + NFSX_V3WRITEVERF);
1268 rlen = fxdr_unsigned(int, *tl++);
1269 if (rlen == 0) {
1270 error = NFSERR_IO;
1271 m_freem(mrep);
1272 break;
1273 } else if (rlen < len) {
1274 backup = len - rlen;
1275 uiop->uio_iov->iov_base -= backup;
1276 uiop->uio_iov->iov_len += backup;
1277 uiop->uio_offset -= backup;
1278 uiop->uio_resid += backup;
1279 len = rlen;
1280 }
1281 commit = fxdr_unsigned(int, *tl++);
1282
1283 /*
1284 * Return the lowest commitment level
1285 * obtained by any of the RPCs.
1286 */
1287 if (committed == NFSV3WRITE_FILESYNC)
1288 committed = commit;
1289 else if (committed == NFSV3WRITE_DATASYNC &&
1290 commit == NFSV3WRITE_UNSTABLE)
1291 committed = commit;
1292 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1293 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1294 NFSX_V3WRITEVERF);
1295 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1296 } else if (bcmp((caddr_t)tl,
1297 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1298 *must_commit = 1;
1299 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1300 NFSX_V3WRITEVERF);
1301 }
1302 }
1303 } else
1304 nfsm_loadattr(vp, (struct vattr *)0);
1305 if (wccflag)
1306 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1307 m_freem(mrep);
1308 if (error)
1309 break;
1310 tsiz -= len;
1311 }
1312nfsmout:
1313 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1314 committed = NFSV3WRITE_FILESYNC;
1315 *iomode = committed;
1316 if (error)
1317 uiop->uio_resid = tsiz;
1318 return (error);
1319}
1320
1321/*
1322 * nfs mknod rpc
1323 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1324 * mode set to specify the file type and the size field for rdev.
1325 */
1326static int
1327nfs_mknodrpc(dvp, vpp, cnp, vap)
1328 struct vnode *dvp;
1329 struct vnode **vpp;
1330 struct componentname *cnp;
1331 struct vattr *vap;
1332{
1333 struct nfsv2_sattr *sp;
1334 u_int32_t *tl;
1335 caddr_t cp;
1336 int32_t t1, t2;
1337 struct vnode *newvp = (struct vnode *)0;
1338 struct nfsnode *np = (struct nfsnode *)0;
1339 struct vattr vattr;
1340 char *cp2;
1341 caddr_t bpos, dpos;
1342 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1343 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1344 u_int32_t rdev;
1345 int v3 = NFS_ISV3(dvp);
1346
1347 if (vap->va_type == VCHR || vap->va_type == VBLK)
1348 rdev = txdr_unsigned(vap->va_rdev);
1349 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1350 rdev = nfs_xdrneg1;
1351 else {
1352 return (EOPNOTSUPP);
1353 }
1354 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
1355 return (error);
1356 }
1357 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1358 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1359 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1360 nfsm_fhtom(dvp, v3);
1361 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1362 if (v3) {
1363 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1364 *tl++ = vtonfsv3_type(vap->va_type);
1365 nfsm_v3attrbuild(vap, FALSE);
1366 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1367 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1368 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1369 *tl = txdr_unsigned(uminor(vap->va_rdev));
1370 }
1371 } else {
1372 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1373 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1374 sp->sa_uid = nfs_xdrneg1;
1375 sp->sa_gid = nfs_xdrneg1;
1376 sp->sa_size = rdev;
1377 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1378 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1379 }
1380 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_td, cnp->cn_cred);
1381 if (!error) {
1382 nfsm_mtofh(dvp, newvp, v3, gotvp);
1383 if (!gotvp) {
1384 if (newvp) {
1385 vput(newvp);
1386 newvp = (struct vnode *)0;
1387 }
1388 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1389 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td, &np);
1390 if (!error)
1391 newvp = NFSTOV(np);
1392 }
1393 }
1394 if (v3)
1395 nfsm_wcc_data(dvp, wccflag);
1396 m_freem(mrep);
1397nfsmout:
1398 if (error) {
1399 if (newvp)
1400 vput(newvp);
1401 } else {
1402 if (cnp->cn_flags & CNP_MAKEENTRY)
1403 cache_enter(dvp, NCPNULL, newvp, cnp);
1404 *vpp = newvp;
1405 }
1406 VTONFS(dvp)->n_flag |= NMODIFIED;
1407 if (!wccflag)
1408 VTONFS(dvp)->n_attrstamp = 0;
1409 return (error);
1410}
1411
1412/*
1413 * nfs mknod vop
1414 * just call nfs_mknodrpc() to do the work.
1415 */
1416/* ARGSUSED */
1417static int
1418nfs_mknod(ap)
1419 struct vop_mknod_args /* {
1420 struct vnode *a_dvp;
1421 struct vnode **a_vpp;
1422 struct componentname *a_cnp;
1423 struct vattr *a_vap;
1424 } */ *ap;
1425{
1426 return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1427}
1428
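/* Verifier used for NFSv3 exclusive (O_EXCL) creates; see the exclusive-create path in nfs_create(). */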
1429static u_long create_verf;
1430/*
1431 * nfs file create call
1432 */
1433static int
1434nfs_create(ap)
1435 struct vop_create_args /* {
1436 struct vnode *a_dvp;
1437 struct vnode **a_vpp;
1438 struct componentname *a_cnp;
1439 struct vattr *a_vap;
1440 } */ *ap;
1441{
1442 struct vnode *dvp = ap->a_dvp;
1443 struct vattr *vap = ap->a_vap;
1444 struct componentname *cnp = ap->a_cnp;
1445 struct nfsv2_sattr *sp;
1446 u_int32_t *tl;
1447 caddr_t cp;
1448 int32_t t1, t2;
1449 struct nfsnode *np = (struct nfsnode *)0;
1450 struct vnode *newvp = (struct vnode *)0;
1451 caddr_t bpos, dpos, cp2;
1452 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1453 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1454 struct vattr vattr;
1455 int v3 = NFS_ISV3(dvp);
1456
1457 /*
1458 * Oops, not for me..
1459 */
1460 if (vap->va_type == VSOCK)
1461 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1462
1463 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
1464 return (error);
1465 }
1466 if (vap->va_vaflags & VA_EXCLUSIVE)
1467 fmode |= O_EXCL;
1468again:
1469 nfsstats.rpccnt[NFSPROC_CREATE]++;
1470 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1471 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1472 nfsm_fhtom(dvp, v3);
1473 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1474 if (v3) {
1475 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1476 if (fmode & O_EXCL) {
1477 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1478 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1479#ifdef INET
1480 if (!TAILQ_EMPTY(&in_ifaddrhead))
1481 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
1482 else
1483#endif
1484 *tl++ = create_verf;
1485 *tl = ++create_verf;
1486 } else {
1487 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1488 nfsm_v3attrbuild(vap, FALSE);
1489 }
1490 } else {
1491 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1492 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1493 sp->sa_uid = nfs_xdrneg1;
1494 sp->sa_gid = nfs_xdrneg1;
1495 sp->sa_size = 0;
1496 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1497 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1498 }
1499 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_td, cnp->cn_cred);
1500 if (!error) {
1501 nfsm_mtofh(dvp, newvp, v3, gotvp);
1502 if (!gotvp) {
1503 if (newvp) {
1504 vput(newvp);
1505 newvp = (struct vnode *)0;
1506 }
1507 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1508 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td, &np);
1509 if (!error)
1510 newvp = NFSTOV(np);
1511 }
1512 }
1513 if (v3)
1514 nfsm_wcc_data(dvp, wccflag);
1515 m_freem(mrep);
1516nfsmout:
1517 if (error) {
1518 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1519 fmode &= ~O_EXCL;
1520 goto again;
1521 }
1522 if (newvp)
1523 vput(newvp);
1524 } else if (v3 && (fmode & O_EXCL)) {
1525 /*
1526 * We are normally called with only a partially initialized
1527 * VAP. Since the NFSv3 spec says that server may use the
1528 * file attributes to store the verifier, the spec requires
1529 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1530 * in atime, but we can't really assume that all servers will
1531 * so we ensure that our SETATTR sets both atime and mtime.
1532 */
1533 if (vap->va_mtime.tv_sec == VNOVAL)
1534 vfs_timestamp(&vap->va_mtime);
1535 if (vap->va_atime.tv_sec == VNOVAL)
1536 vap->va_atime = vap->va_mtime;
1537 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_td);
1538 }
1539 if (!error) {
1540 if (cnp->cn_flags & CNP_MAKEENTRY)
1541 cache_enter(dvp, NCPNULL, newvp, cnp);
1542 /*
1543 * The new np may have enough info for access
1544 * checks, make sure rucred and wucred are
1545 * initialized for read and write rpc's.
1546 */
1547 np = VTONFS(newvp);
1548 if (np->n_rucred == NULL)
1549 np->n_rucred = crhold(cnp->cn_cred);
1550 if (np->n_wucred == NULL)
1551 np->n_wucred = crhold(cnp->cn_cred);
1552 *ap->a_vpp = newvp;
1553 }
1554 VTONFS(dvp)->n_flag |= NMODIFIED;
1555 if (!wccflag)
1556 VTONFS(dvp)->n_attrstamp = 0;
1557 return (error);
1558}
1559
1560/*
1561 * nfs file remove call
1562 * To try and make nfs semantics closer to ufs semantics, a file that has
1563 * other processes using the vnode is renamed instead of removed and then
1564 * removed later on the last close.
1565 * - If v_usecount > 1
1566 * If a rename is not already in the works
1567 * call nfs_sillyrename() to set it up
1568 * else
1569 * do the remove rpc
1570 */
1571static int
1572nfs_remove(ap)
1573 struct vop_remove_args /* {
1574 struct vnodeop_desc *a_desc;
1575 struct vnode * a_dvp;
1576 struct vnode * a_vp;
1577 struct componentname * a_cnp;
1578 } */ *ap;
1579{
1580 struct vnode *vp = ap->a_vp;
1581 struct vnode *dvp = ap->a_dvp;
1582 struct componentname *cnp = ap->a_cnp;
1583 struct nfsnode *np = VTONFS(vp);
1584 int error = 0;
1585 struct vattr vattr;
1586
1587#ifndef DIAGNOSTIC
1588 if ((cnp->cn_flags & CNP_HASBUF) == 0)
1589 panic("nfs_remove: no name");
1590 if (vp->v_usecount < 1)
1591 panic("nfs_remove: bad v_usecount");
1592#endif
1593 if (vp->v_type == VDIR)
1594 error = EPERM;
1595 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1596 VOP_GETATTR(vp, &vattr, cnp->cn_td) == 0 &&
1597 vattr.va_nlink > 1)) {
1598 /*
1599 * Purge the name cache so that the chance of a lookup for
1600 * the name succeeding while the remove is in progress is
1601 * minimized. Without node locking it can still happen, such
1602 * that an I/O op returns ESTALE, but since you get this if
1603 * another host removes the file..
1604 */
1605 cache_purge(vp);
1606 /*
1607 * throw away biocache buffers, mainly to avoid
1608 * unnecessary delayed writes later.
1609 */
1610 error = nfs_vinvalbuf(vp, 0, cnp->cn_td, 1);
1611 /* Do the rpc */
1612 if (error != EINTR)
1613 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1614 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td);
1615 /*
1616 * Kludge City: If the first reply to the remove rpc is lost..
1617 * the reply to the retransmitted request will be ENOENT
1618 since the file was in fact removed.
1619 * Therefore, we cheat and return success.
1620 */
1621 if (error == ENOENT)
1622 error = 0;
1623 } else if (!np->n_sillyrename)
1624 error = nfs_sillyrename(dvp, vp, cnp);
1625 np->n_attrstamp = 0;
1626 return (error);
1627}
1628
1629/*
1630 * nfs file remove rpc called from nfs_inactive
1631 */
1632int
1633nfs_removeit(struct sillyrename *sp)
1634{
1635
1636 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen,
1637 sp->s_cred, NULL));
1638}
1639
1640/*
1641 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1642 */
1643static int
1644nfs_removerpc(dvp, name, namelen, cred, td)
1645 struct vnode *dvp;
1646 const char *name;
1647 int namelen;
1648 struct ucred *cred;
1649 struct thread *td;
1650{
1651 u_int32_t *tl;
1652 caddr_t cp;
1653 int32_t t1, t2;
1654 caddr_t bpos, dpos, cp2;
1655 int error = 0, wccflag = NFSV3_WCCRATTR;
1656 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1657 int v3 = NFS_ISV3(dvp);
1658
1659 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1660 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1661 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1662 nfsm_fhtom(dvp, v3);
1663 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1664 nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
1665 if (v3)
1666 nfsm_wcc_data(dvp, wccflag);
1667 m_freem(mrep);
1668nfsmout:
1669 VTONFS(dvp)->n_flag |= NMODIFIED;
1670 if (!wccflag)
1671 VTONFS(dvp)->n_attrstamp = 0;
1672 return (error);
1673}
1674
1675/*
1676 * nfs file rename call
1677 */
1678static int
1679nfs_rename(ap)
1680 struct vop_rename_args /* {
1681 struct vnode *a_fdvp;
1682 struct vnode *a_fvp;
1683 struct componentname *a_fcnp;
1684 struct vnode *a_tdvp;
1685 struct vnode *a_tvp;
1686 struct componentname *a_tcnp;
1687 } */ *ap;
1688{
1689 struct vnode *fvp = ap->a_fvp;
1690 struct vnode *tvp = ap->a_tvp;
1691 struct vnode *fdvp = ap->a_fdvp;
1692 struct vnode *tdvp = ap->a_tdvp;
1693 struct componentname *tcnp = ap->a_tcnp;
1694 struct componentname *fcnp = ap->a_fcnp;
1695 int error;
1696
1697#ifndef DIAGNOSTIC
1698 if ((tcnp->cn_flags & CNP_HASBUF) == 0 ||
1699 (fcnp->cn_flags & CNP_HASBUF) == 0)
1700 panic("nfs_rename: no name");
1701#endif
1702 /* Check for cross-device rename */
1703 if ((fvp->v_mount != tdvp->v_mount) ||
1704 (tvp && (fvp->v_mount != tvp->v_mount))) {
1705 error = EXDEV;
1706 goto out;
1707 }
1708
1709 /*
1710 * We have to flush B_DELWRI data prior to renaming
1711 * the file. If we don't, the delayed-write buffers
1712 * can be flushed out later after the file has gone stale
1713 * under NFSV3. NFSV2 does not have this problem because
1714 * ( as far as I can tell ) it flushes dirty buffers more
1715 * often.
1716 */
1717
1718 VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_td);
1719 if (tvp)
1720 VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_td);
1721
1722 /*
1723 * If the tvp exists and is in use, sillyrename it before doing the
1724 * rename of the new file over it.
1725 * XXX Can't sillyrename a directory.
1726 */
1727 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1728 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1729 vput(tvp);
1730 tvp = NULL;
1731 }
1732
1733 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1734 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1735 tcnp->cn_td);
1736
1737 if (fvp->v_type == VDIR) {
1738 if (tvp != NULL && tvp->v_type == VDIR)
1739 cache_purge(tdvp);
1740 cache_purge(fdvp);
1741 }
1742
1743out:
1744 if (tdvp == tvp)
1745 vrele(tdvp);
1746 else
1747 vput(tdvp);
1748 if (tvp)
1749 vput(tvp);
1750 vrele(fdvp);
1751 vrele(fvp);
1752 /*
1753 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1754 */
1755 if (error == ENOENT)
1756 error = 0;
1757 return (error);
1758}
1759
1760/*
1761 * nfs file rename rpc called from nfs_sillyrename() below
1762 */
1763static int
1764nfs_renameit(sdvp, scnp, sp)
1765 struct vnode *sdvp;
1766 struct componentname *scnp;
40393ded 1767 struct sillyrename *sp;
1768{
1769 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
dadab5e9 1770 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_td));
1771}
1772
1773/*
1774 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1775 */
1776static int
1777nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, td)
1778 struct vnode *fdvp;
1779 const char *fnameptr;
1780 int fnamelen;
40393ded 1781 struct vnode *tdvp;
1782 const char *tnameptr;
1783 int tnamelen;
1784 struct ucred *cred;
dadab5e9 1785 struct thread *td;
984263bc 1786{
1787 u_int32_t *tl;
1788 caddr_t cp;
1789 int32_t t1, t2;
1790 caddr_t bpos, dpos, cp2;
1791 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1792 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1793 int v3 = NFS_ISV3(fdvp);
1794
1795 nfsstats.rpccnt[NFSPROC_RENAME]++;
1796 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1797 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1798 nfsm_rndup(tnamelen));
1799 nfsm_fhtom(fdvp, v3);
1800 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1801 nfsm_fhtom(tdvp, v3);
1802 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
dadab5e9 1803 nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
1804 if (v3) {
1805 nfsm_wcc_data(fdvp, fwccflag);
1806 nfsm_wcc_data(tdvp, twccflag);
1807 }
1808 m_freem(mrep);
1809nfsmout:
1810 VTONFS(fdvp)->n_flag |= NMODIFIED;
1811 VTONFS(tdvp)->n_flag |= NMODIFIED;
1812 if (!fwccflag)
1813 VTONFS(fdvp)->n_attrstamp = 0;
1814 if (!twccflag)
1815 VTONFS(tdvp)->n_attrstamp = 0;
1816 return (error);
1817}
1818
1819/*
1820 * nfs hard link create call
1821 */
1822static int
1823nfs_link(ap)
1824 struct vop_link_args /* {
1825 struct vnode *a_tdvp;
1826 struct vnode *a_vp;
1827 struct componentname *a_cnp;
1828 } */ *ap;
1829{
1830 struct vnode *vp = ap->a_vp;
1831 struct vnode *tdvp = ap->a_tdvp;
1832 struct componentname *cnp = ap->a_cnp;
1833 u_int32_t *tl;
1834 caddr_t cp;
1835 int32_t t1, t2;
1836 caddr_t bpos, dpos, cp2;
1837 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1838 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1839 int v3;
1840
1841 if (vp->v_mount != tdvp->v_mount) {
1842 return (EXDEV);
1843 }
1844
1845 /*
1846 * Push all writes to the server, so that the attribute cache
1847 * doesn't get "out of sync" with the server.
1848 * XXX There should be a better way!
1849 */
3b568787 1850 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_td);
1851
1852 v3 = NFS_ISV3(vp);
1853 nfsstats.rpccnt[NFSPROC_LINK]++;
1854 nfsm_reqhead(vp, NFSPROC_LINK,
1855 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1856 nfsm_fhtom(vp, v3);
1857 nfsm_fhtom(tdvp, v3);
1858 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
dadab5e9 1859 nfsm_request(vp, NFSPROC_LINK, cnp->cn_td, cnp->cn_cred);
1860 if (v3) {
1861 nfsm_postop_attr(vp, attrflag);
1862 nfsm_wcc_data(tdvp, wccflag);
1863 }
1864 m_freem(mrep);
1865nfsmout:
1866 VTONFS(tdvp)->n_flag |= NMODIFIED;
1867 if (!attrflag)
1868 VTONFS(vp)->n_attrstamp = 0;
1869 if (!wccflag)
1870 VTONFS(tdvp)->n_attrstamp = 0;
1871 /*
1872 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1873 */
1874 if (error == EEXIST)
1875 error = 0;
1876 return (error);
1877}
1878
1879/*
1880 * nfs symbolic link create call
1881 */
1882static int
1883nfs_symlink(ap)
1884 struct vop_symlink_args /* {
1885 struct vnode *a_dvp;
1886 struct vnode **a_vpp;
1887 struct componentname *a_cnp;
1888 struct vattr *a_vap;
1889 char *a_target;
1890 } */ *ap;
1891{
1892 struct vnode *dvp = ap->a_dvp;
1893 struct vattr *vap = ap->a_vap;
1894 struct componentname *cnp = ap->a_cnp;
1895 struct nfsv2_sattr *sp;
1896 u_int32_t *tl;
1897 caddr_t cp;
1898 int32_t t1, t2;
1899 caddr_t bpos, dpos, cp2;
1900 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1901 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1902 struct vnode *newvp = (struct vnode *)0;
1903 int v3 = NFS_ISV3(dvp);
1904
1905 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1906 slen = strlen(ap->a_target);
1907 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1908 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1909 nfsm_fhtom(dvp, v3);
1910 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1911 if (v3) {
1912 nfsm_v3attrbuild(vap, FALSE);
1913 }
1914 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
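	/*
	 * For NFSv2 a sattr is appended instead of the v3 attribute builder
	 * above; fields we do not want the server to set are passed as -1
	 * (nfs_xdrneg1).
	 */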
1915 if (!v3) {
1916 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1917 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1918 sp->sa_uid = nfs_xdrneg1;
1919 sp->sa_gid = nfs_xdrneg1;
1920 sp->sa_size = nfs_xdrneg1;
1921 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1922 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1923 }
1924
1925 /*
1926 * Issue the NFS request and get the rpc response.
1927 *
1928 * Only NFSv3 responses returning an error of 0 actually return
1929 * a file handle that can be converted into newvp without having
1930 * to do an extra lookup rpc.
1931 */
dadab5e9 1932 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_td, cnp->cn_cred);
1933 if (v3) {
1934 if (error == 0)
1935 nfsm_mtofh(dvp, newvp, v3, gotvp);
1936 nfsm_wcc_data(dvp, wccflag);
1937 }
1938
1939 /*
1940 * Error exits from the nfsm_* macros jump to nfsmout below; mrep is freed either way.
1941 */
1942
1943 m_freem(mrep);
1944nfsmout:
1945
1946 /*
1947 * If we get an EEXIST error, silently convert it to no-error
1948 * in case of an NFS retry.
1949 */
1950 if (error == EEXIST)
1951 error = 0;
1952
1953 /*
1954 * If we do not have (or no longer have) an error, but could not
1955 * extract the newvp from the response because the request was NFSv2
1956 * or the error was EEXIST, we have to do a lookup in order to obtain
1957 * a newvp to return.
1958 */
1959 if (error == 0 && newvp == NULL) {
1960 struct nfsnode *np = NULL;
1961
1962 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
dadab5e9 1963 cnp->cn_cred, cnp->cn_td, &np);
1964 if (!error)
1965 newvp = NFSTOV(np);
1966 }
1967 if (error) {
1968 if (newvp)
1969 vput(newvp);
1970 } else {
1971 *ap->a_vpp = newvp;
1972 }
1973 VTONFS(dvp)->n_flag |= NMODIFIED;
1974 if (!wccflag)
1975 VTONFS(dvp)->n_attrstamp = 0;
1976 return (error);
1977}
1978
1979/*
1980 * nfs make dir call
1981 */
1982static int
1983nfs_mkdir(ap)
1984 struct vop_mkdir_args /* {
1985 struct vnode *a_dvp;
1986 struct vnode **a_vpp;
1987 struct componentname *a_cnp;
1988 struct vattr *a_vap;
1989 } */ *ap;
1990{
1991 struct vnode *dvp = ap->a_dvp;
1992 struct vattr *vap = ap->a_vap;
1993 struct componentname *cnp = ap->a_cnp;
1994 struct nfsv2_sattr *sp;
1995 u_int32_t *tl;
1996 caddr_t cp;
1997 int32_t t1, t2;
1998 int len;
1999 struct nfsnode *np = (struct nfsnode *)0;
2000 struct vnode *newvp = (struct vnode *)0;
2001 caddr_t bpos, dpos, cp2;
2002 int error = 0, wccflag = NFSV3_WCCRATTR;
2003 int gotvp = 0;
2004 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2005 struct vattr vattr;
2006 int v3 = NFS_ISV3(dvp);
2007
3b568787 2008 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
2009 return (error);
2010 }
2011 len = cnp->cn_namelen;
2012 nfsstats.rpccnt[NFSPROC_MKDIR]++;
2013 nfsm_reqhead(dvp, NFSPROC_MKDIR,
2014 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2015 nfsm_fhtom(dvp, v3);
2016 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2017 if (v3) {
2018 nfsm_v3attrbuild(vap, FALSE);
2019 } else {
2020 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2021 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2022 sp->sa_uid = nfs_xdrneg1;
2023 sp->sa_gid = nfs_xdrneg1;
2024 sp->sa_size = nfs_xdrneg1;
2025 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2026 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2027 }
dadab5e9 2028 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_td, cnp->cn_cred);
2029 if (!error)
2030 nfsm_mtofh(dvp, newvp, v3, gotvp);
2031 if (v3)
2032 nfsm_wcc_data(dvp, wccflag);
2033 m_freem(mrep);
2034nfsmout:
2035 VTONFS(dvp)->n_flag |= NMODIFIED;
2036 if (!wccflag)
2037 VTONFS(dvp)->n_attrstamp = 0;
2038 /*
2039 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
2040 * if we can succeed in looking up the directory.
2041 */
2042 if (error == EEXIST || (!error && !gotvp)) {
2043 if (newvp) {
2044 vrele(newvp);
2045 newvp = (struct vnode *)0;
2046 }
2047 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
dadab5e9 2048 cnp->cn_td, &np);
2049 if (!error) {
2050 newvp = NFSTOV(np);
2051 if (newvp->v_type != VDIR)
2052 error = EEXIST;
2053 }
2054 }
2055 if (error) {
2056 if (newvp)
2057 vrele(newvp);
2058 } else
2059 *ap->a_vpp = newvp;
2060 return (error);
2061}
2062
2063/*
2064 * nfs remove directory call
2065 */
2066static int
2067nfs_rmdir(ap)
2068 struct vop_rmdir_args /* {
2069 struct vnode *a_dvp;
2070 struct vnode *a_vp;
2071 struct componentname *a_cnp;
2072 } */ *ap;
2073{
2074 struct vnode *vp = ap->a_vp;
2075 struct vnode *dvp = ap->a_dvp;
2076 struct componentname *cnp = ap->a_cnp;
2077 u_int32_t *tl;
2078 caddr_t cp;
2079 int32_t t1, t2;
2080 caddr_t bpos, dpos, cp2;
2081 int error = 0, wccflag = NFSV3_WCCRATTR;
2082 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2083 int v3 = NFS_ISV3(dvp);
2084
2085 if (dvp == vp)
2086 return (EINVAL);
2087 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2088 nfsm_reqhead(dvp, NFSPROC_RMDIR,
2089 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2090 nfsm_fhtom(dvp, v3);
2091 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
dadab5e9 2092 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_td, cnp->cn_cred);
2093 if (v3)
2094 nfsm_wcc_data(dvp, wccflag);
2095 m_freem(mrep);
2096nfsmout:
2097 VTONFS(dvp)->n_flag |= NMODIFIED;
2098 if (!wccflag)
2099 VTONFS(dvp)->n_attrstamp = 0;
2100 cache_purge(dvp);
2101 cache_purge(vp);
2102 /*
2103 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2104 */
2105 if (error == ENOENT)
2106 error = 0;
2107 return (error);
2108}
2109
2110/*
2111 * nfs readdir call
2112 */
2113static int
2114nfs_readdir(ap)
2115 struct vop_readdir_args /* {
2116 struct vnode *a_vp;
2117 struct uio *a_uio;
2118 struct ucred *a_cred;
2119 } */ *ap;
2120{
2121 struct vnode *vp = ap->a_vp;
2122 struct nfsnode *np = VTONFS(vp);
2123 struct uio *uio = ap->a_uio;
2124 int tresid, error;
2125 struct vattr vattr;
2126
2127 if (vp->v_type != VDIR)
2128 return (EPERM);
2129 /*
2130 * First, check for hit on the EOF offset cache
2131 */
2132 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
2133 (np->n_flag & NMODIFIED) == 0) {
2134 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
2135 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
2136 nfsstats.direofcache_hits++;
2137 return (0);
2138 }
3b568787 2139 } else if (VOP_GETATTR(vp, &vattr, uio->uio_td) == 0 &&
2140 np->n_mtime == vattr.va_mtime.tv_sec) {
2141 nfsstats.direofcache_hits++;
2142 return (0);
2143 }
2144 }
2145
2146 /*
2147 * Call nfs_bioread() to do the real work.
2148 */
2149 tresid = uio->uio_resid;
3b568787 2150 error = nfs_bioread(vp, uio, 0);
2151
2152 if (!error && uio->uio_resid == tresid)
2153 nfsstats.direofcache_misses++;
2154 return (error);
2155}
2156
2157/*
2158 * Readdir rpc call.
2159 * Called from below the buffer cache by nfs_doio().
2160 */
2161int
3b568787 2162nfs_readdirrpc(struct vnode *vp, struct uio *uiop)
984263bc 2163{
2164 int len, left;
2165 struct dirent *dp = NULL;
2166 u_int32_t *tl;
2167 caddr_t cp;
2168 int32_t t1, t2;
2169 nfsuint64 *cookiep;
2170 caddr_t bpos, dpos, cp2;
2171 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2172 nfsuint64 cookie;
2173 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2174 struct nfsnode *dnp = VTONFS(vp);
2175 u_quad_t fileno;
2176 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2177 int attrflag;
2178 int v3 = NFS_ISV3(vp);
2179
2180#ifndef DIAGNOSTIC
2181 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2182 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2183 panic("nfs readdirrpc bad uio");
2184#endif
2185
2186 /*
2187 * If there is no cookie, assume directory was stale.
2188 */
2189 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2190 if (cookiep)
2191 cookie = *cookiep;
2192 else
2193 return (NFSERR_BAD_COOKIE);
2194 /*
2195 * Loop around doing readdir rpc's of size nm_readdirsize
2196 * truncated to a multiple of DIRBLKSIZ.
2197 * The stopping criterion is EOF or a full buffer.
2198 */
2199 while (more_dirs && bigenough) {
2200 nfsstats.rpccnt[NFSPROC_READDIR]++;
2201 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2202 NFSX_READDIR(v3));
2203 nfsm_fhtom(vp, v3);
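		/*
		 * NFSv3 arguments carry a 64 bit cookie plus the cookie
		 * verifier returned by the previous readdir reply; NFSv2
		 * uses only a 32 bit cookie.  Both are followed by the
		 * byte count for this request.
		 */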
2204 if (v3) {
2205 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2206 *tl++ = cookie.nfsuquad[0];
2207 *tl++ = cookie.nfsuquad[1];
2208 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2209 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2210 } else {
2211 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2212 *tl++ = cookie.nfsuquad[0];
2213 }
2214 *tl = txdr_unsigned(nmp->nm_readdirsize);
c1cf1e59 2215 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, nfs_vpcred(vp, ND_READ));
2216 if (v3) {
2217 nfsm_postop_attr(vp, attrflag);
2218 if (!error) {
2219 nfsm_dissect(tl, u_int32_t *,
2220 2 * NFSX_UNSIGNED);
2221 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2222 dnp->n_cookieverf.nfsuquad[1] = *tl;
2223 } else {
2224 m_freem(mrep);
2225 goto nfsmout;
2226 }
2227 }
2228 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2229 more_dirs = fxdr_unsigned(int, *tl);
2230
2231 /* loop thru the dir entries, doctoring them to 4bsd form */
2232 while (more_dirs && bigenough) {
2233 if (v3) {
2234 nfsm_dissect(tl, u_int32_t *,
2235 3 * NFSX_UNSIGNED);
2236 fileno = fxdr_hyper(tl);
2237 len = fxdr_unsigned(int, *(tl + 2));
2238 } else {
2239 nfsm_dissect(tl, u_int32_t *,
2240 2 * NFSX_UNSIGNED);
2241 fileno = fxdr_unsigned(u_quad_t, *tl++);
2242 len = fxdr_unsigned(int, *tl);
2243 }
2244 if (len <= 0 || len > NFS_MAXNAMLEN) {
2245 error = EBADRPC;
2246 m_freem(mrep);
2247 goto nfsmout;
2248 }
2249 tlen = nfsm_rndup(len);
2250 if (tlen == len)
2251 tlen += 4; /* To ensure null termination */
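			/*
			 * If this entry will not fit in what is left of the
			 * current DIRBLKSIZ block, pad the previous entry's
			 * d_reclen out to the block boundary so no dirent
			 * straddles a directory block.
			 */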
2252 left = DIRBLKSIZ - blksiz;
2253 if ((tlen + DIRHDSIZ) > left) {
2254 dp->d_reclen += left;
2255 uiop->uio_iov->iov_base += left;
2256 uiop->uio_iov->iov_len -= left;
2257 uiop->uio_offset += left;
2258 uiop->uio_resid -= left;
2259 blksiz = 0;
2260 }
2261 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2262 bigenough = 0;
2263 if (bigenough) {
2264 dp = (struct dirent *)uiop->uio_iov->iov_base;
2265 dp->d_fileno = (int)fileno;
2266 dp->d_namlen = len;
2267 dp->d_reclen = tlen + DIRHDSIZ;
2268 dp->d_type = DT_UNKNOWN;
2269 blksiz += dp->d_reclen;
2270 if (blksiz == DIRBLKSIZ)
2271 blksiz = 0;
2272 uiop->uio_offset += DIRHDSIZ;
2273 uiop->uio_resid -= DIRHDSIZ;
2274 uiop->uio_iov->iov_base += DIRHDSIZ;
2275 uiop->uio_iov->iov_len -= DIRHDSIZ;
2276 nfsm_mtouio(uiop, len);
2277 cp = uiop->uio_iov->iov_base;
2278 tlen -= len;
2279 *cp = '\0'; /* null terminate */
2280 uiop->uio_iov->iov_base += tlen;
2281 uiop->uio_iov->iov_len -= tlen;
2282 uiop->uio_offset += tlen;
2283 uiop->uio_resid -= tlen;
2284 } else
2285 nfsm_adv(nfsm_rndup(len));
2286 if (v3) {
2287 nfsm_dissect(tl, u_int32_t *,
2288 3 * NFSX_UNSIGNED);
2289 } else {
2290 nfsm_dissect(tl, u_int32_t *,
2291 2 * NFSX_UNSIGNED);
2292 }
2293 if (bigenough) {
2294 cookie.nfsuquad[0] = *tl++;
2295 if (v3)
2296 cookie.nfsuquad[1] = *tl++;
2297 } else if (v3)
2298 tl += 2;
2299 else
2300 tl++;
2301 more_dirs = fxdr_unsigned(int, *tl);
2302 }
2303 /*
2304 * If at end of rpc data, get the eof boolean
2305 */
2306 if (!more_dirs) {
2307 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2308 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2309 }
2310 m_freem(mrep);
2311 }
2312 /*
2313 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2314 * by increasing d_reclen for the last record.
2315 */
2316 if (blksiz > 0) {
2317 left = DIRBLKSIZ - blksiz;
2318 dp->d_reclen += left;
2319 uiop->uio_iov->iov_base += left;
2320 uiop->uio_iov->iov_len -= left;
2321 uiop->uio_offset += left;
2322 uiop->uio_resid -= left;
2323 }
2324
2325 /*
2326 * We are now either at the end of the directory or have filled the
2327 * block.
2328 */
2329 if (bigenough)
2330 dnp->n_direofoffset = uiop->uio_offset;
2331 else {
2332 if (uiop->uio_resid > 0)
2333 printf("EEK! readdirrpc resid > 0\n");
2334 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2335 *cookiep = cookie;
2336 }
2337nfsmout:
2338 return (error);
2339}
2340
2341/*
2342 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2343 */
2344int
3b568787 2345nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop)
984263bc 2346{
2347 int len, left;
2348 struct dirent *dp;
2349 u_int32_t *tl;
2350 caddr_t cp;
2351 int32_t t1, t2;
2352 struct vnode *newvp;
2353 nfsuint64 *cookiep;
2354 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2355 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2356 struct nameidata nami, *ndp = &nami;
2357 struct componentname *cnp = &ndp->ni_cnd;
2358 nfsuint64 cookie;
2359 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2360 struct nfsnode *dnp = VTONFS(vp), *np;
2361 nfsfh_t *fhp;
2362 u_quad_t fileno;
2363 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2364 int attrflag, fhsize;
2365
2366#ifndef nolint
2367 dp = (struct dirent *)0;
2368#endif
2369#ifndef DIAGNOSTIC
2370 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2371 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2372 panic("nfs readdirplusrpc bad uio");
2373#endif
2374 ndp->ni_dvp = vp;
2375 newvp = NULLVP;
2376
2377 /*
2378 * If there is no cookie, assume directory was stale.
2379 */
2380 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2381 if (cookiep)
2382 cookie = *cookiep;
2383 else
2384 return (NFSERR_BAD_COOKIE);
2385 /*
2386 * Loop around doing readdir rpc's of size nm_readdirsize
2387 * truncated to a multiple of DIRBLKSIZ.
2388 * The stopping criterion is EOF or a full buffer.
2389 */
2390 while (more_dirs && bigenough) {
2391 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2392 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2393 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2394 nfsm_fhtom(vp, 1);
2395 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2396 *tl++ = cookie.nfsuquad[0];
2397 *tl++ = cookie.nfsuquad[1];
2398 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2399 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2400 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2401 *tl = txdr_unsigned(nmp->nm_rsize);
c1cf1e59 2402 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, nfs_vpcred(vp, ND_READ));
2403 nfsm_postop_attr(vp, attrflag);
2404 if (error) {
2405 m_freem(mrep);
2406 goto nfsmout;
2407 }
2408 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2409 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2410 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2411 more_dirs = fxdr_unsigned(int, *tl);
2412
2413 /* loop thru the dir entries, doctoring them to 4bsd form */
2414 while (more_dirs && bigenough) {
2415 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2416 fileno = fxdr_hyper(tl);
2417 len = fxdr_unsigned(int, *(tl + 2));
2418 if (len <= 0 || len > NFS_MAXNAMLEN) {
2419 error = EBADRPC;
2420 m_freem(mrep);
2421 goto nfsmout;
2422 }
2423 tlen = nfsm_rndup(len);
2424 if (tlen == len)
2425 tlen += 4; /* To ensure null termination*/
2426 left = DIRBLKSIZ - blksiz;
2427 if ((tlen + DIRHDSIZ) > left) {
2428 dp->d_reclen += left;
2429 uiop->uio_iov->iov_base += left;
2430 uiop->uio_iov->iov_len -= left;
2431 uiop->uio_offset += left;
2432 uiop->uio_resid -= left;
2433 blksiz = 0;
2434 }
2435 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2436 bigenough = 0;
2437 if (bigenough) {
2438 dp = (struct dirent *)uiop->uio_iov->iov_base;
2439 dp->d_fileno = (int)fileno;
2440 dp->d_namlen = len;
2441 dp->d_reclen = tlen + DIRHDSIZ;
2442 dp->d_type = DT_UNKNOWN;
2443 blksiz += dp->d_reclen;
2444 if (blksiz == DIRBLKSIZ)
2445 blksiz = 0;
2446 uiop->uio_offset += DIRHDSIZ;
2447 uiop->uio_resid -= DIRHDSIZ;
2448 uiop->uio_iov->iov_base += DIRHDSIZ;
2449 uiop->uio_iov->iov_len -= DIRHDSIZ;
2450 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2451 cnp->cn_namelen = len;
2452 nfsm_mtouio(uiop, len);
2453 cp = uiop->uio_iov->iov_base;
2454 tlen -= len;
2455 *cp = '\0';
2456 uiop->uio_iov->iov_base += tlen;
2457 uiop->uio_iov->iov_len -= tlen;
2458 uiop->uio_offset += tlen;
2459 uiop->uio_resid -= tlen;
2460 } else
2461 nfsm_adv(nfsm_rndup(len));
2462 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2463 if (bigenough) {
2464 cookie.nfsuquad[0] = *tl++;
2465 cookie.nfsuquad[1] = *tl++;
2466 } else
2467 tl += 2;
2468
2469 /*
2470 * Since the attributes are before the file handle
2471 * (sigh), we must skip over the attributes and then
2472 * come back and get them.
2473 */
2474 attrflag = fxdr_unsigned(int, *tl);
2475 if (attrflag) {
2476 dpossav1 = dpos;
2477 mdsav1 = md;
2478 nfsm_adv(NFSX_V3FATTR);
2479 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2480 doit = fxdr_unsigned(int, *tl);
2481 if (doit) {
2482 nfsm_getfh(fhp, fhsize, 1);
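				/*
				 * If the returned handle is the directory's
				 * own handle (presumably the "." entry),
				 * just take another reference on the
				 * directory vnode instead of nfs_nget()ing
				 * a new nfsnode.
				 */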
2483 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2484 VREF(vp);
2485 newvp = vp;
2486 np = dnp;
2487 } else {
2488 error = nfs_nget(vp->v_mount, fhp,
2489 fhsize, &np);
2490 if (error)
2491 doit = 0;
2492 else
2493 newvp = NFSTOV(np);
2494 }
2495 }
2496 if (doit && bigenough) {
2497 dpossav2 = dpos;
2498 dpos = dpossav1;
2499 mdsav2 = md;
2500 md = mdsav1;
2501 nfsm_loadattr(newvp, (struct vattr *)0);
2502 dpos = dpossav2;
2503 md = mdsav2;
2504 dp->d_type =
2505 IFTODT(VTTOIF(np->n_vattr.va_type));
2506 ndp->ni_vp = newvp;
bc0c094e 2507 cache_enter(ndp->ni_dvp, NCPNULL, ndp->ni_vp, cnp);
2508 }
2509 } else {
2510 /* Just skip over the file handle */
2511 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2512 i = fxdr_unsigned(int, *tl);
2513 nfsm_adv(nfsm_rndup(i));
2514 }
2515 if (newvp != NULLVP) {
2516 if (newvp == vp)
2517 vrele(newvp);
2518 else
2519 vput(newvp);
2520 newvp = NULLVP;
2521 }
2522 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2523 more_dirs = fxdr_unsigned(int, *tl);
2524 }
2525 /*
2526 * If at end of rpc data, get the eof boolean
2527 */
2528 if (!more_dirs) {
2529 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2530 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2531 }
2532 m_freem(mrep);
2533 }
2534 /*
2535 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2536 * by increasing d_reclen for the last record.
2537 */
2538 if (blksiz > 0) {
2539 left = DIRBLKSIZ - blksiz;
2540 dp->d_reclen += left;
2541 uiop->uio_iov->iov_base += left;
2542 uiop->uio_iov->iov_len -= left;
2543 uiop->uio_offset += left;
2544 uiop->uio_resid -= left;
2545 }
2546
2547 /*
2548 * We are now either at the end of the directory or have filled the
2549 * block.
2550 */
2551 if (bigenough)
2552 dnp->n_direofoffset = uiop->uio_offset;
2553 else {
2554 if (uiop->uio_resid > 0)
2555 printf("EEK! readdirplusrpc resid > 0\n");
2556 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2557 *cookiep = cookie;
2558 }
2559nfsmout:
2560 if (newvp != NULLVP) {
2561 if (newvp == vp)
2562 vrele(newvp);
2563 else
2564 vput(newvp);
2565 newvp = NULLVP;
2566 }
2567 return (error);
2568}
2569
2570/*
2571 * Silly rename. To make the stateless NFS filesystem look a little more
2572 * like "ufs", a remove of an active vnode is translated into a rename to
2573 * a funny looking filename that is removed by nfs_inactive on the
2574 * nfsnode. There is the potential for another process on a different
2575 * client to create the same funny name between the time nfs_lookitup()
2576 * fails and nfs_rename() completes, but...
2577 */
2578static int
2579nfs_sillyrename(dvp, vp, cnp)
2580 struct vnode *dvp, *vp;
2581 struct componentname *cnp;
2582{
40393ded 2583 struct sillyrename *sp;
2584 struct nfsnode *np;
2585 int error;
2586
2587 cache_purge(dvp);
2588 np = VTONFS(vp);
2589#ifndef DIAGNOSTIC
2590 if (vp->v_type == VDIR)
2591 panic("nfs: sillyrename dir");
2592#endif
2593 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2594 M_NFSREQ, M_WAITOK);
2595 sp->s_cred = crdup(cnp->cn_cred);
2596 sp->s_dvp = dvp;
2597 VREF(dvp);
2598
2599 /* Fudge together a funny name */
dadab5e9 2600 sp->s_namlen = sprintf(sp->s_name, ".nfsA%08x4.4", (int)cnp->cn_td);
2601
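	/*
	 * s_name[4] is the 'A' in ".nfsA"; the loop below bumps it up
	 * through 'z' when the generated name already exists, giving up
	 * with EINVAL if the range is exhausted.
	 */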
2602 /* Try lookitups until we get one that isn't there */
2603 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
dadab5e9 2604 cnp->cn_td, (struct nfsnode **)0) == 0) {
2605 sp->s_name[4]++;
2606 if (sp->s_name[4] > 'z') {
2607 error = EINVAL;
2608 goto bad;
2609 }
2610 }
2611 error = nfs_renameit(dvp, cnp, sp);
2612 if (error)
2613 goto bad;
2614 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
dadab5e9 2615 cnp->cn_td, &np);
2616 np->n_sillyrename = sp;
2617 return (0);
2618bad:
2619 vrele(sp->s_dvp);
2620 crfree(sp->s_cred);
2621 free((caddr_t)sp, M_NFSREQ);
2622 return (error);
2623}
2624
2625/*
2626 * Look up a file name and optionally either update the file handle or
2627 * allocate an nfsnode, depending on the value of npp.
2628 * npp == NULL --> just do the lookup
2629 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2630 * handled too
2631 * *npp != NULL --> update the file handle in the vnode
2632 */
2633static int
dadab5e9 2634nfs_lookitup(dvp, name, len, cred, td, npp)
40393ded 2635 struct vnode *dvp;
2636 const char *name;
2637 int len;
2638 struct ucred *cred;
dadab5e9 2639 struct thread *td;
2640 struct nfsnode **npp;
2641{
2642 u_int32_t *tl;
2643 caddr_t cp;
2644 int32_t t1, t2;
2645 struct vnode *newvp = (struct vnode *)0;
2646 struct nfsnode *np, *dnp = VTONFS(dvp);
2647 caddr_t bpos, dpos, cp2;
2648 int error = 0, fhlen, attrflag;
2649 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2650 nfsfh_t *nfhp;
2651 int v3 = NFS_ISV3(dvp);
2652
2653 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2654 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2655 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2656 nfsm_fhtom(dvp, v3);
2657 nfsm_strtom(name, len, NFS_MAXNAMLEN);
dadab5e9 2658 nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
2659 if (npp && !error) {
2660 nfsm_getfh(nfhp, fhlen, v3);
2661 if (*npp) {
2662 np = *npp;
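			/*
			 * Update the file handle stored in the existing
			 * nfsnode, switching between the small handle
			 * embedded in the nfsnode and a malloc'd big handle
			 * as the size of the new handle requires.
			 */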
2663 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2664 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2665 np->n_fhp = &np->n_fh;
2666 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2667 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2668 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2669 np->n_fhsize = fhlen;
2670 newvp = NFSTOV(np);
2671 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2672 VREF(dvp);
2673 newvp = dvp;
2674 } else {
2675 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2676 if (error) {
2677 m_freem(mrep);
2678 return (error);
2679 }
2680 newvp = NFSTOV(np);
2681 }
2682 if (v3) {
2683 nfsm_postop_attr(newvp, attrflag);
2684 if (!attrflag && *npp == NULL) {
2685 m_freem(mrep);
2686 if (newvp == dvp)
2687 vrele(newvp);
2688 else
2689 vput(newvp);
2690 return (ENOENT);
2691 }
2692 } else
2693 nfsm_loadattr(newvp, (struct vattr *)0);
2694 }
2695 m_freem(mrep);
2696nfsmout:
2697 if (npp && *npp == NULL) {
2698 if (error) {
2699 if (newvp) {
2700 if (newvp == dvp)
2701 vrele(newvp);
2702 else
2703 vput(newvp);
2704 }
2705 } else
2706 *npp = np;
2707 }
2708 return (error);
2709}
2710
2711/*
2712 * Nfs Version 3 commit rpc
2713 */
2714int
3b568787 2715nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct thread *td)
984263bc 2716{
2717 caddr_t cp;
2718 u_int32_t *tl;
2719 int32_t t1, t2;
2720 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2721 caddr_t bpos, dpos, cp2;
2722 int error = 0, wccflag = NFSV3_WCCRATTR;
2723 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2724
2725 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2726 return (0);
2727 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2728 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2729 nfsm_fhtom(vp, 1);
2730 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2731 txdr_hyper(offset, tl);
2732 tl += 2;
2733 *tl = txdr_unsigned(cnt);
c1cf1e59 2734 nfsm_request(vp, NFSPROC_COMMIT, td, nfs_vpcred(vp, ND_WRITE));
2735 nfsm_wcc_data(vp, wccflag);
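	/*
	 * Compare the write verifier returned by the server against the
	 * cached one.  A mismatch means the server has rebooted since the
	 * data was written, so the uncommitted writes must be re-sent;
	 * this is reported as NFSERR_STALEWRITEVERF.
	 */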
2736 if (!error) {
2737 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2738 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2739 NFSX_V3WRITEVERF)) {
2740 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2741 NFSX_V3WRITEVERF);
2742 error = NFSERR_STALEWRITEVERF;
2743 }
2744 }
2745 m_freem(mrep);
2746nfsmout:
2747 return (error);
2748}
2749
2750/*
2751 * Kludge City..
2752 * - make nfs_bmap() essentially a no-op that does no translation
2753 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2754 * (Maybe I could use the process's page mapping, but I was concerned that
2755 * Kernel Write might not be enabled and also figured copyout() would do
2756 * a lot more work than bcopy(); it also currently happens in the
2757 * context of the swapper process (2).)
2758 */
2759static int
2760nfs_bmap(ap)
2761 struct vop_bmap_args /* {
2762 struct vnode *a_vp;
2763 daddr_t a_bn;
2764 struct vnode **a_vpp;
2765 daddr_t *a_bnp;
2766 int *a_runp;
2767 int *a_runb;
2768 } */ *ap;
2769{
40393ded 2770 struct vnode *vp = ap->a_vp;
2771
2772 if (ap->a_vpp != NULL)
2773 *ap->a_vpp = vp;
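	/*
	 * The mapping is the identity: just scale the logical block number
	 * into DEV_BSIZE units using the mount point's I/O size.  No
	 * read-ahead/read-behind run information is returned.
	 */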
2774 if (ap->a_bnp != NULL)
2775 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2776 if (ap->a_runp != NULL)
2777 *ap->a_runp = 0;
2778 if (ap->a_runb != NULL)
2779 *ap->a_runb = 0;
2780 return (0);
2781}
2782
2783/*
2784 * Strategy routine.
2785 * For async requests when nfsiod(s) are running, queue the request by
2786 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2787 * request.
2788 */
2789static int
2790nfs_strategy(ap)
2791 struct vop_strategy_args *ap;
2792{
40393ded 2793 struct buf *bp = ap->a_bp;
dadab5e9 2794 struct thread *td;
2795 int error = 0;
2796
2797 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2798 KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
2799
2800 if (bp->b_flags & B_PHYS)
2801 panic("nfs physio");
2802
2803 if (bp->b_flags & B_ASYNC)
dadab5e9 2804 td = NULL;
984263bc 2805 else
dadab5e9 2806 td = curthread; /* XXX */
984263bc 2807
2808 /*
2809 * If the op is asynchronous and an i/o daemon is waiting
2810 * queue the request, wake it up and wait for completion
2811 * otherwise just do it ourselves.
2812 */
2813 if ((bp->b_flags & B_ASYNC) == 0 ||
2814 nfs_asyncio(bp, td))
2815 error = nfs_doio(bp, td);
2816 return (error);
2817}
2818
2819/*
2820 * Mmap a file
2821 *
2822 * NB Currently unsupported.
2823 */
2824/* ARGSUSED */
2825static int
2826nfs_mmap(ap)
2827 struct vop_mmap_args /* {
2828 struct vnode *a_vp;
2829 int a_fflags;
2830 struct ucred *a_cred;
dadab5e9 2831 struct thread *a_td;
2832 } */ *ap;
2833{
2834
2835 return (EINVAL);
2836}
2837
2838/*
2839 * fsync vnode op. Just call nfs_flush() with commit == 1.
2840 */
2841/* ARGSUSED */
2842static int
2843nfs_fsync(ap)
2844 struct vop_fsync_args /* {
2845 struct vnodeop_desc *a_desc;
2846 struct vnode * a_vp;
2847 struct ucred * a_cred;
2848 int a_waitfor;
dadab5e9 2849 struct thread * a_td;
2850 } */ *ap;
2851{
2852
3b568787 2853 return (nfs_flush(ap->a_vp, ap->a_waitfor, ap->a_td, 1));
2854}
2855
2856/*
2857 * Flush all the blocks associated with a vnode.
2858 * Walk through the buffer pool and push any dirty pages
2859 * associated with the vnode.
2860 */
2861static int
3b568787 2862nfs_flush(vp, waitfor, td, commit)
40393ded 2863 struct vnode *vp;
984263bc 2864 int waitfor;
dadab5e9 2865 struct thread *td;
2866 int commit;
2867{
2868 struct nfsnode *np = VTONFS(vp);
2869 struct buf *bp;
2870 int i;
2871 struct buf *nbp;
2872 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2873 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2874 int passone = 1;
2875 u_quad_t off, endoff, toff;
2876 struct buf **bvec = NULL;
2877#ifndef NFS_COMMITBVECSIZ
2878#define NFS_COMMITBVECSIZ 20
2879#endif
2880 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2881 int bvecsize = 0, bveccount;
2882
2883 if (nmp->nm_flag & NFSMNT_INT)
2884 slpflag = PCATCH;
2885 if (!commit)
2886 passone = 0;
2887 /*
2888 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2889 * server, but has not been committed to stable storage on the server
2890 * yet. On the first pass, the byte range is worked out and the commit
2891 * rpc is done. On the second pass, nfs_writebp() is called to do the
2892 * job.
2893 */
2894again:
2895 off = (u_quad_t)-1;
2896 endoff = 0;
2897 bvecpos = 0;
2898 if (NFS_ISV3(vp) && commit) {
2899 s = splbio();
2900 /*
2901 * Count up how many buffers waiting for a commit.
2902 */
2903 bveccount = 0;
2904 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2905 nbp = TAILQ_NEXT(bp, b_vnbufs);
2906 if (BUF_REFCNT(bp) == 0 &&
2907 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2908 == (B_DELWRI | B_NEEDCOMMIT))
2909 bveccount++;
2910 }
2911 /*
2912 * Allocate space to remember the list of bufs to commit. It is
2913 * important to use M_NOWAIT here to avoid a race with nfs_write.
2914 * If we can't get memory (for whatever reason), we will end up
2915 * committing the buffers one-by-one in the loop below.
2916 */
2917 if (bvec != NULL && bvec != bvec_on_stack)
2918 free(bvec, M_TEMP);
2919 if (bveccount > NFS_COMMITBVECSIZ) {
2920 bvec = (struct buf **)
2921 malloc(bveccount * sizeof(struct buf *),
2922 M_TEMP, M_NOWAIT);
2923 if (bvec == NULL) {
2924 bvec = bvec_on_stack;
2925 bvecsize = NFS_COMMITBVECSIZ;
2926 } else
2927 bvecsize = bveccount;
2928 } else {
2929 bvec = bvec_on_stack;
2930 bvecsize = NFS_COMMITBVECSIZ;
2931 }
2932 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2933 nbp = TAILQ_NEXT(bp, b_vnbufs);
2934 if (bvecpos >= bvecsize)
2935 break;
2936 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2937 (B_DELWRI | B_NEEDCOMMIT) ||
2938 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2939 continue;
2940 bremfree(bp);
2941 /*
2942 * NOTE: we are not clearing B_DONE here, so we have
2943 * to do it later on in this routine if we intend to
2944 * initiate I/O on the bp.
2945 *
2946 * Note: to avoid loopback deadlocks, we do not
2947 * assign b_runningbufspace.
2948 */
2949 bp->b_flags |= B_WRITEINPROG;
2950 vfs_busy_pages(bp, 1);
2951
2952 /*
2953 * bp is protected by being locked, but nbp is not
2954 * and vfs_busy_pages() may sleep. We have to
2955 * recalculate nbp.
2956 */
2957 nbp = TAILQ_NEXT(bp, b_vnbufs);
2958
2959 /*
2960 * A list of these buffers is kept so that the
2961 * second loop knows which buffers have actually
2962 * been committed. This is necessary, since there
2963 * may be a race between the commit rpc and new
2964 * uncommitted writes on the file.
2965 */
2966 bvec[bvecpos++] = bp;
2967 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2968 bp->b_dirtyoff;
2969 if (toff < off)
2970 off = toff;
2971 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2972 if (toff > endoff)
2973 endoff = toff;
2974 }
2975 splx(s);
2976 }
2977 if (bvecpos > 0) {
2978 /*
2979 * Commit data on the server, as required. Note that
2980 * nfs_commit will use the vnode's cred for the commit.
984263bc 2981 */
3b568787 2982 retv = nfs_commit(vp, off, (int)(endoff - off), td);
2983
2984 if (retv == NFSERR_STALEWRITEVERF)
2985 nfs_clearcommit(vp->v_mount);
2986
2987 /*
2988 * Now, either mark the blocks I/O done or mark the
2989 * blocks dirty, depending on whether the commit
2990 * succeeded.
2991 */
2992 for (i = 0; i < bvecpos; i++) {
2993 bp = bvec[i];
2994 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2995 if (retv) {
2996 /*
2997 * Error, leave B_DELWRI intact
2998 */
2999 vfs_unbusy_pages(bp);
3000 brelse(bp);
3001 } else {
3002 /*
3003 * Success, remove B_DELWRI ( bundirty() ).
3004 *
3005 * b_dirtyoff/b_dirtyend seem to be NFS
3006 * specific. We should probably move that
3007 * into bundirty(). XXX
3008 */
3009 s = splbio();
3010 vp->v_numoutput++;
3011 bp->b_flags |= B_ASYNC;
3012 bundirty(bp);
3013 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3014 bp->b_dirtyoff = bp->b_dirtyend = 0;
3015 splx(s);
3016 biodone(bp);
3017 }
3018 }
3019 }
3020
3021 /*
3022 * Start/do any write(s) that are required.
3023 */
3024loop:
3025 s = splbio();
3026 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
3027 nbp = TAILQ_NEXT(bp, b_vnbufs);
3028 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
3029 if (waitfor != MNT_WAIT || passone)
3030 continue;
3031 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
3032 "nfsfsync", slpflag, slptimeo);
3033 splx(s);
3034 if (error == 0)
3035 panic("nfs_fsync: inconsistent lock");
3036 if (error == ENOLCK)
3037 goto loop;
dadab5e9 3038 if (nfs_sigintr(nmp, (struct nfsreq *)0, td)) {
3039 error = EINTR;
3040 goto done;
3041 }
3042 if (slpflag == PCATCH) {
3043 slpflag = 0;
3044 slptimeo = 2 * hz;
3045 }
3046 goto loop;
3047 }
3048 if ((bp->b_flags & B_DELWRI) == 0)
3049 panic("nfs_fsync: not dirty");
3050 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
3051 BUF_UNLOCK(bp);
3052 continue;
3053 }
3054 bremfree(bp);
3055 if (passone || !commit)
3056 bp->b_flags |= B_ASYNC;
3057 else
3058 bp->b_flags |= B_ASYNC | B_WRITEINPROG;
3059 splx(s);
3060 VOP_BWRITE(bp->b_vp, bp);
3061 goto loop;
3062 }
3063 splx(s);
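	/*
	 * The first pass issued the commit rpc and started asynchronous
	 * writes for buffers that did not need a commit; go around again
	 * for the second pass, which writes out whatever remains.
	 */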
3064 if (passone) {
3065 passone = 0;
3066 goto again;
3067 }
3068 if (waitfor == MNT_WAIT) {
3069 while (vp->v_numoutput) {
3070 vp->v_flag |= VBWAIT;
3071 error = tsleep((caddr_t)&vp->v_numoutput,
377d4740 3072 slpflag, "nfsfsync", slptimeo);
984263bc 3073 if (error) {
dadab5e9 3074 if (nfs_sigintr(nmp, (struct nfsreq *)0, td)) {
3075 error = EINTR;
3076 goto done;
3077 }
3078 if (slpflag == PCATCH) {
3079 slpflag = 0;
3080 slptimeo = 2 * hz;
3081 }
3082 }
3083 }
3084 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
3085 goto loop;
3086 }
3087 }
3088 if (np->n_flag & NWRITEERR) {
3089 error = np->n_error;
3090 np->n_flag &= ~NWRITEERR;
3091 }
3092done:
3093 if (bvec != NULL && bvec != bvec_on_stack)
3094 free(bvec, M_TEMP);
3095 return (error);
3096}
3097
3098/*
3099 * NFS advisory byte-level locks.
3100 * Currently unsupported.
3101 */
3102static int
3103nfs_advlock(ap)
3104 struct vop_advlock_args /* {
3105 struct vnode *a_vp;
3106 caddr_t a_id;
3107 int a_op;
3108 struct flock *a_fl;
3109 int a_flags;
3110 } */ *ap;
3111{
40393ded 3112 struct nfsnode *np = VTONFS(ap->a_vp);
3113
3114 /*
3115 * The following kludge is to allow diskless support to work
3116 * until a real NFS lockd is implemented. Basically, just pretend
3117 * that this is a local lock.
3118 */
3119 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
3120}
3121
3122/*
3123 * Print out the contents of an nfsnode.
3124 */
3125static int
3126nfs_print(ap)
3127 struct vop_print_args /* {
3128 struct vnode *a_vp;
3129 } */ *ap;
3130{
3131 struct vnode *vp = ap->a_vp;
3132 struct nfsnode *np = VTONFS(vp);
3133
3134 printf("tag VT_NFS, fileid %ld fsid 0x%x",
3135 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3136 if (vp->v_type == VFIFO)
3137 fifo_printinfo(vp);
3138 printf("\n");
3139 return (0);
3140}
3141
3142/*
3143 * Just call nfs_writebp() with the force argument set to 1.
3144 *
3145 * NOTE: B_DONE may or may not be set in a_bp on call.
3146 */
3147static int
3148nfs_bwrite(ap)
3149 struct vop_bwrite_args /* {
3150 struct vnode *a_bp;
3151 } */ *ap;
3152{
dadab5e9 3153 return (nfs_writebp(ap->a_bp, 1, curthread));
3154}
3155
3156/*
3157 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
3158 * the force flag is one and it also handles the B_NEEDCOMMIT flag. We set
3159 * B_CACHE if this is a VMIO buffer.
3160 */
3161int
dadab5e9 3162nfs_writebp(bp, force, td)
40393ded 3163 struct buf *bp;
984263bc 3164 int force;
dadab5e9 3165 struct thread *td;
3166{
3167 int s;
3168 int oldflags = bp->b_flags;
3169#if 0
3170 int retv = 1;
3171 off_t off;
3172#endif
3173
3174 if (BUF_REFCNT(bp) == 0)
3175 panic("bwrite: buffer is not locked???");
3176
3177 if (bp->b_flags & B_INVAL) {
3178 brelse(bp);
3179 return(0);
3180 }
3181
3182 bp->b_flags |= B_CACHE;
3183
3184 /*
3185 * Undirty the bp. We will redirty it later if the I/O fails.
3186 */
3187
3188 s = splbio();
3189 bundirty(bp);
3190 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3191
3192 bp->b_vp->v_numoutput++;
3193 splx(s);
3194
3195 /*
3196 * Note: to avoid loopback deadlocks, we do not
3197 * assign b_runningbufspace.
3198 */
3199 vfs_busy_pages(bp, 1);
3200
3201 if (force)
3202 bp->b_flags |= B_WRITEINPROG;
3203 BUF_KERNPROC(bp);
3204 VOP_STRATEGY(bp->b_vp, bp);
3205
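	/*
	 * Synchronous write: wait for the I/O to complete, reassociate the
	 * buffer with its vnode's clean list if it had been a delayed
	 * write, and release it.  Asynchronous writes return immediately
	 * and are finished off in the I/O completion path.
	 */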
3206 if( (oldflags & B_ASYNC) == 0) {
3207 int rtval = biowait(bp);
3208
3209 if (oldflags & B_DELWRI) {
3210 s = splbio();
3211 reassignbuf(bp, bp->b_vp);
3212 splx(s);
3213 }
3214
3215 brelse(bp);
3216 return (rtval);
3217 }
3218
3219 return (0);
3220}
3221
3222/*
3223 * nfs special file access vnode op.
3224 * Essentially just get vattr and then imitate iaccess() since the device is
3225 * local to the client.
3226 */
3227static int
3228nfsspec_access(ap)
3229 struct vop_access_args /* {
3230 struct vnode *a_vp;
3231 int a_mode;
3232 struct ucred *a_cred;
dadab5e9 3233 struct thread *a_td;
3234 } */ *ap;
3235{
3236 struct vattr *vap;
3237 gid_t *gp;
3238 struct ucred *cred = ap->a_cred;
3239 struct vnode *vp = ap->a_vp;
3240 mode_t mode = ap->a_mode;
3241 struct vattr vattr;
40393ded 3242 int i;
3243 int error;
3244
3245 /*
3246 * Disallow write attempts on filesystems mounted read-only;
3247 * unless the file is a socket, fifo, or a block or character
3248 * device resident on the filesystem.
3249 */
3250 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3251 switch (vp->v_type) {
3252 case VREG:
3253 case VDIR:
3254 case VLNK:
3255 return (EROFS);
3256 default:
3257 break;
3258 }
3259 }
3260 /*
3261 * If you're the super-user,
3262 * you always get access.
3263 */
3264 if (cred->cr_uid == 0)
3265 return (0);
3266 vap = &vattr;
3b568787 3267 error = VOP_GETATTR(vp, vap, ap->a_td);
3268 if (error)
3269 return (error);
3270 /*
3271 * Access check is based on only one of owner, group, public.
3272 * If not owner, then check group. If not a member of the
3273 * group, then check public access.
3274 */
3275 if (cred->cr_uid != vap->va_uid) {
3276 mode >>= 3;
3277 gp = cred->cr_groups;
3278 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3279 if (vap->va_gid == *gp)
3280 goto found;
3281 mode >>= 3;
3282found:
3283 ;
3284 }
3285 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3286 return (error);
3287}
3288
3289/*
3290 * Read wrapper for special devices.
3291 */
3292static int
3293nfsspec_read(ap)
3294 struct vop_read_args /* {
3295 struct vnode *a_vp;
3296 struct uio *a_uio;
3297 int a_ioflag;
3298 struct ucred *a_cred;
3299 } */ *ap;
3300{
40393ded 3301 struct nfsnode *np = VTONFS(ap->a_vp);
3302
3303 /*
3304 * Set access flag.
3305 */
3306 np->n_flag |= NACC;
3307 getnanotime(&np->n_atim);
3308 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3309}
3310
3311/*
3312 * Write wrapper for special devices.
3313 */
3314static int
3315nfsspec_write(ap)
3316 struct vop_write_args /* {
3317 struct vnode *a_vp;
3318 struct uio *a_uio;
3319 int a_ioflag;
3320 struct ucred *a_cred;
3321 } */ *ap;
3322{
40393ded 3323 struct nfsnode *np = VTONFS(ap->a_vp);
3324
3325 /*
3326 * Set update flag.
3327 */
3328 np->n_flag |= NUPD;
3329 getnanotime(&np->n_mtim);
3330 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3331}
3332
3333/*
3334 * Close wrapper for special devices.
3335 *
3336 * Update the times on the nfsnode then do device close.
3337 */
3338static int
3339nfsspec_close(ap)
3340 struct vop_close_args /* {
3341 struct vnode *a_vp;
3342 int a_fflag;
3343 struct ucred *a_cred;
dadab5e9 3344 struct thread *a_td;
3345 } */ *ap;
3346{
3347 struct vnode *vp = ap->a_vp;
3348 struct nfsnode *np = VTONFS(vp);
3349 struct vattr vattr;
3350
3351 if (np->n_flag & (NACC | NUPD)) {
3352 np->n_flag |= NCHG;
3353 if (vp->v_usecount == 1 &&
3354 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3355 VATTR_NULL(&vattr);
3356 if (np->n_flag & NACC)
3357 vattr.va_atime = np->n_atim;
3358 if (np->n_flag & NUPD)
3359 vattr.va_mtime = np->n_mtim;
c1cf1e59 3360 (void)VOP_SETATTR(vp, &vattr, nfs_vpcred(vp, ND_WRITE), ap->a_td);
3361 }
3362 }
3363 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3364}
3365
3366/*
3367 * Read wrapper for fifos.
3368 */
3369static int
3370nfsfifo_read(ap)
3371 struct vop_read_args /* {
3372 struct vnode *a_vp;
3373 struct uio *a_uio;
3374 int a_ioflag;
3375 struct ucred *a_cred;
3376 } */ *ap;
3377{
40393ded 3378 struct nfsnode *np = VTONFS(ap->a_vp);
3379
3380 /*
3381 * Set access flag.
3382 */
3383 np->n_flag |= NACC;
3384 getnanotime(&np->n_atim);
3385 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3386}
3387
3388/*
3389 * Write wrapper for fifos.
3390 */
3391static int
3392nfsfifo_write(ap)
3393 struct vop_write_args /* {
3394 struct vnode *a_vp;
3395 struct uio *a_uio;
3396 int a_ioflag;
3397 struct ucred *a_cred;
3398 } */ *ap;
3399{
40393ded 3400 struct nfsnode *np = VTONFS(ap->a_vp);
3401
3402 /*
3403 * Set update flag.
3404 */
3405 np->n_flag |= NUPD;
3406 getnanotime(&np->n_mtim);
3407 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3408}
3409
3410/*
3411 * Close wrapper for fifos.
3412 *
3413 * Update the times on the nfsnode then do fifo close.
3414 */
3415static int
3416nfsfifo_close(ap)
3417 struct vop_close_args /* {
3418 struct vnode *a_vp;
3419 int a_fflag;
dadab5e9 3420 struct thread *a_td;
3421 } */ *ap;
3422{
3423 struct vnode *vp = ap->a_vp;
3424 struct nfsnode *np = VTONFS(vp);
3425 struct vattr vattr;
3426 struct timespec ts;
3427
3428 if (np->n_flag & (NACC | NUPD)) {
3429 getnanotime(&ts);
3430 if (np->n_flag & NACC)
3431 np->n_atim = ts;
3432 if (np->n_flag & NUPD)
3433 np->n_mtim = ts;
3434 np->n_flag |= NCHG;
3435 if (vp->v_usecount == 1 &&
3436 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3437 VATTR_NULL(&vattr);
3438 if (np->n_flag & NACC)
3439 vattr.va_atime = np->n_atim;
3440 if (np->n_flag & NUPD)
3441 vattr.va_mtime = np->n_mtim;
c1cf1e59 3442 (void)VOP_SETATTR(vp, &vattr, nfs_vpcred(vp, ND_WRITE), ap->a_td);
3443 }
3444 }
3445 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3446}
3447