proc->thread stage 3.5: Add an IO_CORE flag so coda doesn't have to dig
[dragonfly.git] / sys / vfs / nfs / nfs_vnops.c
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $FreeBSD: src/sys/nfs/nfs_vnops.c,v 1.150.2.5 2001/12/20 19:56:28 dillon Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_vnops.c,v 1.3 2003/06/19 01:55:07 dillon Exp $
39 */
40
41
42/*
43 * vnode op calls for Sun NFS version 2 and 3
44 */
45
46#include "opt_inet.h"
47
48#include <sys/param.h>
49#include <sys/kernel.h>
50#include <sys/systm.h>
51#include <sys/resourcevar.h>
52#include <sys/proc.h>
53#include <sys/mount.h>
54#include <sys/buf.h>
55#include <sys/malloc.h>
56#include <sys/mbuf.h>
57#include <sys/namei.h>
58#include <sys/socket.h>
59#include <sys/vnode.h>
60#include <sys/dirent.h>
61#include <sys/fcntl.h>
62#include <sys/lockf.h>
63#include <sys/stat.h>
64#include <sys/sysctl.h>
65#include <sys/conf.h>
66
67#include <vm/vm.h>
68#include <vm/vm_extern.h>
69#include <vm/vm_zone.h>
70
71#include <sys/buf2.h>
72
73#include <miscfs/fifofs/fifo.h>
74
75#include <nfs/rpcv2.h>
76#include <nfs/nfsproto.h>
77#include <nfs/nfs.h>
78#include <nfs/nfsnode.h>
79#include <nfs/nfsmount.h>
80#include <nfs/xdr_subs.h>
81#include <nfs/nfsm_subs.h>
82#include <nfs/nqnfs.h>
83
84#include <net/if.h>
85#include <netinet/in.h>
86#include <netinet/in_var.h>
87
88/* Defs */
89#define TRUE 1
90#define FALSE 0
91
92/*
93 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
94 * calls are not in getblk() and brelse() so that they would not be necessary
95 * here.
96 */
97#ifndef B_VMIO
98#define vfs_busy_pages(bp, f)
99#endif
100
101static int nfsspec_read __P((struct vop_read_args *));
102static int nfsspec_write __P((struct vop_write_args *));
103static int nfsfifo_read __P((struct vop_read_args *));
104static int nfsfifo_write __P((struct vop_write_args *));
105static int nfsspec_close __P((struct vop_close_args *));
106static int nfsfifo_close __P((struct vop_close_args *));
107#define nfs_poll vop_nopoll
108static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int));
109static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *));
110static int nfs_lookup __P((struct vop_lookup_args *));
111static int nfs_create __P((struct vop_create_args *));
112static int nfs_mknod __P((struct vop_mknod_args *));
113static int nfs_open __P((struct vop_open_args *));
114static int nfs_close __P((struct vop_close_args *));
115static int nfs_access __P((struct vop_access_args *));
116static int nfs_getattr __P((struct vop_getattr_args *));
117static int nfs_setattr __P((struct vop_setattr_args *));
118static int nfs_read __P((struct vop_read_args *));
119static int nfs_mmap __P((struct vop_mmap_args *));
120static int nfs_fsync __P((struct vop_fsync_args *));
121static int nfs_remove __P((struct vop_remove_args *));
122static int nfs_link __P((struct vop_link_args *));
123static int nfs_rename __P((struct vop_rename_args *));
124static int nfs_mkdir __P((struct vop_mkdir_args *));
125static int nfs_rmdir __P((struct vop_rmdir_args *));
126static int nfs_symlink __P((struct vop_symlink_args *));
127static int nfs_readdir __P((struct vop_readdir_args *));
128static int nfs_bmap __P((struct vop_bmap_args *));
129static int nfs_strategy __P((struct vop_strategy_args *));
130static int nfs_lookitup __P((struct vnode *, const char *, int,
131 struct ucred *, struct proc *, struct nfsnode **));
132static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *));
133static int nfsspec_access __P((struct vop_access_args *));
134static int nfs_readlink __P((struct vop_readlink_args *));
135static int nfs_print __P((struct vop_print_args *));
136static int nfs_advlock __P((struct vop_advlock_args *));
137static int nfs_bwrite __P((struct vop_bwrite_args *));
138/*
139 * Global vfs data structures for nfs
140 */
141vop_t **nfsv2_vnodeop_p;
142static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
143 { &vop_default_desc, (vop_t *) vop_defaultop },
144 { &vop_access_desc, (vop_t *) nfs_access },
145 { &vop_advlock_desc, (vop_t *) nfs_advlock },
146 { &vop_bmap_desc, (vop_t *) nfs_bmap },
147 { &vop_bwrite_desc, (vop_t *) nfs_bwrite },
148 { &vop_close_desc, (vop_t *) nfs_close },
149 { &vop_create_desc, (vop_t *) nfs_create },
150 { &vop_fsync_desc, (vop_t *) nfs_fsync },
151 { &vop_getattr_desc, (vop_t *) nfs_getattr },
152 { &vop_getpages_desc, (vop_t *) nfs_getpages },
153 { &vop_putpages_desc, (vop_t *) nfs_putpages },
154 { &vop_inactive_desc, (vop_t *) nfs_inactive },
155 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
156 { &vop_lease_desc, (vop_t *) vop_null },
157 { &vop_link_desc, (vop_t *) nfs_link },
158 { &vop_lock_desc, (vop_t *) vop_sharedlock },
159 { &vop_lookup_desc, (vop_t *) nfs_lookup },
160 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
161 { &vop_mknod_desc, (vop_t *) nfs_mknod },
162 { &vop_mmap_desc, (vop_t *) nfs_mmap },
163 { &vop_open_desc, (vop_t *) nfs_open },
164 { &vop_poll_desc, (vop_t *) nfs_poll },
165 { &vop_print_desc, (vop_t *) nfs_print },
166 { &vop_read_desc, (vop_t *) nfs_read },
167 { &vop_readdir_desc, (vop_t *) nfs_readdir },
168 { &vop_readlink_desc, (vop_t *) nfs_readlink },
169 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
170 { &vop_remove_desc, (vop_t *) nfs_remove },
171 { &vop_rename_desc, (vop_t *) nfs_rename },
172 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
173 { &vop_setattr_desc, (vop_t *) nfs_setattr },
174 { &vop_strategy_desc, (vop_t *) nfs_strategy },
175 { &vop_symlink_desc, (vop_t *) nfs_symlink },
176 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
177 { &vop_write_desc, (vop_t *) nfs_write },
178 { NULL, NULL }
179};
180static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
181 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
182VNODEOP_SET(nfsv2_vnodeop_opv_desc);
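/*
 * The table above pairs each vnode operation descriptor with the NFS
 * handler for it; operations without an explicit entry fall through to
 * vop_defaultop via the vop_default_desc entry, and VNODEOP_SET()
 * registers the whole table with the VFS at boot/module load.
 */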
183
184/*
185 * Special device vnode ops
186 */
187vop_t **spec_nfsv2nodeop_p;
188static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
189 { &vop_default_desc, (vop_t *) spec_vnoperate },
190 { &vop_access_desc, (vop_t *) nfsspec_access },
191 { &vop_close_desc, (vop_t *) nfsspec_close },
192 { &vop_fsync_desc, (vop_t *) nfs_fsync },
193 { &vop_getattr_desc, (vop_t *) nfs_getattr },
194 { &vop_inactive_desc, (vop_t *) nfs_inactive },
195 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
196 { &vop_lock_desc, (vop_t *) vop_sharedlock },
197 { &vop_print_desc, (vop_t *) nfs_print },
198 { &vop_read_desc, (vop_t *) nfsspec_read },
199 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
200 { &vop_setattr_desc, (vop_t *) nfs_setattr },
201 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
202 { &vop_write_desc, (vop_t *) nfsspec_write },
203 { NULL, NULL }
204};
205static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
206 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
207VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
208
209vop_t **fifo_nfsv2nodeop_p;
210static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
211 { &vop_default_desc, (vop_t *) fifo_vnoperate },
212 { &vop_access_desc, (vop_t *) nfsspec_access },
213 { &vop_close_desc, (vop_t *) nfsfifo_close },
214 { &vop_fsync_desc, (vop_t *) nfs_fsync },
215 { &vop_getattr_desc, (vop_t *) nfs_getattr },
216 { &vop_inactive_desc, (vop_t *) nfs_inactive },
217 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
218 { &vop_lock_desc, (vop_t *) vop_sharedlock },
219 { &vop_print_desc, (vop_t *) nfs_print },
220 { &vop_read_desc, (vop_t *) nfsfifo_read },
221 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
222 { &vop_setattr_desc, (vop_t *) nfs_setattr },
223 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
224 { &vop_write_desc, (vop_t *) nfsfifo_write },
225 { NULL, NULL }
226};
227static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
228 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
229VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
230
231static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp,
232 struct componentname *cnp,
233 struct vattr *vap));
234static int nfs_removerpc __P((struct vnode *dvp, const char *name,
235 int namelen,
236 struct ucred *cred, struct proc *proc));
237static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr,
238 int fnamelen, struct vnode *tdvp,
239 const char *tnameptr, int tnamelen,
240 struct ucred *cred, struct proc *proc));
241static int nfs_renameit __P((struct vnode *sdvp,
242 struct componentname *scnp,
243 struct sillyrename *sp));
244
245/*
246 * Global variables
247 */
248extern u_int32_t nfs_true, nfs_false;
249extern u_int32_t nfs_xdrneg1;
250extern struct nfsstats nfsstats;
251extern nfstype nfsv3_type[9];
252struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
253struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
254int nfs_numasync = 0;
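/*
 * DIRHDSIZ is the size of the fixed header of a struct dirent, i.e. the
 * structure minus its maximum-length d_name buffer; the readdir code
 * uses it (plus the padded name length) when sizing each returned record.
 */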
255#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
256
257SYSCTL_DECL(_vfs_nfs);
258
259static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
260SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
261 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
262
263static int nfsv3_commit_on_close = 0;
264SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
265 &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
266#if 0
267SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
268 &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
269
270SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
271 &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
272#endif
273
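/*
 * Mask of every NFSv3 ACCESS permission bit.  When the access cache is
 * enabled (nfsaccess_cache_timeout > 0), nfs_access() and nfs_getattr()
 * ask the server for this full mask in a single over-the-wire ACCESS
 * call so that later permission checks can be answered from the cache.
 */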
274#define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
275 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
276 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
277static int
278nfs3_access_otw(struct vnode *vp,
279 int wmode,
280 struct proc *p,
281 struct ucred *cred)
282{
283 const int v3 = 1;
284 u_int32_t *tl;
285 int error = 0, attrflag;
286
287 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
288 caddr_t bpos, dpos, cp2;
289 register int32_t t1, t2;
290 register caddr_t cp;
291 u_int32_t rmode;
292 struct nfsnode *np = VTONFS(vp);
293
294 nfsstats.rpccnt[NFSPROC_ACCESS]++;
295 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
296 nfsm_fhtom(vp, v3);
297 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
298 *tl = txdr_unsigned(wmode);
299 nfsm_request(vp, NFSPROC_ACCESS, p, cred);
300 nfsm_postop_attr(vp, attrflag);
301 if (!error) {
302 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
303 rmode = fxdr_unsigned(u_int32_t, *tl);
304 np->n_mode = rmode;
305 np->n_modeuid = cred->cr_uid;
306 np->n_modestamp = time_second;
307 }
308 nfsm_reqdone;
309 return error;
310}
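/*
 * Note: on success nfs3_access_otw() caches the granted mode bits, the
 * requesting credential's uid and a timestamp in the nfsnode (n_mode,
 * n_modeuid, n_modestamp); nfs_access() below consults these fields
 * before going back over the wire.
 */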
311
312/*
313 * nfs access vnode op.
314 * For nfs version 2, just return ok. File accesses may fail later.
315 * For nfs version 3, use the access rpc to check accessibility. If file modes
316 * are changed on the server, accesses might still fail later.
317 */
318static int
319nfs_access(ap)
320 struct vop_access_args /* {
321 struct vnode *a_vp;
322 int a_mode;
323 struct ucred *a_cred;
324 struct proc *a_p;
325 } */ *ap;
326{
327 register struct vnode *vp = ap->a_vp;
328 int error = 0;
329 u_int32_t mode, wmode;
330 int v3 = NFS_ISV3(vp);
331 struct nfsnode *np = VTONFS(vp);
332
333 /*
334 * Disallow write attempts on filesystems mounted read-only;
335 * unless the file is a socket, fifo, or a block or character
336 * device resident on the filesystem.
337 */
338 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
339 switch (vp->v_type) {
340 case VREG:
341 case VDIR:
342 case VLNK:
343 return (EROFS);
344 default:
345 break;
346 }
347 }
348 /*
349 * For nfs v3, check to see if we have done this recently, and if
350 * so return our cached result instead of making an ACCESS call.
351 * If not, do an ACCESS rpc.  For nfs v2 we are stuck emulating
352 * ufs_access() locally using the vattr, which may not be correct
353 * since the server may apply other access criteria such as
354 * client uid-->server uid mapping that we do not know about.
355 */
356 if (v3) {
357 if (ap->a_mode & VREAD)
358 mode = NFSV3ACCESS_READ;
359 else
360 mode = 0;
361 if (vp->v_type != VDIR) {
362 if (ap->a_mode & VWRITE)
363 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
364 if (ap->a_mode & VEXEC)
365 mode |= NFSV3ACCESS_EXECUTE;
366 } else {
367 if (ap->a_mode & VWRITE)
368 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
369 NFSV3ACCESS_DELETE);
370 if (ap->a_mode & VEXEC)
371 mode |= NFSV3ACCESS_LOOKUP;
372 }
373 /* XXX safety belt, only make blanket request if caching */
374 if (nfsaccess_cache_timeout > 0) {
375 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
376 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
377 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
378 } else {
379 wmode = mode;
380 }
381
382 /*
383 * Does our cached result allow us to give a definite yes to
384 * this request?
385 */
386 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
387 (ap->a_cred->cr_uid == np->n_modeuid) &&
388 ((np->n_mode & mode) == mode)) {
389 nfsstats.accesscache_hits++;
390 } else {
391 /*
392 * Either a no, or a don't know. Go to the wire.
393 */
394 nfsstats.accesscache_misses++;
395 error = nfs3_access_otw(vp, wmode, ap->a_p,ap->a_cred);
396 if (!error) {
397 if ((np->n_mode & mode) != mode) {
398 error = EACCES;
399 }
400 }
401 }
402 return (error);
403 } else {
404 if ((error = nfsspec_access(ap)) != 0)
405 return (error);
406
407 /*
408 * Attempt to prevent a mapped root from accessing a file
409 * which it shouldn't. We try to read a byte from the file
410 * if the user is root and the file is not zero length.
411 * After calling nfsspec_access, we should have the correct
412 * file size cached.
413 */
414 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
415 && VTONFS(vp)->n_size > 0) {
416 struct iovec aiov;
417 struct uio auio;
418 char buf[1];
419
420 aiov.iov_base = buf;
421 aiov.iov_len = 1;
422 auio.uio_iov = &aiov;
423 auio.uio_iovcnt = 1;
424 auio.uio_offset = 0;
425 auio.uio_resid = 1;
426 auio.uio_segflg = UIO_SYSSPACE;
427 auio.uio_rw = UIO_READ;
428 auio.uio_procp = ap->a_p;
429
430 if (vp->v_type == VREG)
431 error = nfs_readrpc(vp, &auio, ap->a_cred);
432 else if (vp->v_type == VDIR) {
433 char* bp;
434 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
435 aiov.iov_base = bp;
436 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
437 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
438 free(bp, M_TEMP);
439 } else if (vp->v_type == VLNK)
440 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
441 else
442 error = EACCES;
443 }
444 return (error);
445 }
446}
447
448/*
449 * nfs open vnode op
450 * Check to see if the type is ok
451 * and that deletion is not in progress.
452 * For paged in text files, you will need to flush the page cache
453 * if consistency is lost.
454 */
455/* ARGSUSED */
456static int
457nfs_open(ap)
458 struct vop_open_args /* {
459 struct vnode *a_vp;
460 int a_mode;
461 struct ucred *a_cred;
462 struct proc *a_p;
463 } */ *ap;
464{
465 register struct vnode *vp = ap->a_vp;
466 struct nfsnode *np = VTONFS(vp);
467 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
468 struct vattr vattr;
469 int error;
470
471 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
472#ifdef DIAGNOSTIC
473 printf("open eacces vtyp=%d\n",vp->v_type);
474#endif
475 return (EACCES);
476 }
477 /*
478 * Get a valid lease. If cached data is stale, flush it.
479 */
480 if (nmp->nm_flag & NFSMNT_NQNFS) {
481 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
482 do {
483 error = nqnfs_getlease(vp, ND_READ, ap->a_cred,
484 ap->a_p);
485 } while (error == NQNFS_EXPIRED);
486 if (error)
487 return (error);
488 if (np->n_lrev != np->n_brev ||
489 (np->n_flag & NQNFSNONCACHE)) {
490 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
491 ap->a_p, 1)) == EINTR)
492 return (error);
493 np->n_brev = np->n_lrev;
494 }
495 }
496 } else {
497 if (np->n_flag & NMODIFIED) {
498 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
499 ap->a_p, 1)) == EINTR)
500 return (error);
501 np->n_attrstamp = 0;
502 if (vp->v_type == VDIR)
503 np->n_direofoffset = 0;
504 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
505 if (error)
506 return (error);
507 np->n_mtime = vattr.va_mtime.tv_sec;
508 } else {
509 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
510 if (error)
511 return (error);
512 if (np->n_mtime != vattr.va_mtime.tv_sec) {
513 if (vp->v_type == VDIR)
514 np->n_direofoffset = 0;
515 if ((error = nfs_vinvalbuf(vp, V_SAVE,
516 ap->a_cred, ap->a_p, 1)) == EINTR)
517 return (error);
518 np->n_mtime = vattr.va_mtime.tv_sec;
519 }
520 }
521 }
522 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
523 np->n_attrstamp = 0; /* For Open/Close consistency */
524 return (0);
525}
526
527/*
528 * nfs close vnode op
529 * What an NFS client should do upon close after writing is a debatable issue.
530 * Most NFS clients push delayed writes to the server upon close, basically for
531 * two reasons:
532 * 1 - So that any write errors may be reported back to the client process
533 * doing the close system call. By far the two most likely errors are
534 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
535 * 2 - To put a worst case upper bound on cache inconsistency between
536 * multiple clients for the file.
537 * There is also a consistency problem for Version 2 of the protocol w.r.t.
538 * not being able to tell if other clients are writing a file concurrently,
539 * since there is no way of knowing if the changed modify time in the reply
540 * is only due to the write for this client.
541 * (NFS Version 3 provides weak cache consistency data in the reply that
542 * should be sufficient to detect and handle this case.)
543 *
544 * The current code does the following:
545 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
546 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
547 * or commit them (this satisfies 1 and 2 except for the
548 * case where the server crashes after this close but
549 * before the commit RPC, which is felt to be "good
550 * enough"). Changing the last argument to nfs_flush() to
551 * a 1 would force a commit operation, if it is felt a
552 * commit is necessary now.
553 * for NQNFS - do nothing now, since 2 is dealt with via leases and
554 * 1 should be dealt with via an fsync() system call for
555 * cases where write errors are important.
556 */
557/* ARGSUSED */
558static int
559nfs_close(ap)
560 struct vop_close_args /* {
561 struct vnodeop_desc *a_desc;
562 struct vnode *a_vp;
563 int a_fflag;
564 struct ucred *a_cred;
565 struct proc *a_p;
566 } */ *ap;
567{
568 register struct vnode *vp = ap->a_vp;
569 register struct nfsnode *np = VTONFS(vp);
570 int error = 0;
571
572 if (vp->v_type == VREG) {
573 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
574 (np->n_flag & NMODIFIED)) {
575 if (NFS_ISV3(vp)) {
576 /*
577 * Under NFSv3 we have dirty buffers to dispose of. We
578 * must flush them to the NFS server. We have the option
579 * of waiting all the way through the commit rpc or just
580 * waiting for the initial write. The default is to only
581 * wait through the initial write so the data is in the
582 * server's cache, which is roughly similar to the state
583 * a standard disk subsystem leaves the file in on close().
584 *
585 * We cannot clear the NMODIFIED bit in np->n_flag due to
586 * potential races with other processes, and certainly
587 * cannot clear it if we don't commit.
588 */
589 int cm = nfsv3_commit_on_close ? 1 : 0;
590 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, cm);
591 /* np->n_flag &= ~NMODIFIED; */
592 } else {
593 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
594 }
595 np->n_attrstamp = 0;
596 }
597 if (np->n_flag & NWRITEERR) {
598 np->n_flag &= ~NWRITEERR;
599 error = np->n_error;
600 }
601 }
602 return (error);
603}
604
605/*
606 * nfs getattr call from vfs.
607 */
608static int
609nfs_getattr(ap)
610 struct vop_getattr_args /* {
611 struct vnode *a_vp;
612 struct vattr *a_vap;
613 struct ucred *a_cred;
614 struct proc *a_p;
615 } */ *ap;
616{
617 register struct vnode *vp = ap->a_vp;
618 register struct nfsnode *np = VTONFS(vp);
619 register caddr_t cp;
620 register u_int32_t *tl;
621 register int32_t t1, t2;
622 caddr_t bpos, dpos;
623 int error = 0;
624 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
625 int v3 = NFS_ISV3(vp);
626
627 /*
628 * Update local times for special files.
629 */
630 if (np->n_flag & (NACC | NUPD))
631 np->n_flag |= NCHG;
632 /*
633 * First look in the cache.
634 */
635 if (nfs_getattrcache(vp, ap->a_vap) == 0)
636 return (0);
637
638 if (v3 && nfsaccess_cache_timeout > 0) {
639 nfsstats.accesscache_misses++;
640 nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_p, ap->a_cred);
641 if (nfs_getattrcache(vp, ap->a_vap) == 0)
642 return (0);
643 }
644
645 nfsstats.rpccnt[NFSPROC_GETATTR]++;
646 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
647 nfsm_fhtom(vp, v3);
648 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred);
649 if (!error) {
650 nfsm_loadattr(vp, ap->a_vap);
651 }
652 nfsm_reqdone;
653 return (error);
654}
655
656/*
657 * nfs setattr call.
658 */
659static int
660nfs_setattr(ap)
661 struct vop_setattr_args /* {
662 struct vnodeop_desc *a_desc;
663 struct vnode *a_vp;
664 struct vattr *a_vap;
665 struct ucred *a_cred;
666 struct proc *a_p;
667 } */ *ap;
668{
669 register struct vnode *vp = ap->a_vp;
670 register struct nfsnode *np = VTONFS(vp);
671 register struct vattr *vap = ap->a_vap;
672 int error = 0;
673 u_quad_t tsize;
674
675#ifndef nolint
676 tsize = (u_quad_t)0;
677#endif
678
679 /*
680 * Setting of flags is not supported.
681 */
682 if (vap->va_flags != VNOVAL)
683 return (EOPNOTSUPP);
684
685 /*
686 * Disallow write attempts if the filesystem is mounted read-only.
687 */
688 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
689 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
690 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
691 (vp->v_mount->mnt_flag & MNT_RDONLY))
692 return (EROFS);
693 if (vap->va_size != VNOVAL) {
694 switch (vp->v_type) {
695 case VDIR:
696 return (EISDIR);
697 case VCHR:
698 case VBLK:
699 case VSOCK:
700 case VFIFO:
701 if (vap->va_mtime.tv_sec == VNOVAL &&
702 vap->va_atime.tv_sec == VNOVAL &&
703 vap->va_mode == (mode_t)VNOVAL &&
704 vap->va_uid == (uid_t)VNOVAL &&
705 vap->va_gid == (gid_t)VNOVAL)
706 return (0);
707 vap->va_size = VNOVAL;
708 break;
709 default:
710 /*
711 * Disallow write attempts if the filesystem is
712 * mounted read-only.
713 */
714 if (vp->v_mount->mnt_flag & MNT_RDONLY)
715 return (EROFS);
716
717 /*
718 * We run vnode_pager_setsize() early (why?);
719 * we must set np->n_size now to avoid vinvalbuf
720 * V_SAVE races that might setsize a lower
721 * value.
722 */
723
724 tsize = np->n_size;
725 error = nfs_meta_setsize(vp, ap->a_cred,
726 ap->a_p, vap->va_size);
727
728 if (np->n_flag & NMODIFIED) {
729 if (vap->va_size == 0)
730 error = nfs_vinvalbuf(vp, 0,
731 ap->a_cred, ap->a_p, 1);
732 else
733 error = nfs_vinvalbuf(vp, V_SAVE,
734 ap->a_cred, ap->a_p, 1);
735 if (error) {
736 np->n_size = tsize;
737 vnode_pager_setsize(vp, np->n_size);
738 return (error);
739 }
740 }
741 np->n_vattr.va_size = vap->va_size;
742 }
743 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
744 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
745 vp->v_type == VREG &&
746 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
747 ap->a_p, 1)) == EINTR)
748 return (error);
749 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
750 if (error && vap->va_size != VNOVAL) {
751 np->n_size = np->n_vattr.va_size = tsize;
752 vnode_pager_setsize(vp, np->n_size);
753 }
754 return (error);
755}
756
757/*
758 * Do an nfs setattr rpc.
759 */
760static int
761nfs_setattrrpc(vp, vap, cred, procp)
762 register struct vnode *vp;
763 register struct vattr *vap;
764 struct ucred *cred;
765 struct proc *procp;
766{
767 register struct nfsv2_sattr *sp;
768 register caddr_t cp;
769 register int32_t t1, t2;
770 caddr_t bpos, dpos, cp2;
771 u_int32_t *tl;
772 int error = 0, wccflag = NFSV3_WCCRATTR;
773 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
774 int v3 = NFS_ISV3(vp);
775
776 nfsstats.rpccnt[NFSPROC_SETATTR]++;
777 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
778 nfsm_fhtom(vp, v3);
779 if (v3) {
780 nfsm_v3attrbuild(vap, TRUE);
781 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
782 *tl = nfs_false;
783 } else {
784 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
785 if (vap->va_mode == (mode_t)VNOVAL)
786 sp->sa_mode = nfs_xdrneg1;
787 else
788 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
789 if (vap->va_uid == (uid_t)VNOVAL)
790 sp->sa_uid = nfs_xdrneg1;
791 else
792 sp->sa_uid = txdr_unsigned(vap->va_uid);
793 if (vap->va_gid == (gid_t)VNOVAL)
794 sp->sa_gid = nfs_xdrneg1;
795 else
796 sp->sa_gid = txdr_unsigned(vap->va_gid);
797 sp->sa_size = txdr_unsigned(vap->va_size);
798 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
799 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
800 }
801 nfsm_request(vp, NFSPROC_SETATTR, procp, cred);
802 if (v3) {
803 nfsm_wcc_data(vp, wccflag);
804 } else
805 nfsm_loadattr(vp, (struct vattr *)0);
806 nfsm_reqdone;
807 return (error);
808}
809
810/*
811 * nfs lookup call, one step at a time...
812 * First look in cache
813 * If not found, unlock the directory nfsnode and do the rpc
814 */
815static int
816nfs_lookup(ap)
817 struct vop_lookup_args /* {
818 struct vnodeop_desc *a_desc;
819 struct vnode *a_dvp;
820 struct vnode **a_vpp;
821 struct componentname *a_cnp;
822 } */ *ap;
823{
824 struct componentname *cnp = ap->a_cnp;
825 struct vnode *dvp = ap->a_dvp;
826 struct vnode **vpp = ap->a_vpp;
827 int flags = cnp->cn_flags;
828 struct vnode *newvp;
829 u_int32_t *tl;
830 caddr_t cp;
831 int32_t t1, t2;
832 struct nfsmount *nmp;
833 caddr_t bpos, dpos, cp2;
834 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
835 long len;
836 nfsfh_t *fhp;
837 struct nfsnode *np;
838 int lockparent, wantparent, error = 0, attrflag, fhsize;
839 int v3 = NFS_ISV3(dvp);
840 struct proc *p = cnp->cn_proc;
841
842 *vpp = NULLVP;
843 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
844 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
845 return (EROFS);
846 if (dvp->v_type != VDIR)
847 return (ENOTDIR);
848 lockparent = flags & LOCKPARENT;
849 wantparent = flags & (LOCKPARENT|WANTPARENT);
850 nmp = VFSTONFS(dvp->v_mount);
851 np = VTONFS(dvp);
852 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
853 struct vattr vattr;
854 int vpid;
855
856 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) != 0) {
857 *vpp = NULLVP;
858 return (error);
859 }
860
861 newvp = *vpp;
862 vpid = newvp->v_id;
863 /*
864 * See the comment starting `Step through' in ufs/ufs_lookup.c
865 * for an explanation of the locking protocol
866 */
867 if (dvp == newvp) {
868 VREF(newvp);
869 error = 0;
870 } else if (flags & ISDOTDOT) {
871 VOP_UNLOCK(dvp, 0, p);
872 error = vget(newvp, LK_EXCLUSIVE, p);
873 if (!error && lockparent && (flags & ISLASTCN))
874 error = vn_lock(dvp, LK_EXCLUSIVE, p);
875 } else {
876 error = vget(newvp, LK_EXCLUSIVE, p);
877 if (!lockparent || error || !(flags & ISLASTCN))
878 VOP_UNLOCK(dvp, 0, p);
879 }
880 if (!error) {
881 if (vpid == newvp->v_id) {
882 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
883 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
884 nfsstats.lookupcache_hits++;
885 if (cnp->cn_nameiop != LOOKUP &&
886 (flags & ISLASTCN))
887 cnp->cn_flags |= SAVENAME;
888 return (0);
889 }
890 cache_purge(newvp);
891 }
892 vput(newvp);
893 if (lockparent && dvp != newvp && (flags & ISLASTCN))
894 VOP_UNLOCK(dvp, 0, p);
895 }
896 error = vn_lock(dvp, LK_EXCLUSIVE, p);
897 *vpp = NULLVP;
898 if (error)
899 return (error);
900 }
901 error = 0;
902 newvp = NULLVP;
903 nfsstats.lookupcache_misses++;
904 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
905 len = cnp->cn_namelen;
906 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
907 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
908 nfsm_fhtom(dvp, v3);
909 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
910 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred);
911 if (error) {
912 nfsm_postop_attr(dvp, attrflag);
913 m_freem(mrep);
914 goto nfsmout;
915 }
916 nfsm_getfh(fhp, fhsize, v3);
917
918 /*
919 * Handle RENAME case...
920 */
921 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
922 if (NFS_CMPFH(np, fhp, fhsize)) {
923 m_freem(mrep);
924 return (EISDIR);
925 }
926 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
927 if (error) {
928 m_freem(mrep);
929 return (error);
930 }
931 newvp = NFSTOV(np);
932 if (v3) {
933 nfsm_postop_attr(newvp, attrflag);
934 nfsm_postop_attr(dvp, attrflag);
935 } else
936 nfsm_loadattr(newvp, (struct vattr *)0);
937 *vpp = newvp;
938 m_freem(mrep);
939 cnp->cn_flags |= SAVENAME;
940 if (!lockparent)
941 VOP_UNLOCK(dvp, 0, p);
942 return (0);
943 }
944
945 if (flags & ISDOTDOT) {
946 VOP_UNLOCK(dvp, 0, p);
947 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
948 if (error) {
949 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
950 return (error);
951 }
952 newvp = NFSTOV(np);
953 if (lockparent && (flags & ISLASTCN) &&
954 (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
955 vput(newvp);
956 return (error);
957 }
958 } else if (NFS_CMPFH(np, fhp, fhsize)) {
959 VREF(dvp);
960 newvp = dvp;
961 } else {
962 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
963 if (error) {
964 m_freem(mrep);
965 return (error);
966 }
967 if (!lockparent || !(flags & ISLASTCN))
968 VOP_UNLOCK(dvp, 0, p);
969 newvp = NFSTOV(np);
970 }
971 if (v3) {
972 nfsm_postop_attr(newvp, attrflag);
973 nfsm_postop_attr(dvp, attrflag);
974 } else
975 nfsm_loadattr(newvp, (struct vattr *)0);
976 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
977 cnp->cn_flags |= SAVENAME;
978 if ((cnp->cn_flags & MAKEENTRY) &&
979 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
980 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
981 cache_enter(dvp, newvp, cnp);
982 }
983 *vpp = newvp;
984 nfsm_reqdone;
985 if (error) {
986 if (newvp != NULLVP) {
987 vrele(newvp);
988 *vpp = NULLVP;
989 }
990 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
991 (flags & ISLASTCN) && error == ENOENT) {
992 if (!lockparent)
993 VOP_UNLOCK(dvp, 0, p);
994 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
995 error = EROFS;
996 else
997 error = EJUSTRETURN;
998 }
999 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
1000 cnp->cn_flags |= SAVENAME;
1001 }
1002 return (error);
1003}
1004
1005/*
1006 * nfs read call.
1007 * Just call nfs_bioread() to do the work.
1008 */
1009static int
1010nfs_read(ap)
1011 struct vop_read_args /* {
1012 struct vnode *a_vp;
1013 struct uio *a_uio;
1014 int a_ioflag;
1015 struct ucred *a_cred;
1016 } */ *ap;
1017{
1018 register struct vnode *vp = ap->a_vp;
1019
1020 if (vp->v_type != VREG)
1021 return (EPERM);
1022 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1023}
1024
1025/*
1026 * nfs readlink call
1027 */
1028static int
1029nfs_readlink(ap)
1030 struct vop_readlink_args /* {
1031 struct vnode *a_vp;
1032 struct uio *a_uio;
1033 struct ucred *a_cred;
1034 } */ *ap;
1035{
1036 register struct vnode *vp = ap->a_vp;
1037
1038 if (vp->v_type != VLNK)
1039 return (EINVAL);
1040 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
1041}
1042
1043/*
1044 * Do a readlink rpc.
1045 * Called by nfs_doio() from below the buffer cache.
1046 */
1047int
1048nfs_readlinkrpc(vp, uiop, cred)
1049 register struct vnode *vp;
1050 struct uio *uiop;
1051 struct ucred *cred;
1052{
1053 register u_int32_t *tl;
1054 register caddr_t cp;
1055 register int32_t t1, t2;
1056 caddr_t bpos, dpos, cp2;
1057 int error = 0, len, attrflag;
1058 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1059 int v3 = NFS_ISV3(vp);
1060
1061 nfsstats.rpccnt[NFSPROC_READLINK]++;
1062 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1063 nfsm_fhtom(vp, v3);
1064 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred);
1065 if (v3)
1066 nfsm_postop_attr(vp, attrflag);
1067 if (!error) {
1068 nfsm_strsiz(len, NFS_MAXPATHLEN);
1069 if (len == NFS_MAXPATHLEN) {
1070 struct nfsnode *np = VTONFS(vp);
1071 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
1072 len = np->n_size;
1073 }
1074 nfsm_mtouio(uiop, len);
1075 }
1076 nfsm_reqdone;
1077 return (error);
1078}
1079
1080/*
1081 * nfs read rpc call
1082 * Ditto above
1083 */
1084int
1085nfs_readrpc(vp, uiop, cred)
1086 register struct vnode *vp;
1087 struct uio *uiop;
1088 struct ucred *cred;
1089{
1090 register u_int32_t *tl;
1091 register caddr_t cp;
1092 register int32_t t1, t2;
1093 caddr_t bpos, dpos, cp2;
1094 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1095 struct nfsmount *nmp;
1096 int error = 0, len, retlen, tsiz, eof, attrflag;
1097 int v3 = NFS_ISV3(vp);
1098
1099#ifndef nolint
1100 eof = 0;
1101#endif
1102 nmp = VFSTONFS(vp->v_mount);
1103 tsiz = uiop->uio_resid;
1104 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1105 return (EFBIG);
1106 while (tsiz > 0) {
1107 nfsstats.rpccnt[NFSPROC_READ]++;
1108 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1109 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1110 nfsm_fhtom(vp, v3);
1111 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1112 if (v3) {
1113 txdr_hyper(uiop->uio_offset, tl);
1114 *(tl + 2) = txdr_unsigned(len);
1115 } else {
1116 *tl++ = txdr_unsigned(uiop->uio_offset);
1117 *tl++ = txdr_unsigned(len);
1118 *tl = 0;
1119 }
1120 nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred);
1121 if (v3) {
1122 nfsm_postop_attr(vp, attrflag);
1123 if (error) {
1124 m_freem(mrep);
1125 goto nfsmout;
1126 }
1127 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1128 eof = fxdr_unsigned(int, *(tl + 1));
1129 } else
1130 nfsm_loadattr(vp, (struct vattr *)0);
1131 nfsm_strsiz(retlen, nmp->nm_rsize);
1132 nfsm_mtouio(uiop, retlen);
1133 m_freem(mrep);
1134 tsiz -= retlen;
1135 if (v3) {
1136 if (eof || retlen == 0) {
1137 tsiz = 0;
1138 }
1139 } else if (retlen < len) {
1140 tsiz = 0;
1141 }
1142 }
1143nfsmout:
1144 return (error);
1145}
1146
1147/*
1148 * nfs write call
1149 */
1150int
1151nfs_writerpc(vp, uiop, cred, iomode, must_commit)
1152 register struct vnode *vp;
1153 register struct uio *uiop;
1154 struct ucred *cred;
1155 int *iomode, *must_commit;
1156{
1157 register u_int32_t *tl;
1158 register caddr_t cp;
1159 register int32_t t1, t2, backup;
1160 caddr_t bpos, dpos, cp2;
1161 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1162 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1163 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1164 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1165
1166#ifndef DIAGNOSTIC
1167 if (uiop->uio_iovcnt != 1)
1168 panic("nfs: writerpc iovcnt > 1");
1169#endif
1170 *must_commit = 0;
1171 tsiz = uiop->uio_resid;
1172 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1173 return (EFBIG);
1174 while (tsiz > 0) {
1175 nfsstats.rpccnt[NFSPROC_WRITE]++;
1176 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1177 nfsm_reqhead(vp, NFSPROC_WRITE,
1178 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1179 nfsm_fhtom(vp, v3);
1180 if (v3) {
1181 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1182 txdr_hyper(uiop->uio_offset, tl);
1183 tl += 2;
1184 *tl++ = txdr_unsigned(len);
1185 *tl++ = txdr_unsigned(*iomode);
1186 *tl = txdr_unsigned(len);
1187 } else {
1188 register u_int32_t x;
1189
1190 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1191 /* Set both "begin" and "current" to non-garbage. */
1192 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1193 *tl++ = x; /* "begin offset" */
1194 *tl++ = x; /* "current offset" */
1195 x = txdr_unsigned(len);
1196 *tl++ = x; /* total to this offset */
1197 *tl = x; /* size of this write */
1198 }
1199 nfsm_uiotom(uiop, len);
1200 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred);
1201 if (v3) {
1202 wccflag = NFSV3_WCCCHK;
1203 nfsm_wcc_data(vp, wccflag);
1204 if (!error) {
1205 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1206 + NFSX_V3WRITEVERF);
1207 rlen = fxdr_unsigned(int, *tl++);
1208 if (rlen == 0) {
1209 error = NFSERR_IO;
1210 m_freem(mrep);
1211 break;
1212 } else if (rlen < len) {
1213 backup = len - rlen;
1214 uiop->uio_iov->iov_base -= backup;
1215 uiop->uio_iov->iov_len += backup;
1216 uiop->uio_offset -= backup;
1217 uiop->uio_resid += backup;
1218 len = rlen;
1219 }
1220 commit = fxdr_unsigned(int, *tl++);
1221
1222 /*
1223 * Return the lowest commitment level
1224 * obtained by any of the RPCs.
1225 */
1226 if (committed == NFSV3WRITE_FILESYNC)
1227 committed = commit;
1228 else if (committed == NFSV3WRITE_DATASYNC &&
1229 commit == NFSV3WRITE_UNSTABLE)
1230 committed = commit;
1231 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1232 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1233 NFSX_V3WRITEVERF);
1234 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1235 } else if (bcmp((caddr_t)tl,
1236 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1237 *must_commit = 1;
1238 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1239 NFSX_V3WRITEVERF);
1240 }
1241 }
1242 } else
1243 nfsm_loadattr(vp, (struct vattr *)0);
1244 if (wccflag)
1245 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1246 m_freem(mrep);
1247 if (error)
1248 break;
1249 tsiz -= len;
1250 }
1251nfsmout:
1252 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1253 committed = NFSV3WRITE_FILESYNC;
1254 *iomode = committed;
1255 if (error)
1256 uiop->uio_resid = tsiz;
1257 return (error);
1258}
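/*
 * Illustrative sketch: the "lowest commitment level" bookkeeping in
 * nfs_writerpc() above keeps the weakest stability level returned by any
 * of the WRITE replies.  Since the protocol orders the levels as
 * NFSV3WRITE_UNSTABLE < NFSV3WRITE_DATASYNC < NFSV3WRITE_FILESYNC, the
 * same result could be written as a simple minimum (hypothetical helper,
 * not used by this file):
 */
#if 0
static int
nfs_weakest_commit(int committed, int commit)
{
	/* keep whichever stability level is numerically smaller (weaker) */
	return (commit < committed ? commit : committed);
}
#endif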
1259
1260/*
1261 * nfs mknod rpc
1262 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1263 * mode set to specify the file type and the size field for rdev.
1264 */
1265static int
1266nfs_mknodrpc(dvp, vpp, cnp, vap)
1267 register struct vnode *dvp;
1268 register struct vnode **vpp;
1269 register struct componentname *cnp;
1270 register struct vattr *vap;
1271{
1272 register struct nfsv2_sattr *sp;
1273 register u_int32_t *tl;
1274 register caddr_t cp;
1275 register int32_t t1, t2;
1276 struct vnode *newvp = (struct vnode *)0;
1277 struct nfsnode *np = (struct nfsnode *)0;
1278 struct vattr vattr;
1279 char *cp2;
1280 caddr_t bpos, dpos;
1281 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1282 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1283 u_int32_t rdev;
1284 int v3 = NFS_ISV3(dvp);
1285
1286 if (vap->va_type == VCHR || vap->va_type == VBLK)
1287 rdev = txdr_unsigned(vap->va_rdev);
1288 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1289 rdev = nfs_xdrneg1;
1290 else {
1291 return (EOPNOTSUPP);
1292 }
1293 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1294 return (error);
1295 }
1296 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1297 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1298 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1299 nfsm_fhtom(dvp, v3);
1300 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1301 if (v3) {
1302 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1303 *tl++ = vtonfsv3_type(vap->va_type);
1304 nfsm_v3attrbuild(vap, FALSE);
1305 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1306 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1307 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1308 *tl = txdr_unsigned(uminor(vap->va_rdev));
1309 }
1310 } else {
1311 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1312 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1313 sp->sa_uid = nfs_xdrneg1;
1314 sp->sa_gid = nfs_xdrneg1;
1315 sp->sa_size = rdev;
1316 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1317 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1318 }
1319 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred);
1320 if (!error) {
1321 nfsm_mtofh(dvp, newvp, v3, gotvp);
1322 if (!gotvp) {
1323 if (newvp) {
1324 vput(newvp);
1325 newvp = (struct vnode *)0;
1326 }
1327 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1328 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1329 if (!error)
1330 newvp = NFSTOV(np);
1331 }
1332 }
1333 if (v3)
1334 nfsm_wcc_data(dvp, wccflag);
1335 nfsm_reqdone;
1336 if (error) {
1337 if (newvp)
1338 vput(newvp);
1339 } else {
1340 if (cnp->cn_flags & MAKEENTRY)
1341 cache_enter(dvp, newvp, cnp);
1342 *vpp = newvp;
1343 }
1344 VTONFS(dvp)->n_flag |= NMODIFIED;
1345 if (!wccflag)
1346 VTONFS(dvp)->n_attrstamp = 0;
1347 return (error);
1348}
1349
1350/*
1351 * nfs mknod vop
1352 * just call nfs_mknodrpc() to do the work.
1353 */
1354/* ARGSUSED */
1355static int
1356nfs_mknod(ap)
1357 struct vop_mknod_args /* {
1358 struct vnode *a_dvp;
1359 struct vnode **a_vpp;
1360 struct componentname *a_cnp;
1361 struct vattr *a_vap;
1362 } */ *ap;
1363{
1364 return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1365}
1366
1367static u_long create_verf;
1368/*
1369 * nfs file create call
1370 */
1371static int
1372nfs_create(ap)
1373 struct vop_create_args /* {
1374 struct vnode *a_dvp;
1375 struct vnode **a_vpp;
1376 struct componentname *a_cnp;
1377 struct vattr *a_vap;
1378 } */ *ap;
1379{
1380 register struct vnode *dvp = ap->a_dvp;
1381 register struct vattr *vap = ap->a_vap;
1382 register struct componentname *cnp = ap->a_cnp;
1383 register struct nfsv2_sattr *sp;
1384 register u_int32_t *tl;
1385 register caddr_t cp;
1386 register int32_t t1, t2;
1387 struct nfsnode *np = (struct nfsnode *)0;
1388 struct vnode *newvp = (struct vnode *)0;
1389 caddr_t bpos, dpos, cp2;
1390 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1391 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1392 struct vattr vattr;
1393 int v3 = NFS_ISV3(dvp);
1394
1395 /*
1396 * Oops, not for me..
1397 */
1398 if (vap->va_type == VSOCK)
1399 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1400
1401 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1402 return (error);
1403 }
1404 if (vap->va_vaflags & VA_EXCLUSIVE)
1405 fmode |= O_EXCL;
1406again:
1407 nfsstats.rpccnt[NFSPROC_CREATE]++;
1408 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1409 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1410 nfsm_fhtom(dvp, v3);
1411 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1412 if (v3) {
1413 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1414 if (fmode & O_EXCL) {
1415 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1416 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1417#ifdef INET
1418 if (!TAILQ_EMPTY(&in_ifaddrhead))
1419 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
1420 else
1421#endif
1422 *tl++ = create_verf;
1423 *tl = ++create_verf;
1424 } else {
1425 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1426 nfsm_v3attrbuild(vap, FALSE);
1427 }
1428 } else {
1429 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1430 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1431 sp->sa_uid = nfs_xdrneg1;
1432 sp->sa_gid = nfs_xdrneg1;
1433 sp->sa_size = 0;
1434 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1435 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1436 }
1437 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred);
1438 if (!error) {
1439 nfsm_mtofh(dvp, newvp, v3, gotvp);
1440 if (!gotvp) {
1441 if (newvp) {
1442 vput(newvp);
1443 newvp = (struct vnode *)0;
1444 }
1445 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1446 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1447 if (!error)
1448 newvp = NFSTOV(np);
1449 }
1450 }
1451 if (v3)
1452 nfsm_wcc_data(dvp, wccflag);
1453 nfsm_reqdone;
1454 if (error) {
1455 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1456 fmode &= ~O_EXCL;
1457 goto again;
1458 }
1459 if (newvp)
1460 vput(newvp);
1461 } else if (v3 && (fmode & O_EXCL)) {
1462 /*
1463 * We are normally called with only a partially initialized
1464 * VAP. Since the NFSv3 spec says that the server may use the
1465 * file attributes to store the verifier, the spec requires
1466 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1467 * in atime, but we can't really assume that all servers will,
1468 * so we ensure that our SETATTR sets both atime and mtime.
1469 */
1470 if (vap->va_mtime.tv_sec == VNOVAL)
1471 vfs_timestamp(&vap->va_mtime);
1472 if (vap->va_atime.tv_sec == VNOVAL)
1473 vap->va_atime = vap->va_mtime;
1474 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
1475 }
1476 if (!error) {
1477 if (cnp->cn_flags & MAKEENTRY)
1478 cache_enter(dvp, newvp, cnp);
1479 *ap->a_vpp = newvp;
1480 }
1481 VTONFS(dvp)->n_flag |= NMODIFIED;
1482 if (!wccflag)
1483 VTONFS(dvp)->n_attrstamp = 0;
1484 return (error);
1485}
1486
1487/*
1488 * nfs file remove call
1489 * To try and make nfs semantics closer to ufs semantics, a file that has
1490 * other processes using the vnode is renamed instead of removed and then
1491 * removed later on the last close.
1492 * - If v_usecount > 1
1493 * If a rename is not already in the works
1494 * call nfs_sillyrename() to set it up
1495 * else
1496 * do the remove rpc
1497 */
1498static int
1499nfs_remove(ap)
1500 struct vop_remove_args /* {
1501 struct vnodeop_desc *a_desc;
1502 struct vnode * a_dvp;
1503 struct vnode * a_vp;
1504 struct componentname * a_cnp;
1505 } */ *ap;
1506{
1507 register struct vnode *vp = ap->a_vp;
1508 register struct vnode *dvp = ap->a_dvp;
1509 register struct componentname *cnp = ap->a_cnp;
1510 register struct nfsnode *np = VTONFS(vp);
1511 int error = 0;
1512 struct vattr vattr;
1513
1514#ifndef DIAGNOSTIC
1515 if ((cnp->cn_flags & HASBUF) == 0)
1516 panic("nfs_remove: no name");
1517 if (vp->v_usecount < 1)
1518 panic("nfs_remove: bad v_usecount");
1519#endif
1520 if (vp->v_type == VDIR)
1521 error = EPERM;
1522 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1523 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
1524 vattr.va_nlink > 1)) {
1525 /*
1526 * Purge the name cache so that the chance of a lookup for
1527 * the name succeeding while the remove is in progress is
1528 * minimized. Without node locking it can still happen, such
1529 * that an I/O op returns ESTALE, but since you get this if
1530 * another host removes the file..
1531 */
1532 cache_purge(vp);
1533 /*
1534 * throw away biocache buffers, mainly to avoid
1535 * unnecessary delayed writes later.
1536 */
1537 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
1538 /* Do the rpc */
1539 if (error != EINTR)
1540 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1541 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
1542 /*
1543 * Kludge City: If the first reply to the remove rpc is lost,
1544 * the reply to the retransmitted request will be ENOENT
1545 * since the file was in fact removed.
1546 * Therefore, we cheat and return success.
1547 */
1548 if (error == ENOENT)
1549 error = 0;
1550 } else if (!np->n_sillyrename)
1551 error = nfs_sillyrename(dvp, vp, cnp);
1552 np->n_attrstamp = 0;
1553 return (error);
1554}
1555
1556/*
1557 * nfs file remove rpc called from nfs_inactive
1558 */
1559int
1560nfs_removeit(sp)
1561 register struct sillyrename *sp;
1562{
1563
1564 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1565 (struct proc *)0));
1566}
1567
1568/*
1569 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1570 */
1571static int
1572nfs_removerpc(dvp, name, namelen, cred, proc)
1573 register struct vnode *dvp;
1574 const char *name;
1575 int namelen;
1576 struct ucred *cred;
1577 struct proc *proc;
1578{
1579 register u_int32_t *tl;
1580 register caddr_t cp;
1581 register int32_t t1, t2;
1582 caddr_t bpos, dpos, cp2;
1583 int error = 0, wccflag = NFSV3_WCCRATTR;
1584 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1585 int v3 = NFS_ISV3(dvp);
1586
1587 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1588 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1589 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1590 nfsm_fhtom(dvp, v3);
1591 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1592 nfsm_request(dvp, NFSPROC_REMOVE, proc, cred);
1593 if (v3)
1594 nfsm_wcc_data(dvp, wccflag);
1595 nfsm_reqdone;
1596 VTONFS(dvp)->n_flag |= NMODIFIED;
1597 if (!wccflag)
1598 VTONFS(dvp)->n_attrstamp = 0;
1599 return (error);
1600}
1601
1602/*
1603 * nfs file rename call
1604 */
1605static int
1606nfs_rename(ap)
1607 struct vop_rename_args /* {
1608 struct vnode *a_fdvp;
1609 struct vnode *a_fvp;
1610 struct componentname *a_fcnp;
1611 struct vnode *a_tdvp;
1612 struct vnode *a_tvp;
1613 struct componentname *a_tcnp;
1614 } */ *ap;
1615{
1616 register struct vnode *fvp = ap->a_fvp;
1617 register struct vnode *tvp = ap->a_tvp;
1618 register struct vnode *fdvp = ap->a_fdvp;
1619 register struct vnode *tdvp = ap->a_tdvp;
1620 register struct componentname *tcnp = ap->a_tcnp;
1621 register struct componentname *fcnp = ap->a_fcnp;
1622 int error;
1623
1624#ifndef DIAGNOSTIC
1625 if ((tcnp->cn_flags & HASBUF) == 0 ||
1626 (fcnp->cn_flags & HASBUF) == 0)
1627 panic("nfs_rename: no name");
1628#endif
1629 /* Check for cross-device rename */
1630 if ((fvp->v_mount != tdvp->v_mount) ||
1631 (tvp && (fvp->v_mount != tvp->v_mount))) {
1632 error = EXDEV;
1633 goto out;
1634 }
1635
1636 /*
1637 * We have to flush B_DELWRI data prior to renaming
1638 * the file. If we don't, the delayed-write buffers
1639 * can be flushed out later after the file has gone stale
1640 * under NFSV3. NFSV2 does not have this problem because
1641 * ( as far as I can tell ) it flushes dirty buffers more
1642 * often.
1643 */
1644
1645 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_proc);
1646 if (tvp)
1647 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_proc);
1648
1649 /*
1650 * If the tvp exists and is in use, sillyrename it before doing the
1651 * rename of the new file over it.
1652 * XXX Can't sillyrename a directory.
1653 */
1654 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1655 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1656 vput(tvp);
1657 tvp = NULL;
1658 }
1659
1660 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1661 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1662 tcnp->cn_proc);
1663
1664 if (fvp->v_type == VDIR) {
1665 if (tvp != NULL && tvp->v_type == VDIR)
1666 cache_purge(tdvp);
1667 cache_purge(fdvp);
1668 }
1669
1670out:
1671 if (tdvp == tvp)
1672 vrele(tdvp);
1673 else
1674 vput(tdvp);
1675 if (tvp)
1676 vput(tvp);
1677 vrele(fdvp);
1678 vrele(fvp);
1679 /*
1680 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1681 */
1682 if (error == ENOENT)
1683 error = 0;
1684 return (error);
1685}
1686
1687/*
1688 * nfs file rename rpc, called by nfs_sillyrename().
1689 */
1690static int
1691nfs_renameit(sdvp, scnp, sp)
1692 struct vnode *sdvp;
1693 struct componentname *scnp;
1694 register struct sillyrename *sp;
1695{
1696 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1697 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc));
1698}
1699
1700/*
1701 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1702 */
1703static int
1704nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc)
1705 register struct vnode *fdvp;
1706 const char *fnameptr;
1707 int fnamelen;
1708 register struct vnode *tdvp;
1709 const char *tnameptr;
1710 int tnamelen;
1711 struct ucred *cred;
1712 struct proc *proc;
1713{
1714 register u_int32_t *tl;
1715 register caddr_t cp;
1716 register int32_t t1, t2;
1717 caddr_t bpos, dpos, cp2;
1718 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1719 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1720 int v3 = NFS_ISV3(fdvp);
1721
1722 nfsstats.rpccnt[NFSPROC_RENAME]++;
1723 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1724 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1725 nfsm_rndup(tnamelen));
1726 nfsm_fhtom(fdvp, v3);
1727 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1728 nfsm_fhtom(tdvp, v3);
1729 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1730 nfsm_request(fdvp, NFSPROC_RENAME, proc, cred);
1731 if (v3) {
1732 nfsm_wcc_data(fdvp, fwccflag);
1733 nfsm_wcc_data(tdvp, twccflag);
1734 }
1735 nfsm_reqdone;
1736 VTONFS(fdvp)->n_flag |= NMODIFIED;
1737 VTONFS(tdvp)->n_flag |= NMODIFIED;
1738 if (!fwccflag)
1739 VTONFS(fdvp)->n_attrstamp = 0;
1740 if (!twccflag)
1741 VTONFS(tdvp)->n_attrstamp = 0;
1742 return (error);
1743}
1744
1745/*
1746 * nfs hard link create call
1747 */
1748static int
1749nfs_link(ap)
1750 struct vop_link_args /* {
1751 struct vnode *a_tdvp;
1752 struct vnode *a_vp;
1753 struct componentname *a_cnp;
1754 } */ *ap;
1755{
1756 register struct vnode *vp = ap->a_vp;
1757 register struct vnode *tdvp = ap->a_tdvp;
1758 register struct componentname *cnp = ap->a_cnp;
1759 register u_int32_t *tl;
1760 register caddr_t cp;
1761 register int32_t t1, t2;
1762 caddr_t bpos, dpos, cp2;
1763 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1764 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1765 int v3;
1766
1767 if (vp->v_mount != tdvp->v_mount) {
1768 return (EXDEV);
1769 }
1770
1771 /*
1772 * Push all writes to the server, so that the attribute cache
1773 * doesn't get "out of sync" with the server.
1774 * XXX There should be a better way!
1775 */
1776 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);
1777
1778 v3 = NFS_ISV3(vp);
1779 nfsstats.rpccnt[NFSPROC_LINK]++;
1780 nfsm_reqhead(vp, NFSPROC_LINK,
1781 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1782 nfsm_fhtom(vp, v3);
1783 nfsm_fhtom(tdvp, v3);
1784 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1785 nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
1786 if (v3) {
1787 nfsm_postop_attr(vp, attrflag);
1788 nfsm_wcc_data(tdvp, wccflag);
1789 }
1790 nfsm_reqdone;
1791 VTONFS(tdvp)->n_flag |= NMODIFIED;
1792 if (!attrflag)
1793 VTONFS(vp)->n_attrstamp = 0;
1794 if (!wccflag)
1795 VTONFS(tdvp)->n_attrstamp = 0;
1796 /*
1797 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1798 */
1799 if (error == EEXIST)
1800 error = 0;
1801 return (error);
1802}
1803
1804/*
1805 * nfs symbolic link create call
1806 */
1807static int
1808nfs_symlink(ap)
1809 struct vop_symlink_args /* {
1810 struct vnode *a_dvp;
1811 struct vnode **a_vpp;
1812 struct componentname *a_cnp;
1813 struct vattr *a_vap;
1814 char *a_target;
1815 } */ *ap;
1816{
1817 register struct vnode *dvp = ap->a_dvp;
1818 register struct vattr *vap = ap->a_vap;
1819 register struct componentname *cnp = ap->a_cnp;
1820 register struct nfsv2_sattr *sp;
1821 register u_int32_t *tl;
1822 register caddr_t cp;
1823 register int32_t t1, t2;
1824 caddr_t bpos, dpos, cp2;
1825 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1826 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1827 struct vnode *newvp = (struct vnode *)0;
1828 int v3 = NFS_ISV3(dvp);
1829
1830 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1831 slen = strlen(ap->a_target);
1832 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1833 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1834 nfsm_fhtom(dvp, v3);
1835 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1836 if (v3) {
1837 nfsm_v3attrbuild(vap, FALSE);
1838 }
1839 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1840 if (!v3) {
1841 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1842 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1843 sp->sa_uid = nfs_xdrneg1;
1844 sp->sa_gid = nfs_xdrneg1;
1845 sp->sa_size = nfs_xdrneg1;
1846 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1847 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1848 }
1849
1850 /*
1851 * Issue the NFS request and get the rpc response.
1852 *
1853 * Only NFSv3 responses returning an error of 0 actually return
1854 * a file handle that can be converted into newvp without having
1855 * to do an extra lookup rpc.
1856 */
1857 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred);
1858 if (v3) {
1859 if (error == 0)
1860 nfsm_mtofh(dvp, newvp, v3, gotvp);
1861 nfsm_wcc_data(dvp, wccflag);
1862 }
1863
1864	/*
1865	 * The nfsm_* macro error paths jump to here; mrep is also freed.
1866	 */
1867
1868 nfsm_reqdone;
1869
1870 /*
1871 * If we get an EEXIST error, silently convert it to no-error
1872 * in case of an NFS retry.
1873 */
1874 if (error == EEXIST)
1875 error = 0;
1876
1877	/*
1878	 * If we do not have (or no longer have) an error, and we could
1879	 * not extract the newvp from the response (because the request
1880	 * was NFSv2 or the error was EEXIST), we have to do a lookup in
1881	 * order to obtain a newvp to return.
1882	 */
1883 if (error == 0 && newvp == NULL) {
1884 struct nfsnode *np = NULL;
1885
1886 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1887 cnp->cn_cred, cnp->cn_proc, &np);
1888 if (!error)
1889 newvp = NFSTOV(np);
1890 }
1891 if (error) {
1892 if (newvp)
1893 vput(newvp);
1894 } else {
1895 *ap->a_vpp = newvp;
1896 }
1897 VTONFS(dvp)->n_flag |= NMODIFIED;
1898 if (!wccflag)
1899 VTONFS(dvp)->n_attrstamp = 0;
1900 return (error);
1901}
1902
1903/*
1904 * nfs make dir call
1905 */
1906static int
1907nfs_mkdir(ap)
1908 struct vop_mkdir_args /* {
1909 struct vnode *a_dvp;
1910 struct vnode **a_vpp;
1911 struct componentname *a_cnp;
1912 struct vattr *a_vap;
1913 } */ *ap;
1914{
1915 register struct vnode *dvp = ap->a_dvp;
1916 register struct vattr *vap = ap->a_vap;
1917 register struct componentname *cnp = ap->a_cnp;
1918 register struct nfsv2_sattr *sp;
1919 register u_int32_t *tl;
1920 register caddr_t cp;
1921 register int32_t t1, t2;
1922 register int len;
1923 struct nfsnode *np = (struct nfsnode *)0;
1924 struct vnode *newvp = (struct vnode *)0;
1925 caddr_t bpos, dpos, cp2;
1926 int error = 0, wccflag = NFSV3_WCCRATTR;
1927 int gotvp = 0;
1928 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1929 struct vattr vattr;
1930 int v3 = NFS_ISV3(dvp);
1931
1932 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1933 return (error);
1934 }
1935 len = cnp->cn_namelen;
1936 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1937 nfsm_reqhead(dvp, NFSPROC_MKDIR,
1938 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1939 nfsm_fhtom(dvp, v3);
1940 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1941 if (v3) {
1942 nfsm_v3attrbuild(vap, FALSE);
1943 } else {
1944 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1945 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1946 sp->sa_uid = nfs_xdrneg1;
1947 sp->sa_gid = nfs_xdrneg1;
1948 sp->sa_size = nfs_xdrneg1;
1949 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1950 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1951 }
1952 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
1953 if (!error)
1954 nfsm_mtofh(dvp, newvp, v3, gotvp);
1955 if (v3)
1956 nfsm_wcc_data(dvp, wccflag);
1957 nfsm_reqdone;
1958 VTONFS(dvp)->n_flag |= NMODIFIED;
1959 if (!wccflag)
1960 VTONFS(dvp)->n_attrstamp = 0;
1961	/*
1962	 * Kludge: Map EEXIST => 0, assuming it is a reply to a retry,
1963	 * if we can succeed in looking up the directory.
1964	 */
1965 if (error == EEXIST || (!error && !gotvp)) {
1966 if (newvp) {
1967 vrele(newvp);
1968 newvp = (struct vnode *)0;
1969 }
1970 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1971 cnp->cn_proc, &np);
1972 if (!error) {
1973 newvp = NFSTOV(np);
1974 if (newvp->v_type != VDIR)
1975 error = EEXIST;
1976 }
1977 }
1978 if (error) {
1979 if (newvp)
1980 vrele(newvp);
1981 } else
1982 *ap->a_vpp = newvp;
1983 return (error);
1984}
1985
1986/*
1987 * nfs remove directory call
1988 */
1989static int
1990nfs_rmdir(ap)
1991 struct vop_rmdir_args /* {
1992 struct vnode *a_dvp;
1993 struct vnode *a_vp;
1994 struct componentname *a_cnp;
1995 } */ *ap;
1996{
1997 register struct vnode *vp = ap->a_vp;
1998 register struct vnode *dvp = ap->a_dvp;
1999 register struct componentname *cnp = ap->a_cnp;
2000 register u_int32_t *tl;
2001 register caddr_t cp;
2002 register int32_t t1, t2;
2003 caddr_t bpos, dpos, cp2;
2004 int error = 0, wccflag = NFSV3_WCCRATTR;
2005 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2006 int v3 = NFS_ISV3(dvp);
2007
2008 if (dvp == vp)
2009 return (EINVAL);
2010 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2011 nfsm_reqhead(dvp, NFSPROC_RMDIR,
2012 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2013 nfsm_fhtom(dvp, v3);
2014 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2015 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
2016 if (v3)
2017 nfsm_wcc_data(dvp, wccflag);
2018 nfsm_reqdone;
2019 VTONFS(dvp)->n_flag |= NMODIFIED;
2020 if (!wccflag)
2021 VTONFS(dvp)->n_attrstamp = 0;
2022 cache_purge(dvp);
2023 cache_purge(vp);
2024	/*
2025	 * Kludge: Map ENOENT => 0, assuming it is a reply to a retry.
2026	 */
2027 if (error == ENOENT)
2028 error = 0;
2029 return (error);
2030}
2031
2032/*
2033 * nfs readdir call
2034 */
2035static int
2036nfs_readdir(ap)
2037 struct vop_readdir_args /* {
2038 struct vnode *a_vp;
2039 struct uio *a_uio;
2040 struct ucred *a_cred;
2041 } */ *ap;
2042{
2043 register struct vnode *vp = ap->a_vp;
2044 register struct nfsnode *np = VTONFS(vp);
2045 register struct uio *uio = ap->a_uio;
2046 int tresid, error;
2047 struct vattr vattr;
2048
2049 if (vp->v_type != VDIR)
2050 return (EPERM);
2051 /*
2052 * First, check for hit on the EOF offset cache
2053 */
2054 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
2055 (np->n_flag & NMODIFIED) == 0) {
2056 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
2057 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
2058 nfsstats.direofcache_hits++;
2059 return (0);
2060 }
2061 } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
2062 np->n_mtime == vattr.va_mtime.tv_sec) {
2063 nfsstats.direofcache_hits++;
2064 return (0);
2065 }
2066 }
2067
2068 /*
2069 * Call nfs_bioread() to do the real work.
2070 */
2071 tresid = uio->uio_resid;
2072 error = nfs_bioread(vp, uio, 0, ap->a_cred);
2073
2074 if (!error && uio->uio_resid == tresid)
2075 nfsstats.direofcache_misses++;
2076 return (error);
2077}
2078
2079/*
2080 * Readdir rpc call.
2081 * Called from below the buffer cache by nfs_doio().
2082 */
2083int
2084nfs_readdirrpc(vp, uiop, cred)
2085 struct vnode *vp;
2086 register struct uio *uiop;
2087 struct ucred *cred;
2088
2089{
2090 register int len, left;
2091 register struct dirent *dp = NULL;
2092 register u_int32_t *tl;
2093 register caddr_t cp;
2094 register int32_t t1, t2;
2095 register nfsuint64 *cookiep;
2096 caddr_t bpos, dpos, cp2;
2097 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2098 nfsuint64 cookie;
2099 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2100 struct nfsnode *dnp = VTONFS(vp);
2101 u_quad_t fileno;
2102 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2103 int attrflag;
2104 int v3 = NFS_ISV3(vp);
2105
2106#ifndef DIAGNOSTIC
2107 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2108 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2109 panic("nfs readdirrpc bad uio");
2110#endif
2111
2112 /*
2113 * If there is no cookie, assume directory was stale.
2114 */
2115 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2116 if (cookiep)
2117 cookie = *cookiep;
2118 else
2119 return (NFSERR_BAD_COOKIE);
2120	/*
2121	 * Loop around doing readdir rpc's of size nm_readdirsize,
2122	 * truncated to a multiple of DIRBLKSIZ.
2123	 * The stopping criterion is EOF or a full buffer.
2124	 */
2125 while (more_dirs && bigenough) {
2126 nfsstats.rpccnt[NFSPROC_READDIR]++;
2127 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2128 NFSX_READDIR(v3));
2129 nfsm_fhtom(vp, v3);
2130 if (v3) {
2131 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2132 *tl++ = cookie.nfsuquad[0];
2133 *tl++ = cookie.nfsuquad[1];
2134 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2135 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2136 } else {
2137 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2138 *tl++ = cookie.nfsuquad[0];
2139 }
2140 *tl = txdr_unsigned(nmp->nm_readdirsize);
2141 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
2142 if (v3) {
2143 nfsm_postop_attr(vp, attrflag);
2144 if (!error) {
2145 nfsm_dissect(tl, u_int32_t *,
2146 2 * NFSX_UNSIGNED);
2147 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2148 dnp->n_cookieverf.nfsuquad[1] = *tl;
2149 } else {
2150 m_freem(mrep);
2151 goto nfsmout;
2152 }
2153 }
2154 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2155 more_dirs = fxdr_unsigned(int, *tl);
2156
2157 /* loop thru the dir entries, doctoring them to 4bsd form */
2158 while (more_dirs && bigenough) {
2159 if (v3) {
2160 nfsm_dissect(tl, u_int32_t *,
2161 3 * NFSX_UNSIGNED);
2162 fileno = fxdr_hyper(tl);
2163 len = fxdr_unsigned(int, *(tl + 2));
2164 } else {
2165 nfsm_dissect(tl, u_int32_t *,
2166 2 * NFSX_UNSIGNED);
2167 fileno = fxdr_unsigned(u_quad_t, *tl++);
2168 len = fxdr_unsigned(int, *tl);
2169 }
2170 if (len <= 0 || len > NFS_MAXNAMLEN) {
2171 error = EBADRPC;
2172 m_freem(mrep);
2173 goto nfsmout;
2174 }
2175 tlen = nfsm_rndup(len);
2176 if (tlen == len)
2177 tlen += 4; /* To ensure null termination */
2178 left = DIRBLKSIZ - blksiz;
2179 if ((tlen + DIRHDSIZ) > left) {
2180 dp->d_reclen += left;
2181 uiop->uio_iov->iov_base += left;
2182 uiop->uio_iov->iov_len -= left;
2183 uiop->uio_offset += left;
2184 uiop->uio_resid -= left;
2185 blksiz = 0;
2186 }
2187 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2188 bigenough = 0;
2189 if (bigenough) {
2190 dp = (struct dirent *)uiop->uio_iov->iov_base;
2191 dp->d_fileno = (int)fileno;
2192 dp->d_namlen = len;
2193 dp->d_reclen = tlen + DIRHDSIZ;
2194 dp->d_type = DT_UNKNOWN;
2195 blksiz += dp->d_reclen;
2196 if (blksiz == DIRBLKSIZ)
2197 blksiz = 0;
2198 uiop->uio_offset += DIRHDSIZ;
2199 uiop->uio_resid -= DIRHDSIZ;
2200 uiop->uio_iov->iov_base += DIRHDSIZ;
2201 uiop->uio_iov->iov_len -= DIRHDSIZ;
2202 nfsm_mtouio(uiop, len);
2203 cp = uiop->uio_iov->iov_base;
2204 tlen -= len;
2205 *cp = '\0'; /* null terminate */
2206 uiop->uio_iov->iov_base += tlen;
2207 uiop->uio_iov->iov_len -= tlen;
2208 uiop->uio_offset += tlen;
2209 uiop->uio_resid -= tlen;
2210 } else
2211 nfsm_adv(nfsm_rndup(len));
2212 if (v3) {
2213 nfsm_dissect(tl, u_int32_t *,
2214 3 * NFSX_UNSIGNED);
2215 } else {
2216 nfsm_dissect(tl, u_int32_t *,
2217 2 * NFSX_UNSIGNED);
2218 }
2219 if (bigenough) {
2220 cookie.nfsuquad[0] = *tl++;
2221 if (v3)
2222 cookie.nfsuquad[1] = *tl++;
2223 } else if (v3)
2224 tl += 2;
2225 else
2226 tl++;
2227 more_dirs = fxdr_unsigned(int, *tl);
2228 }
2229 /*
2230 * If at end of rpc data, get the eof boolean
2231 */
2232 if (!more_dirs) {
2233 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2234 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2235 }
2236 m_freem(mrep);
2237 }
2238	/*
2239	 * Fill the last record, if any, out to a multiple of DIRBLKSIZ
2240	 * by increasing d_reclen for the last record.
2241	 */
2242 if (blksiz > 0) {
2243 left = DIRBLKSIZ - blksiz;
2244 dp->d_reclen += left;
2245 uiop->uio_iov->iov_base += left;
2246 uiop->uio_iov->iov_len -= left;
2247 uiop->uio_offset += left;
2248 uiop->uio_resid -= left;
2249 }
2250
2251 /*
2252 * We are now either at the end of the directory or have filled the
2253 * block.
2254 */
2255 if (bigenough)
2256 dnp->n_direofoffset = uiop->uio_offset;
2257 else {
2258 if (uiop->uio_resid > 0)
2259 printf("EEK! readdirrpc resid > 0\n");
2260 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2261 *cookiep = cookie;
2262 }
2263nfsmout:
2264 return (error);
2265}
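
/*
 * Illustrative sketch (not part of the original source): the record length
 * arithmetic used above when repacking NFS entries into 4bsd dirent form.
 * RNDUP4() mirrors what nfsm_rndup() does and HDRSZ/BLKSZ are stand-in
 * values for DIRHDSIZ/DIRBLKSIZ, chosen only for the example.
 */
#if 0
#include <stdio.h>

#define RNDUP4(x)	(((x) + 3) & ~3)	/* round up to a 4 byte multiple */
#define HDRSZ		8			/* stand-in for DIRHDSIZ */
#define BLKSZ		512			/* stand-in for DIRBLKSIZ */

int
main(void)
{
	int len = 8;			/* name length from the rpc reply */
	int tlen = RNDUP4(len);
	int reclen;

	if (tlen == len)
		tlen += 4;		/* leave room for null termination */
	reclen = tlen + HDRSZ;
	printf("d_reclen = %d\n", reclen);		/* 8 + 4 + 8 = 20 */
	printf("pad left in block = %d\n", BLKSZ - reclen);	/* 492 */
	return (0);
}
#endif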
2266
2267/*
2268 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2269 */
2270int
2271nfs_readdirplusrpc(vp, uiop, cred)
2272 struct vnode *vp;
2273 register struct uio *uiop;
2274 struct ucred *cred;
2275{
2276 register int len, left;
2277 register struct dirent *dp;
2278 register u_int32_t *tl;
2279 register caddr_t cp;
2280 register int32_t t1, t2;
2281 register struct vnode *newvp;
2282 register nfsuint64 *cookiep;
2283 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2284 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2285 struct nameidata nami, *ndp = &nami;
2286 struct componentname *cnp = &ndp->ni_cnd;
2287 nfsuint64 cookie;
2288 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2289 struct nfsnode *dnp = VTONFS(vp), *np;
2290 nfsfh_t *fhp;
2291 u_quad_t fileno;
2292 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2293 int attrflag, fhsize;
2294
2295#ifndef nolint
2296 dp = (struct dirent *)0;
2297#endif
2298#ifndef DIAGNOSTIC
2299 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2300 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2301 panic("nfs readdirplusrpc bad uio");
2302#endif
2303 ndp->ni_dvp = vp;
2304 newvp = NULLVP;
2305
2306 /*
2307 * If there is no cookie, assume directory was stale.
2308 */
2309 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2310 if (cookiep)
2311 cookie = *cookiep;
2312 else
2313 return (NFSERR_BAD_COOKIE);
2314	/*
2315	 * Loop around doing readdir rpc's of size nm_readdirsize,
2316	 * truncated to a multiple of DIRBLKSIZ.
2317	 * The stopping criterion is EOF or a full buffer.
2318	 */
2319 while (more_dirs && bigenough) {
2320 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2321 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2322 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2323 nfsm_fhtom(vp, 1);
2324 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2325 *tl++ = cookie.nfsuquad[0];
2326 *tl++ = cookie.nfsuquad[1];
2327 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2328 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2329 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2330 *tl = txdr_unsigned(nmp->nm_rsize);
2331 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
2332 nfsm_postop_attr(vp, attrflag);
2333 if (error) {
2334 m_freem(mrep);
2335 goto nfsmout;
2336 }
2337 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2338 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2339 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2340 more_dirs = fxdr_unsigned(int, *tl);
2341
2342 /* loop thru the dir entries, doctoring them to 4bsd form */
2343 while (more_dirs && bigenough) {
2344 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2345 fileno = fxdr_hyper(tl);
2346 len = fxdr_unsigned(int, *(tl + 2));
2347 if (len <= 0 || len > NFS_MAXNAMLEN) {
2348 error = EBADRPC;
2349 m_freem(mrep);
2350 goto nfsmout;
2351 }
2352 tlen = nfsm_rndup(len);
2353 if (tlen == len)
2354 tlen += 4; /* To ensure null termination*/
2355 left = DIRBLKSIZ - blksiz;
2356 if ((tlen + DIRHDSIZ) > left) {
2357 dp->d_reclen += left;
2358 uiop->uio_iov->iov_base += left;
2359 uiop->uio_iov->iov_len -= left;
2360 uiop->uio_offset += left;
2361 uiop->uio_resid -= left;
2362 blksiz = 0;
2363 }
2364 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2365 bigenough = 0;
2366 if (bigenough) {
2367 dp = (struct dirent *)uiop->uio_iov->iov_base;
2368 dp->d_fileno = (int)fileno;
2369 dp->d_namlen = len;
2370 dp->d_reclen = tlen + DIRHDSIZ;
2371 dp->d_type = DT_UNKNOWN;
2372 blksiz += dp->d_reclen;
2373 if (blksiz == DIRBLKSIZ)
2374 blksiz = 0;
2375 uiop->uio_offset += DIRHDSIZ;
2376 uiop->uio_resid -= DIRHDSIZ;
2377 uiop->uio_iov->iov_base += DIRHDSIZ;
2378 uiop->uio_iov->iov_len -= DIRHDSIZ;
2379 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2380 cnp->cn_namelen = len;
2381 nfsm_mtouio(uiop, len);
2382 cp = uiop->uio_iov->iov_base;
2383 tlen -= len;
2384 *cp = '\0';
2385 uiop->uio_iov->iov_base += tlen;
2386 uiop->uio_iov->iov_len -= tlen;
2387 uiop->uio_offset += tlen;
2388 uiop->uio_resid -= tlen;
2389 } else
2390 nfsm_adv(nfsm_rndup(len));
2391 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2392 if (bigenough) {
2393 cookie.nfsuquad[0] = *tl++;
2394 cookie.nfsuquad[1] = *tl++;
2395 } else
2396 tl += 2;
2397
2398 /*
2399 * Since the attributes are before the file handle
2400 * (sigh), we must skip over the attributes and then
2401 * come back and get them.
2402 */
2403 attrflag = fxdr_unsigned(int, *tl);
2404 if (attrflag) {
2405 dpossav1 = dpos;
2406 mdsav1 = md;
2407 nfsm_adv(NFSX_V3FATTR);
2408 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2409 doit = fxdr_unsigned(int, *tl);
2410 if (doit) {
2411 nfsm_getfh(fhp, fhsize, 1);
2412 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2413 VREF(vp);
2414 newvp = vp;
2415 np = dnp;
2416 } else {
2417 error = nfs_nget(vp->v_mount, fhp,
2418 fhsize, &np);
2419 if (error)
2420 doit = 0;
2421 else
2422 newvp = NFSTOV(np);
2423 }
2424 }
2425 if (doit && bigenough) {
2426 dpossav2 = dpos;
2427 dpos = dpossav1;
2428 mdsav2 = md;
2429 md = mdsav1;
2430 nfsm_loadattr(newvp, (struct vattr *)0);
2431 dpos = dpossav2;
2432 md = mdsav2;
2433 dp->d_type =
2434 IFTODT(VTTOIF(np->n_vattr.va_type));
2435 ndp->ni_vp = newvp;
2436 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2437 }
2438 } else {
2439 /* Just skip over the file handle */
2440 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2441 i = fxdr_unsigned(int, *tl);
2442 nfsm_adv(nfsm_rndup(i));
2443 }
2444 if (newvp != NULLVP) {
2445 if (newvp == vp)
2446 vrele(newvp);
2447 else
2448 vput(newvp);
2449 newvp = NULLVP;
2450 }
2451 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2452 more_dirs = fxdr_unsigned(int, *tl);
2453 }
2454 /*
2455 * If at end of rpc data, get the eof boolean
2456 */
2457 if (!more_dirs) {
2458 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2459 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2460 }
2461 m_freem(mrep);
2462 }
2463	/*
2464	 * Fill the last record, if any, out to a multiple of DIRBLKSIZ
2465	 * by increasing d_reclen for the last record.
2466	 */
2467 if (blksiz > 0) {
2468 left = DIRBLKSIZ - blksiz;
2469 dp->d_reclen += left;
2470 uiop->uio_iov->iov_base += left;
2471 uiop->uio_iov->iov_len -= left;
2472 uiop->uio_offset += left;
2473 uiop->uio_resid -= left;
2474 }
2475
2476 /*
2477 * We are now either at the end of the directory or have filled the
2478 * block.
2479 */
2480 if (bigenough)
2481 dnp->n_direofoffset = uiop->uio_offset;
2482 else {
2483 if (uiop->uio_resid > 0)
2484 printf("EEK! readdirplusrpc resid > 0\n");
2485 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2486 *cookiep = cookie;
2487 }
2488nfsmout:
2489 if (newvp != NULLVP) {
2490 if (newvp == vp)
2491 vrele(newvp);
2492 else
2493 vput(newvp);
2494 newvp = NULLVP;
2495 }
2496 return (error);
2497}
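
/*
 * Illustrative sketch (not part of the original source): the save/skip/
 * restore dance above.  READDIRPLUS puts the attributes before the file
 * handle, so the parse cursor (dpos/md in the real code) is remembered,
 * the attributes are skipped to reach the handle, and the cursor is then
 * rolled back so the attributes can be loaded into the new vnode.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	const char reply[] = "ATTRS|FH|rest";	/* toy reply layout */
	const char *dpos = reply;		/* parse cursor */
	const char *dpossav1, *dpossav2;

	dpossav1 = dpos;		/* remember where the attributes start */
	dpos += 6;			/* nfsm_adv(NFSX_V3FATTR): skip them */
	printf("file handle: %.2s\n", dpos);	/* FH */
	dpossav2 = dpos + 3;		/* remember the end of the handle */
	dpos = dpossav1;		/* come back for the attributes */
	printf("attributes: %.5s\n", dpos);	/* ATTRS */
	dpos = dpossav2;		/* resume parsing after the handle */
	printf("resume at: %s\n", dpos);	/* rest */
	return (0);
}
#endif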
2498
2499/*
2500 * Silly rename.  To make the stateless NFS filesystem look a little more
2501 * like "ufs", a remove of an active vnode is translated into a rename to a
2502 * funny looking filename that is removed by nfs_inactive() on the nfsnode.
2503 * There is the potential for another process on a different client to
2504 * create the same funny name between the time nfs_lookitup() fails and
2505 * nfs_rename() completes, but...
2506 */
2507static int
2508nfs_sillyrename(dvp, vp, cnp)
2509 struct vnode *dvp, *vp;
2510 struct componentname *cnp;
2511{
2512 register struct sillyrename *sp;
2513 struct nfsnode *np;
2514 int error;
2515 short pid;
2516
2517 cache_purge(dvp);
2518 np = VTONFS(vp);
2519#ifndef DIAGNOSTIC
2520 if (vp->v_type == VDIR)
2521 panic("nfs: sillyrename dir");
2522#endif
2523 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2524 M_NFSREQ, M_WAITOK);
2525 sp->s_cred = crdup(cnp->cn_cred);
2526 sp->s_dvp = dvp;
2527 VREF(dvp);
2528
2529 /* Fudge together a funny name */
2530 pid = cnp->cn_proc->p_pid;
2531 sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);
2532
2533 /* Try lookitups until we get one that isn't there */
2534 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2535 cnp->cn_proc, (struct nfsnode **)0) == 0) {
2536 sp->s_name[4]++;
2537 if (sp->s_name[4] > 'z') {
2538 error = EINVAL;
2539 goto bad;
2540 }
2541 }
2542 error = nfs_renameit(dvp, cnp, sp);
2543 if (error)
2544 goto bad;
2545 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2546 cnp->cn_proc, &np);
2547 np->n_sillyrename = sp;
2548 return (0);
2549bad:
2550 vrele(sp->s_dvp);
2551 crfree(sp->s_cred);
2552 free((caddr_t)sp, M_NFSREQ);
2553 return (error);
2554}
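
/*
 * Illustrative sketch (not part of the original source): how the funny name
 * is fudged together and bumped until an unused one is found.  name_exists()
 * is a hypothetical stand-in for the nfs_lookitup() probe done above.
 */
#if 0
#include <stdio.h>

static int
name_exists(const char *name)
{
	static int calls;

	return (calls++ == 0);		/* pretend only the first name exists */
}

int
main(void)
{
	char name[32];
	short pid = 0x1234;
	int namlen;

	namlen = snprintf(name, sizeof(name), ".nfsA%04x4.4", pid);
	while (name_exists(name)) {
		name[4]++;		/* .nfsA12344.4 -> .nfsB12344.4 -> ... */
		if (name[4] > 'z')
			return (1);	/* give up; EINVAL in the real code */
	}
	printf("%s (len %d)\n", name, namlen);	/* .nfsB12344.4 (len 12) */
	return (0);
}
#endif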
2555
2556/*
2557 * Look up a file name and optionally either update the file handle or
2558 * allocate an nfsnode, depending on the value of npp.
2559 * npp == NULL --> just do the lookup
2560 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2561 * handled too
2562 * *npp != NULL --> update the file handle in the vnode
2563 */
2564static int
2565nfs_lookitup(dvp, name, len, cred, procp, npp)
2566 register struct vnode *dvp;
2567 const char *name;
2568 int len;
2569 struct ucred *cred;
2570 struct proc *procp;
2571 struct nfsnode **npp;
2572{
2573 register u_int32_t *tl;
2574 register caddr_t cp;
2575 register int32_t t1, t2;
2576 struct vnode *newvp = (struct vnode *)0;
2577 struct nfsnode *np, *dnp = VTONFS(dvp);
2578 caddr_t bpos, dpos, cp2;
2579 int error = 0, fhlen, attrflag;
2580 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2581 nfsfh_t *nfhp;
2582 int v3 = NFS_ISV3(dvp);
2583
2584 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2585 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2586 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2587 nfsm_fhtom(dvp, v3);
2588 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2589 nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred);
2590 if (npp && !error) {
2591 nfsm_getfh(nfhp, fhlen, v3);
2592 if (*npp) {
2593 np = *npp;
2594 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2595 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2596 np->n_fhp = &np->n_fh;
2597 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2598 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2599 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2600 np->n_fhsize = fhlen;
2601 newvp = NFSTOV(np);
2602 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2603 VREF(dvp);
2604 newvp = dvp;
2605 } else {
2606 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2607 if (error) {
2608 m_freem(mrep);
2609 return (error);
2610 }
2611 newvp = NFSTOV(np);
2612 }
2613 if (v3) {
2614 nfsm_postop_attr(newvp, attrflag);
2615 if (!attrflag && *npp == NULL) {
2616 m_freem(mrep);
2617 if (newvp == dvp)
2618 vrele(newvp);
2619 else
2620 vput(newvp);
2621 return (ENOENT);
2622 }
2623 } else
2624 nfsm_loadattr(newvp, (struct vattr *)0);
2625 }
2626 nfsm_reqdone;
2627 if (npp && *npp == NULL) {
2628 if (error) {
2629 if (newvp) {
2630 if (newvp == dvp)
2631 vrele(newvp);
2632 else
2633 vput(newvp);
2634 }
2635 } else
2636 *npp = np;
2637 }
2638 return (error);
2639}
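
/*
 * Illustrative sketch (not part of the original source): the three calling
 * conventions described in the comment above.  dvp, name, len, cred and
 * procp are assumed to be in hand; this fragment is not compiled.
 */
#if 0
	struct nfsnode *np;
	int error;

	/* npp == NULL: just probe whether the name exists. */
	error = nfs_lookitup(dvp, name, len, cred, procp, NULL);

	/* *npp == NULL: allocate a new nfsnode, attributes handled too. */
	np = NULL;
	error = nfs_lookitup(dvp, name, len, cred, procp, &np);

	/* *npp != NULL: update the file handle in the existing vnode. */
	error = nfs_lookitup(dvp, name, len, cred, procp, &np);
#endif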
2640
2641/*
2642 * Nfs Version 3 commit rpc
2643 */
2644int
2645nfs_commit(vp, offset, cnt, cred, procp)
2646 struct vnode *vp;
2647 u_quad_t offset;
2648 int cnt;
2649 struct ucred *cred;
2650 struct proc *procp;
2651{
2652 register caddr_t cp;
2653 register u_int32_t *tl;
2654 register int32_t t1, t2;
2655 register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2656 caddr_t bpos, dpos, cp2;
2657 int error = 0, wccflag = NFSV3_WCCRATTR;
2658 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2659
2660 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2661 return (0);
2662 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2663 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2664 nfsm_fhtom(vp, 1);
2665 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2666 txdr_hyper(offset, tl);
2667 tl += 2;
2668 *tl = txdr_unsigned(cnt);
2669 nfsm_request(vp, NFSPROC_COMMIT, procp, cred);
2670 nfsm_wcc_data(vp, wccflag);
2671 if (!error) {
2672 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2673 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2674 NFSX_V3WRITEVERF)) {
2675 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2676 NFSX_V3WRITEVERF);
2677 error = NFSERR_STALEWRITEVERF;
2678 }
2679 }
2680 nfsm_reqdone;
2681 return (error);
2682}
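
/*
 * Illustrative sketch (not part of the original source): the write verifier
 * check done above.  If the verf in the COMMIT reply differs from the one
 * remembered in the mount point, every write the client believed was
 * committed must be redone (NFSERR_STALEWRITEVERF).  memcmp/memcpy stand in
 * for bcmp/bcopy here and the verifier contents are made up.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define NFSX_V3WRITEVERF	8

int
main(void)
{
	unsigned char nm_verf[NFSX_V3WRITEVERF] = "boot-01";	/* mount's copy */
	unsigned char reply[NFSX_V3WRITEVERF]   = "boot-02";	/* from the rpc */
	int stale = 0;

	if (memcmp(nm_verf, reply, NFSX_V3WRITEVERF) != 0) {
		memcpy(nm_verf, reply, NFSX_V3WRITEVERF);	/* remember new verf */
		stale = 1;	/* NFSERR_STALEWRITEVERF in the real code */
	}
	printf("stale=%d\n", stale);	/* 1: server rebooted, rewrite data */
	return (0);
}
#endif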
2683
2684/*
2685 * Kludge City..
2686 * - make nfs_bmap() essentially a no-op that does no translation
2687 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2688 * (Maybe I could use the process's page mapping, but I was concerned that
2689 * Kernel Write might not be enabled and also figured copyout() would do
2690 * a lot more work than bcopy(), and also it currently happens in the
2691 * context of the swapper process (2).)
2692 */
2693static int
2694nfs_bmap(ap)
2695 struct vop_bmap_args /* {
2696 struct vnode *a_vp;
2697 daddr_t a_bn;
2698 struct vnode **a_vpp;
2699 daddr_t *a_bnp;
2700 int *a_runp;
2701 int *a_runb;
2702 } */ *ap;
2703{
2704 register struct vnode *vp = ap->a_vp;
2705
2706 if (ap->a_vpp != NULL)
2707 *ap->a_vpp = vp;
2708 if (ap->a_bnp != NULL)
2709 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2710 if (ap->a_runp != NULL)
2711 *ap->a_runp = 0;
2712 if (ap->a_runb != NULL)
2713 *ap->a_runb = 0;
2714 return (0);
2715}
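
/*
 * Illustrative sketch (not part of the original source): the only
 * "translation" nfs_bmap() does is scaling the logical block number by the
 * mount's i/o size expressed in DEV_BSIZE units (btodb()).  The 8192 byte
 * i/o size below is just an example value.
 */
#if 0
#include <stdio.h>

#define DEV_BSIZE	512
#define btodb(bytes)	((bytes) / DEV_BSIZE)

int
main(void)
{
	long f_iosize = 8192;	/* mnt_stat.f_iosize, example value */
	long bn = 3;		/* logical block number from the caller */

	printf("physical block = %ld\n", bn * btodb(f_iosize));	/* 3 * 16 = 48 */
	return (0);
}
#endif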
2716
2717/*
2718 * Strategy routine.
2719 * For async requests when nfsiod(s) are running, queue the request by
2720 * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
2721 * request.
2722 */
2723static int
2724nfs_strategy(ap)
2725 struct vop_strategy_args *ap;
2726{
2727 register struct buf *bp = ap->a_bp;
2728 struct ucred *cr;
2729 struct proc *p;
2730 int error = 0;
2731
2732 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2733 KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
2734
2735 if (bp->b_flags & B_PHYS)
2736 panic("nfs physio");
2737
2738 if (bp->b_flags & B_ASYNC)
2739 p = (struct proc *)0;
2740 else
2741 p = curproc; /* XXX */
2742
2743 if (bp->b_flags & B_READ)
2744 cr = bp->b_rcred;
2745 else
2746 cr = bp->b_wcred;
2747
2748	/*
2749	 * If the op is asynchronous and an i/o daemon is waiting,
2750	 * queue the request, wake it up and wait for completion;
2751	 * otherwise just do it ourselves.
2752	 */
2753 if ((bp->b_flags & B_ASYNC) == 0 ||
2754 nfs_asyncio(bp, NOCRED, p))
2755 error = nfs_doio(bp, cr, p);
2756 return (error);
2757}
2758
2759/*
2760 * Mmap a file
2761 *
2762 * NB Currently unsupported.
2763 */
2764/* ARGSUSED */
2765static int
2766nfs_mmap(ap)
2767 struct vop_mmap_args /* {
2768 struct vnode *a_vp;
2769 int a_fflags;
2770 struct ucred *a_cred;
2771 struct proc *a_p;
2772 } */ *ap;
2773{
2774
2775 return (EINVAL);
2776}
2777
2778/*
2779 * fsync vnode op. Just call nfs_flush() with commit == 1.
2780 */
2781/* ARGSUSED */
2782static int
2783nfs_fsync(ap)
2784 struct vop_fsync_args /* {
2785 struct vnodeop_desc *a_desc;
2786 struct vnode * a_vp;
2787 struct ucred * a_cred;
2788 int a_waitfor;
2789 struct proc * a_p;
2790 } */ *ap;
2791{
2792
2793 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2794}
2795
2796/*
2797 * Flush all the blocks associated with a vnode.
2798 * Walk through the buffer pool and push any dirty pages
2799 * associated with the vnode.
2800 */
2801static int
2802nfs_flush(vp, cred, waitfor, p, commit)
2803 register struct vnode *vp;
2804 struct ucred *cred;
2805 int waitfor;
2806 struct proc *p;
2807 int commit;
2808{
2809 register struct nfsnode *np = VTONFS(vp);
2810 register struct buf *bp;
2811 register int i;
2812 struct buf *nbp;
2813 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2814 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2815 int passone = 1;
2816 u_quad_t off, endoff, toff;
2817 struct ucred* wcred = NULL;
2818 struct buf **bvec = NULL;
2819#ifndef NFS_COMMITBVECSIZ
2820#define NFS_COMMITBVECSIZ 20
2821#endif
2822 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2823 int bvecsize = 0, bveccount;
2824
2825 if (nmp->nm_flag & NFSMNT_INT)
2826 slpflag = PCATCH;
2827 if (!commit)
2828 passone = 0;
2829	/*
2830	 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2831	 * server, but has not been committed to stable storage on the server
2832	 * yet. On the first pass, the byte range is worked out and the commit
2833	 * rpc is done. On the second pass, nfs_writebp() is called to do the
2834	 * job.
2835	 */
2836again:
2837 off = (u_quad_t)-1;
2838 endoff = 0;
2839 bvecpos = 0;
2840 if (NFS_ISV3(vp) && commit) {
2841 s = splbio();
2842		/*
2843		 * Count up how many buffers are waiting for a commit.
2844		 */
2845 bveccount = 0;
2846 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2847 nbp = TAILQ_NEXT(bp, b_vnbufs);
2848 if (BUF_REFCNT(bp) == 0 &&
2849 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2850 == (B_DELWRI | B_NEEDCOMMIT))
2851 bveccount++;
2852 }
2853 /*
2854 * Allocate space to remember the list of bufs to commit. It is
2855 * important to use M_NOWAIT here to avoid a race with nfs_write.
2856 * If we can't get memory (for whatever reason), we will end up
2857 * committing the buffers one-by-one in the loop below.
2858 */
2859 if (bvec != NULL && bvec != bvec_on_stack)
2860 free(bvec, M_TEMP);
2861 if (bveccount > NFS_COMMITBVECSIZ) {
2862 bvec = (struct buf **)
2863 malloc(bveccount * sizeof(struct buf *),
2864 M_TEMP, M_NOWAIT);
2865 if (bvec == NULL) {
2866 bvec = bvec_on_stack;
2867 bvecsize = NFS_COMMITBVECSIZ;
2868 } else
2869 bvecsize = bveccount;
2870 } else {
2871 bvec = bvec_on_stack;
2872 bvecsize = NFS_COMMITBVECSIZ;
2873 }
2874 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2875 nbp = TAILQ_NEXT(bp, b_vnbufs);
2876 if (bvecpos >= bvecsize)
2877 break;
2878 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2879 (B_DELWRI | B_NEEDCOMMIT) ||
2880 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2881 continue;
2882 bremfree(bp);
2883 /*
2884 * Work out if all buffers are using the same cred
2885 * so we can deal with them all with one commit.
2886 *
2887 * NOTE: we are not clearing B_DONE here, so we have
2888 * to do it later on in this routine if we intend to
2889 * initiate I/O on the bp.
2890 *
2891 * Note: to avoid loopback deadlocks, we do not
2892 * assign b_runningbufspace.
2893 */
2894 if (wcred == NULL)
2895 wcred = bp->b_wcred;
2896 else if (wcred != bp->b_wcred)
2897 wcred = NOCRED;
2898 bp->b_flags |= B_WRITEINPROG;
2899 vfs_busy_pages(bp, 1);
2900
2901 /*
2902 * bp is protected by being locked, but nbp is not
2903 * and vfs_busy_pages() may sleep. We have to
2904 * recalculate nbp.
2905 */
2906 nbp = TAILQ_NEXT(bp, b_vnbufs);
2907
2908 /*
2909 * A list of these buffers is kept so that the
2910 * second loop knows which buffers have actually
2911 * been committed. This is necessary, since there
2912 * may be a race between the commit rpc and new
2913 * uncommitted writes on the file.
2914 */
2915 bvec[bvecpos++] = bp;
2916 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2917 bp->b_dirtyoff;
2918 if (toff < off)
2919 off = toff;
2920 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2921 if (toff > endoff)
2922 endoff = toff;
2923 }
2924 splx(s);
2925 }
2926 if (bvecpos > 0) {
2927 /*
2928 * Commit data on the server, as required.
2929 * If all bufs are using the same wcred, then use that with
2930 * one call for all of them, otherwise commit each one
2931 * separately.
2932 */
2933 if (wcred != NOCRED)
2934 retv = nfs_commit(vp, off, (int)(endoff - off),
2935 wcred, p);
2936 else {
2937 retv = 0;
2938 for (i = 0; i < bvecpos; i++) {
2939 off_t off, size;
2940 bp = bvec[i];
2941 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2942 bp->b_dirtyoff;
2943 size = (u_quad_t)(bp->b_dirtyend
2944 - bp->b_dirtyoff);
2945 retv = nfs_commit(vp, off, (int)size,
2946 bp->b_wcred, p);
2947 if (retv) break;
2948 }
2949 }
2950
2951 if (retv == NFSERR_STALEWRITEVERF)
2952 nfs_clearcommit(vp->v_mount);
2953
2954 /*
2955 * Now, either mark the blocks I/O done or mark the
2956 * blocks dirty, depending on whether the commit
2957 * succeeded.
2958 */
2959 for (i = 0; i < bvecpos; i++) {
2960 bp = bvec[i];
2961 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2962 if (retv) {
2963 /*
2964 * Error, leave B_DELWRI intact
2965 */
2966 vfs_unbusy_pages(bp);
2967 brelse(bp);
2968 } else {
2969 /*
2970 * Success, remove B_DELWRI ( bundirty() ).
2971 *
2972 * b_dirtyoff/b_dirtyend seem to be NFS
2973 * specific. We should probably move that
2974 * into bundirty(). XXX
2975 */
2976 s = splbio();
2977 vp->v_numoutput++;
2978 bp->b_flags |= B_ASYNC;
2979 bundirty(bp);
2980 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2981 bp->b_dirtyoff = bp->b_dirtyend = 0;
2982 splx(s);
2983 biodone(bp);
2984 }
2985 }
2986 }
2987
2988 /*
2989 * Start/do any write(s) that are required.
2990 */
2991loop:
2992 s = splbio();
2993 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2994 nbp = TAILQ_NEXT(bp, b_vnbufs);
2995 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2996 if (waitfor != MNT_WAIT || passone)
2997 continue;
2998 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2999 "nfsfsync", slpflag, slptimeo);
3000 splx(s);
3001 if (error == 0)
3002 panic("nfs_fsync: inconsistent lock");
3003 if (error == ENOLCK)
3004 goto loop;
3005 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
3006 error = EINTR;
3007 goto done;
3008 }
3009 if (slpflag == PCATCH) {
3010 slpflag = 0;
3011 slptimeo = 2 * hz;
3012 }
3013 goto loop;
3014 }
3015 if ((bp->b_flags & B_DELWRI) == 0)
3016 panic("nfs_fsync: not dirty");
3017 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
3018 BUF_UNLOCK(bp);
3019 continue;
3020 }
3021 bremfree(bp);
3022 if (passone || !commit)
3023 bp->b_flags |= B_ASYNC;
3024 else
3025 bp->b_flags |= B_ASYNC | B_WRITEINPROG;
3026 splx(s);
3027 VOP_BWRITE(bp->b_vp, bp);
3028 goto loop;
3029 }
3030 splx(s);
3031 if (passone) {
3032 passone = 0;
3033 goto again;
3034 }
3035 if (waitfor == MNT_WAIT) {
3036 while (vp->v_numoutput) {
3037 vp->v_flag |= VBWAIT;
3038 error = tsleep((caddr_t)&vp->v_numoutput,
3039 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
3040 if (error) {
3041 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
3042 error = EINTR;
3043 goto done;
3044 }
3045 if (slpflag == PCATCH) {
3046 slpflag = 0;
3047 slptimeo = 2 * hz;
3048 }
3049 }
3050 }
3051 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
3052 goto loop;
3053 }
3054 }
3055 if (np->n_flag & NWRITEERR) {
3056 error = np->n_error;
3057 np->n_flag &= ~NWRITEERR;
3058 }
3059done:
3060 if (bvec != NULL && bvec != bvec_on_stack)
3061 free(bvec, M_TEMP);
3062 return (error);
3063}
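
/*
 * Illustrative sketch (not part of the original source): how the first pass
 * above accumulates the commit byte range over the B_DELWRI|B_NEEDCOMMIT
 * buffers (off = lowest dirty offset seen, endoff = highest dirty end seen)
 * so one COMMIT rpc can cover them all when they share a write cred.  The
 * offsets and lengths are made up for the example.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	/* b_blkno * DEV_BSIZE + b_dirtyoff for three buffers ... */
	unsigned long long starts[3] = { 8192, 0, 4096 };
	/* ... and their b_dirtyend - b_dirtyoff lengths */
	unsigned long long lens[3]   = { 1024, 512, 4096 };
	unsigned long long off = ~0ULL, endoff = 0, toff;
	int i;

	for (i = 0; i < 3; i++) {
		toff = starts[i];
		if (toff < off)
			off = toff;
		toff += lens[i];
		if (toff > endoff)
			endoff = toff;
	}
	/* nfs_commit(vp, off, (int)(endoff - off), wcred, p) in the real code */
	printf("commit off=%llu cnt=%llu\n", off, endoff - off);	/* 0 and 9216 */
	return (0);
}
#endif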
3064
3065/*
3066 * NFS advisory byte-level locks.
3067 * Currently unsupported.
3068 */
3069static int
3070nfs_advlock(ap)
3071 struct vop_advlock_args /* {
3072 struct vnode *a_vp;
3073 caddr_t a_id;
3074 int a_op;
3075 struct flock *a_fl;
3076 int a_flags;
3077 } */ *ap;
3078{
3079 register struct nfsnode *np = VTONFS(ap->a_vp);
3080
3081 /*
3082 * The following kludge is to allow diskless support to work
3083 * until a real NFS lockd is implemented. Basically, just pretend
3084 * that this is a local lock.
3085 */
3086 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
3087}
3088
3089/*
3090 * Print out the contents of an nfsnode.
3091 */
3092static int
3093nfs_print(ap)
3094 struct vop_print_args /* {
3095 struct vnode *a_vp;
3096 } */ *ap;
3097{
3098 register struct vnode *vp = ap->a_vp;
3099 register struct nfsnode *np = VTONFS(vp);
3100
3101 printf("tag VT_NFS, fileid %ld fsid 0x%x",
3102 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3103 if (vp->v_type == VFIFO)
3104 fifo_printinfo(vp);
3105 printf("\n");
3106 return (0);
3107}
3108
3109/*
3110 * Just call nfs_writebp() with the force argument set to 1.
3111 *
3112 * NOTE: B_DONE may or may not be set in a_bp on call.
3113 */
3114static int
3115nfs_bwrite(ap)
3116 struct vop_bwrite_args /* {
3117 struct vnode *a_bp;
3118 } */ *ap;
3119{
3120 return (nfs_writebp(ap->a_bp, 1, curproc));
3121}
3122
3123/*
3124 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
3125 * the force flag is set, and it also handles the B_NEEDCOMMIT flag.  We set
3126 * B_CACHE if this is a VMIO buffer.
3127 */
3128int
3129nfs_writebp(bp, force, procp)
3130 register struct buf *bp;
3131 int force;
3132 struct proc *procp;
3133{
3134 int s;
3135 int oldflags = bp->b_flags;
3136#if 0
3137 int retv = 1;
3138 off_t off;
3139#endif
3140
3141 if (BUF_REFCNT(bp) == 0)
3142 panic("bwrite: buffer is not locked???");
3143
3144 if (bp->b_flags & B_INVAL) {
3145 brelse(bp);
3146 return(0);
3147 }
3148
3149 bp->b_flags |= B_CACHE;
3150
3151 /*
3152 * Undirty the bp. We will redirty it later if the I/O fails.
3153 */
3154
3155 s = splbio();
3156 bundirty(bp);
3157 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3158
3159 bp->b_vp->v_numoutput++;
3160 curproc->p_stats->p_ru.ru_oublock++;
3161 splx(s);
3162
3163 /*
3164 * Note: to avoid loopback deadlocks, we do not
3165 * assign b_runningbufspace.
3166 */
3167 vfs_busy_pages(bp, 1);
3168
3169 if (force)
3170 bp->b_flags |= B_WRITEINPROG;
3171 BUF_KERNPROC(bp);
3172 VOP_STRATEGY(bp->b_vp, bp);
3173
3174 if( (oldflags & B_ASYNC) == 0) {
3175 int rtval = biowait(bp);
3176
3177 if (oldflags & B_DELWRI) {
3178 s = splbio();
3179 reassignbuf(bp, bp->b_vp);
3180 splx(s);
3181 }
3182
3183 brelse(bp);
3184 return (rtval);
3185 }
3186
3187 return (0);
3188}
3189
3190/*
3191 * nfs special file access vnode op.
3192 * Essentially just get vattr and then imitate iaccess() since the device is
3193 * local to the client.
3194 */
3195static int
3196nfsspec_access(ap)
3197 struct vop_access_args /* {
3198 struct vnode *a_vp;
3199 int a_mode;
3200 struct ucred *a_cred;
3201 struct proc *a_p;
3202 } */ *ap;
3203{
3204 register struct vattr *vap;
3205 register gid_t *gp;
3206 register struct ucred *cred = ap->a_cred;
3207 struct vnode *vp = ap->a_vp;
3208 mode_t mode = ap->a_mode;
3209 struct vattr vattr;
3210 register int i;
3211 int error;
3212
3213	/*
3214	 * Disallow write attempts on filesystems mounted read-only,
3215	 * unless the file is a socket, fifo, or a block or character
3216	 * device resident on the filesystem.
3217	 */
3218 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3219 switch (vp->v_type) {
3220 case VREG:
3221 case VDIR:
3222 case VLNK:
3223 return (EROFS);
3224 default:
3225 break;
3226 }
3227 }
3228 /*
3229 * If you're the super-user,
3230 * you always get access.
3231 */
3232 if (cred->cr_uid == 0)
3233 return (0);
3234 vap = &vattr;
3235 error = VOP_GETATTR(vp, vap, cred, ap->a_p);
3236 if (error)
3237 return (error);
3238 /*
3239 * Access check is based on only one of owner, group, public.
3240 * If not owner, then check group. If not a member of the
3241 * group, then check public access.
3242 */
3243 if (cred->cr_uid != vap->va_uid) {
3244 mode >>= 3;
3245 gp = cred->cr_groups;
3246 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3247 if (vap->va_gid == *gp)
3248 goto found;
3249 mode >>= 3;
3250found:
3251 ;
3252 }
3253 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3254 return (error);
3255}
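
/*
 * Illustrative sketch (not part of the original source): the owner/group/
 * other check performed above.  The requested VREAD/VWRITE/VEXEC bits start
 * in the owner position and are shifted right by 3 to reach the group
 * triplet and again for "other".  The uids, gids and mode are example values.
 */
#if 0
#include <stdio.h>

#define VEXEC	0100
#define VWRITE	0200
#define VREAD	0400

static int
spec_access(unsigned va_mode, unsigned va_uid, unsigned va_gid,
	unsigned cr_uid, const unsigned *groups, int ngroups, unsigned mode)
{
	int i;

	if (cr_uid == 0)
		return (0);			/* super-user always gets access */
	if (cr_uid != va_uid) {
		mode >>= 3;			/* owner bits -> group bits */
		for (i = 0; i < ngroups; i++)
			if (groups[i] == va_gid)
				goto found;
		mode >>= 3;			/* group bits -> other bits */
found:		;
	}
	return ((va_mode & mode) == mode ? 0 : 13);	/* 13 == EACCES */
}

int
main(void)
{
	unsigned groups[2] = { 5, 20 };

	/* 0640 device owned by 100:5; uid 200, member of group 5, wants VREAD */
	printf("error = %d\n", spec_access(0640, 100, 5, 200, groups, 2, VREAD));
	return (0);
}
#endif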
3256
3257/*
3258 * Read wrapper for special devices.
3259 */
3260static int
3261nfsspec_read(ap)
3262 struct vop_read_args /* {
3263 struct vnode *a_vp;
3264 struct uio *a_uio;
3265 int a_ioflag;
3266 struct ucred *a_cred;
3267 } */ *ap;
3268{
3269 register struct nfsnode *np = VTONFS(ap->a_vp);
3270
3271 /*
3272 * Set access flag.
3273 */
3274 np->n_flag |= NACC;
3275 getnanotime(&np->n_atim);
3276 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3277}
3278
3279/*
3280 * Write wrapper for special devices.
3281 */
3282static int
3283nfsspec_write(ap)
3284 struct vop_write_args /* {
3285 struct vnode *a_vp;
3286 struct uio *a_uio;
3287 int a_ioflag;
3288 struct ucred *a_cred;
3289 } */ *ap;
3290{
3291 register struct nfsnode *np = VTONFS(ap->a_vp);
3292
3293 /*
3294 * Set update flag.
3295 */
3296 np->n_flag |= NUPD;
3297 getnanotime(&np->n_mtim);
3298 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3299}
3300
3301/*
3302 * Close wrapper for special devices.
3303 *
3304 * Update the times on the nfsnode then do device close.
3305 */
3306static int
3307nfsspec_close(ap)
3308 struct vop_close_args /* {
3309 struct vnode *a_vp;
3310 int a_fflag;
3311 struct ucred *a_cred;
3312 struct proc *a_p;
3313 } */ *ap;
3314{
3315 register struct vnode *vp = ap->a_vp;
3316 register struct nfsnode *np = VTONFS(vp);
3317 struct vattr vattr;
3318
3319 if (np->n_flag & (NACC | NUPD)) {
3320 np->n_flag |= NCHG;
3321 if (vp->v_usecount == 1 &&
3322 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3323 VATTR_NULL(&vattr);
3324 if (np->n_flag & NACC)
3325 vattr.va_atime = np->n_atim;
3326 if (np->n_flag & NUPD)
3327 vattr.va_mtime = np->n_mtim;
3328 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3329 }
3330 }
3331 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3332}
3333
3334/*
3335 * Read wrapper for fifos.
3336 */
3337static int
3338nfsfifo_read(ap)
3339 struct vop_read_args /* {
3340 struct vnode *a_vp;
3341 struct uio *a_uio;
3342 int a_ioflag;
3343 struct ucred *a_cred;
3344 } */ *ap;
3345{
3346 register struct nfsnode *np = VTONFS(ap->a_vp);
3347
3348 /*
3349 * Set access flag.
3350 */
3351 np->n_flag |= NACC;
3352 getnanotime(&np->n_atim);
3353 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3354}
3355
3356/*
3357 * Write wrapper for fifos.
3358 */
3359static int
3360nfsfifo_write(ap)
3361 struct vop_write_args /* {
3362 struct vnode *a_vp;
3363 struct uio *a_uio;
3364 int a_ioflag;
3365 struct ucred *a_cred;
3366 } */ *ap;
3367{
3368 register struct nfsnode *np = VTONFS(ap->a_vp);
3369
3370 /*
3371 * Set update flag.
3372 */
3373 np->n_flag |= NUPD;
3374 getnanotime(&np->n_mtim);
3375 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3376}
3377
3378/*
3379 * Close wrapper for fifos.
3380 *
3381 * Update the times on the nfsnode then do fifo close.
3382 */
3383static int
3384nfsfifo_close(ap)
3385 struct vop_close_args /* {
3386 struct vnode *a_vp;
3387 int a_fflag;
3388 struct ucred *a_cred;
3389 struct proc *a_p;
3390 } */ *ap;
3391{
3392 register struct vnode *vp = ap->a_vp;
3393 register struct nfsnode *np = VTONFS(vp);
3394 struct vattr vattr;
3395 struct timespec ts;
3396
3397 if (np->n_flag & (NACC | NUPD)) {
3398 getnanotime(&ts);
3399 if (np->n_flag & NACC)
3400 np->n_atim = ts;
3401 if (np->n_flag & NUPD)
3402 np->n_mtim = ts;
3403 np->n_flag |= NCHG;
3404 if (vp->v_usecount == 1 &&
3405 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3406 VATTR_NULL(&vattr);
3407 if (np->n_flag & NACC)
3408 vattr.va_atime = np->n_atim;
3409 if (np->n_flag & NUPD)
3410 vattr.va_mtime = np->n_mtim;
3411 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3412 }
3413 }
3414 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3415}
3416