Add missing prototype (fixes warning).
[dragonfly.git] / sys / vfs / nfs / nfs_vnops.c
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $FreeBSD: src/sys/nfs/nfs_vnops.c,v 1.150.2.5 2001/12/20 19:56:28 dillon Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_vnops.c,v 1.2 2003/06/17 04:28:54 dillon Exp $
39 */
40
41
42/*
43 * vnode op calls for Sun NFS version 2 and 3
44 */
45
46#include "opt_inet.h"
47
48#include <sys/param.h>
49#include <sys/kernel.h>
50#include <sys/systm.h>
51#include <sys/resourcevar.h>
52#include <sys/proc.h>
53#include <sys/mount.h>
54#include <sys/buf.h>
55#include <sys/malloc.h>
56#include <sys/mbuf.h>
57#include <sys/namei.h>
58#include <sys/socket.h>
59#include <sys/vnode.h>
60#include <sys/dirent.h>
61#include <sys/fcntl.h>
62#include <sys/lockf.h>
63#include <sys/stat.h>
64#include <sys/sysctl.h>
65#include <sys/conf.h>
66
67#include <vm/vm.h>
68#include <vm/vm_extern.h>
69#include <vm/vm_zone.h>
70
71#include <miscfs/fifofs/fifo.h>
72
73#include <nfs/rpcv2.h>
74#include <nfs/nfsproto.h>
75#include <nfs/nfs.h>
76#include <nfs/nfsnode.h>
77#include <nfs/nfsmount.h>
78#include <nfs/xdr_subs.h>
79#include <nfs/nfsm_subs.h>
80#include <nfs/nqnfs.h>
81
82#include <net/if.h>
83#include <netinet/in.h>
84#include <netinet/in_var.h>
85
86/* Defs */
87#define TRUE 1
88#define FALSE 0
89
90/*
91 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
92 * calls are not in getblk() and brelse() so that they would not be necessary
93 * here.
94 */
95#ifndef B_VMIO
96#define vfs_busy_pages(bp, f)
97#endif
98
99static int nfsspec_read __P((struct vop_read_args *));
100static int nfsspec_write __P((struct vop_write_args *));
101static int nfsfifo_read __P((struct vop_read_args *));
102static int nfsfifo_write __P((struct vop_write_args *));
103static int nfsspec_close __P((struct vop_close_args *));
104static int nfsfifo_close __P((struct vop_close_args *));
105#define nfs_poll vop_nopoll
106static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int));
107static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *));
108static int nfs_lookup __P((struct vop_lookup_args *));
109static int nfs_create __P((struct vop_create_args *));
110static int nfs_mknod __P((struct vop_mknod_args *));
111static int nfs_open __P((struct vop_open_args *));
112static int nfs_close __P((struct vop_close_args *));
113static int nfs_access __P((struct vop_access_args *));
114static int nfs_getattr __P((struct vop_getattr_args *));
115static int nfs_setattr __P((struct vop_setattr_args *));
116static int nfs_read __P((struct vop_read_args *));
117static int nfs_mmap __P((struct vop_mmap_args *));
118static int nfs_fsync __P((struct vop_fsync_args *));
119static int nfs_remove __P((struct vop_remove_args *));
120static int nfs_link __P((struct vop_link_args *));
121static int nfs_rename __P((struct vop_rename_args *));
122static int nfs_mkdir __P((struct vop_mkdir_args *));
123static int nfs_rmdir __P((struct vop_rmdir_args *));
124static int nfs_symlink __P((struct vop_symlink_args *));
125static int nfs_readdir __P((struct vop_readdir_args *));
126static int nfs_bmap __P((struct vop_bmap_args *));
127static int nfs_strategy __P((struct vop_strategy_args *));
128static int nfs_lookitup __P((struct vnode *, const char *, int,
129 struct ucred *, struct proc *, struct nfsnode **));
130static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *));
131static int nfsspec_access __P((struct vop_access_args *));
132static int nfs_readlink __P((struct vop_readlink_args *));
133static int nfs_print __P((struct vop_print_args *));
134static int nfs_advlock __P((struct vop_advlock_args *));
135static int nfs_bwrite __P((struct vop_bwrite_args *));
136/*
137 * Global vfs data structures for nfs
138 */
139vop_t **nfsv2_vnodeop_p;
140static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
141 { &vop_default_desc, (vop_t *) vop_defaultop },
142 { &vop_access_desc, (vop_t *) nfs_access },
143 { &vop_advlock_desc, (vop_t *) nfs_advlock },
144 { &vop_bmap_desc, (vop_t *) nfs_bmap },
145 { &vop_bwrite_desc, (vop_t *) nfs_bwrite },
146 { &vop_close_desc, (vop_t *) nfs_close },
147 { &vop_create_desc, (vop_t *) nfs_create },
148 { &vop_fsync_desc, (vop_t *) nfs_fsync },
149 { &vop_getattr_desc, (vop_t *) nfs_getattr },
150 { &vop_getpages_desc, (vop_t *) nfs_getpages },
151 { &vop_putpages_desc, (vop_t *) nfs_putpages },
152 { &vop_inactive_desc, (vop_t *) nfs_inactive },
153 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
154 { &vop_lease_desc, (vop_t *) vop_null },
155 { &vop_link_desc, (vop_t *) nfs_link },
156 { &vop_lock_desc, (vop_t *) vop_sharedlock },
157 { &vop_lookup_desc, (vop_t *) nfs_lookup },
158 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
159 { &vop_mknod_desc, (vop_t *) nfs_mknod },
160 { &vop_mmap_desc, (vop_t *) nfs_mmap },
161 { &vop_open_desc, (vop_t *) nfs_open },
162 { &vop_poll_desc, (vop_t *) nfs_poll },
163 { &vop_print_desc, (vop_t *) nfs_print },
164 { &vop_read_desc, (vop_t *) nfs_read },
165 { &vop_readdir_desc, (vop_t *) nfs_readdir },
166 { &vop_readlink_desc, (vop_t *) nfs_readlink },
167 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
168 { &vop_remove_desc, (vop_t *) nfs_remove },
169 { &vop_rename_desc, (vop_t *) nfs_rename },
170 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
171 { &vop_setattr_desc, (vop_t *) nfs_setattr },
172 { &vop_strategy_desc, (vop_t *) nfs_strategy },
173 { &vop_symlink_desc, (vop_t *) nfs_symlink },
174 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
175 { &vop_write_desc, (vop_t *) nfs_write },
176 { NULL, NULL }
177};
178static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
179 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
180VNODEOP_SET(nfsv2_vnodeop_opv_desc);
181
182/*
183 * Special device vnode ops
184 */
185vop_t **spec_nfsv2nodeop_p;
186static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
187 { &vop_default_desc, (vop_t *) spec_vnoperate },
188 { &vop_access_desc, (vop_t *) nfsspec_access },
189 { &vop_close_desc, (vop_t *) nfsspec_close },
190 { &vop_fsync_desc, (vop_t *) nfs_fsync },
191 { &vop_getattr_desc, (vop_t *) nfs_getattr },
192 { &vop_inactive_desc, (vop_t *) nfs_inactive },
193 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
194 { &vop_lock_desc, (vop_t *) vop_sharedlock },
195 { &vop_print_desc, (vop_t *) nfs_print },
196 { &vop_read_desc, (vop_t *) nfsspec_read },
197 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
198 { &vop_setattr_desc, (vop_t *) nfs_setattr },
199 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
200 { &vop_write_desc, (vop_t *) nfsspec_write },
201 { NULL, NULL }
202};
203static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
204 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
205VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
206
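/*
 * Fifo vnode ops
 */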
207vop_t **fifo_nfsv2nodeop_p;
208static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
209 { &vop_default_desc, (vop_t *) fifo_vnoperate },
210 { &vop_access_desc, (vop_t *) nfsspec_access },
211 { &vop_close_desc, (vop_t *) nfsfifo_close },
212 { &vop_fsync_desc, (vop_t *) nfs_fsync },
213 { &vop_getattr_desc, (vop_t *) nfs_getattr },
214 { &vop_inactive_desc, (vop_t *) nfs_inactive },
215 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
216 { &vop_lock_desc, (vop_t *) vop_sharedlock },
217 { &vop_print_desc, (vop_t *) nfs_print },
218 { &vop_read_desc, (vop_t *) nfsfifo_read },
219 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
220 { &vop_setattr_desc, (vop_t *) nfs_setattr },
221 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
222 { &vop_write_desc, (vop_t *) nfsfifo_write },
223 { NULL, NULL }
224};
225static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
226 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
227VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
228
229static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp,
230 struct componentname *cnp,
231 struct vattr *vap));
232static int nfs_removerpc __P((struct vnode *dvp, const char *name,
233 int namelen,
234 struct ucred *cred, struct proc *proc));
235static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr,
236 int fnamelen, struct vnode *tdvp,
237 const char *tnameptr, int tnamelen,
238 struct ucred *cred, struct proc *proc));
239static int nfs_renameit __P((struct vnode *sdvp,
240 struct componentname *scnp,
241 struct sillyrename *sp));
242
243/*
244 * Global variables
245 */
246extern u_int32_t nfs_true, nfs_false;
247extern u_int32_t nfs_xdrneg1;
248extern struct nfsstats nfsstats;
249extern nfstype nfsv3_type[9];
250struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
251struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
252int nfs_numasync = 0;
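/* Size of the fixed portion of a dirent, i.e. everything but the name. */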
253#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
254
255SYSCTL_DECL(_vfs_nfs);
256
257static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
258SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
259 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
260
261static int nfsv3_commit_on_close = 0;
262SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
263 &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
264#if 0
265SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
266 &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
267
268SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
269 &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
270#endif
271
272#define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
273 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
274 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
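/*
 * Do an NFSv3 ACCESS RPC on the wire.  On success, cache the access mode
 * granted by the server (together with the credential's uid and a
 * timestamp) in the nfsnode so nfs_access() can answer repeated checks
 * without another RPC.
 */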
275static int
276nfs3_access_otw(struct vnode *vp,
277 int wmode,
278 struct proc *p,
279 struct ucred *cred)
280{
281 const int v3 = 1;
282 u_int32_t *tl;
283 int error = 0, attrflag;
284
285 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
286 caddr_t bpos, dpos, cp2;
287 register int32_t t1, t2;
288 register caddr_t cp;
289 u_int32_t rmode;
290 struct nfsnode *np = VTONFS(vp);
291
292 nfsstats.rpccnt[NFSPROC_ACCESS]++;
293 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
294 nfsm_fhtom(vp, v3);
295 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
296 *tl = txdr_unsigned(wmode);
297 nfsm_request(vp, NFSPROC_ACCESS, p, cred);
298 nfsm_postop_attr(vp, attrflag);
299 if (!error) {
300 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
301 rmode = fxdr_unsigned(u_int32_t, *tl);
302 np->n_mode = rmode;
303 np->n_modeuid = cred->cr_uid;
304 np->n_modestamp = time_second;
305 }
306 nfsm_reqdone;
307 return error;
308}
309
310/*
311 * nfs access vnode op.
312 * For nfs version 2, just return ok. File accesses may fail later.
313 * For nfs version 3, use the access rpc to check accessibility. If file modes
314 * are changed on the server, accesses might still fail later.
315 */
316static int
317nfs_access(ap)
318 struct vop_access_args /* {
319 struct vnode *a_vp;
320 int a_mode;
321 struct ucred *a_cred;
322 struct proc *a_p;
323 } */ *ap;
324{
325 register struct vnode *vp = ap->a_vp;
326 int error = 0;
327 u_int32_t mode, wmode;
328 int v3 = NFS_ISV3(vp);
329 struct nfsnode *np = VTONFS(vp);
330
331 /*
332 * Disallow write attempts on filesystems mounted read-only;
333 * unless the file is a socket, fifo, or a block or character
334 * device resident on the filesystem.
335 */
336 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
337 switch (vp->v_type) {
338 case VREG:
339 case VDIR:
340 case VLNK:
341 return (EROFS);
342 default:
343 break;
344 }
345 }
346 /*
347 * For nfs v3, check to see if we have done this recently, and if
348 * so return our cached result instead of making an ACCESS call.
349 * If not, do an access rpc, otherwise you are stuck emulating
350 * ufs_access() locally using the vattr. This may not be correct,
351 * since the server may apply other access criteria such as
352 * client uid-->server uid mapping that we do not know about.
353 */
354 if (v3) {
355 if (ap->a_mode & VREAD)
356 mode = NFSV3ACCESS_READ;
357 else
358 mode = 0;
359 if (vp->v_type != VDIR) {
360 if (ap->a_mode & VWRITE)
361 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
362 if (ap->a_mode & VEXEC)
363 mode |= NFSV3ACCESS_EXECUTE;
364 } else {
365 if (ap->a_mode & VWRITE)
366 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
367 NFSV3ACCESS_DELETE);
368 if (ap->a_mode & VEXEC)
369 mode |= NFSV3ACCESS_LOOKUP;
370 }
371 /* XXX safety belt, only make blanket request if caching */
372 if (nfsaccess_cache_timeout > 0) {
373 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
374 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
375 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
376 } else {
377 wmode = mode;
378 }
379
380 /*
381 * Does our cached result allow us to give a definite yes to
382 * this request?
383 */
384 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
385 (ap->a_cred->cr_uid == np->n_modeuid) &&
386 ((np->n_mode & mode) == mode)) {
387 nfsstats.accesscache_hits++;
388 } else {
389 /*
390 * Either a no, or a don't know. Go to the wire.
391 */
392 nfsstats.accesscache_misses++;
393 error = nfs3_access_otw(vp, wmode, ap->a_p,ap->a_cred);
394 if (!error) {
395 if ((np->n_mode & mode) != mode) {
396 error = EACCES;
397 }
398 }
399 }
400 return (error);
401 } else {
402 if ((error = nfsspec_access(ap)) != 0)
403 return (error);
404
405 /*
406 * Attempt to prevent a mapped root from accessing a file
407 * which it shouldn't. We try to read a byte from the file
408 * if the user is root and the file is not zero length.
409 * After calling nfsspec_access, we should have the correct
410 * file size cached.
411 */
412 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
413 && VTONFS(vp)->n_size > 0) {
414 struct iovec aiov;
415 struct uio auio;
416 char buf[1];
417
418 aiov.iov_base = buf;
419 aiov.iov_len = 1;
420 auio.uio_iov = &aiov;
421 auio.uio_iovcnt = 1;
422 auio.uio_offset = 0;
423 auio.uio_resid = 1;
424 auio.uio_segflg = UIO_SYSSPACE;
425 auio.uio_rw = UIO_READ;
426 auio.uio_procp = ap->a_p;
427
428 if (vp->v_type == VREG)
429 error = nfs_readrpc(vp, &auio, ap->a_cred);
430 else if (vp->v_type == VDIR) {
431 char* bp;
432 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
433 aiov.iov_base = bp;
434 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
435 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
436 free(bp, M_TEMP);
437 } else if (vp->v_type == VLNK)
438 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
439 else
440 error = EACCES;
441 }
442 return (error);
443 }
444}
445
446/*
447 * nfs open vnode op
448 * Check to see if the type is ok
449 * and that deletion is not in progress.
450 * For paged in text files, you will need to flush the page cache
451 * if consistency is lost.
452 */
453/* ARGSUSED */
454static int
455nfs_open(ap)
456 struct vop_open_args /* {
457 struct vnode *a_vp;
458 int a_mode;
459 struct ucred *a_cred;
460 struct proc *a_p;
461 } */ *ap;
462{
463 register struct vnode *vp = ap->a_vp;
464 struct nfsnode *np = VTONFS(vp);
465 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
466 struct vattr vattr;
467 int error;
468
469 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
470#ifdef DIAGNOSTIC
471 printf("open eacces vtyp=%d\n",vp->v_type);
472#endif
473 return (EACCES);
474 }
475 /*
476 * Get a valid lease. If cached data is stale, flush it.
477 */
478 if (nmp->nm_flag & NFSMNT_NQNFS) {
479 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
480 do {
481 error = nqnfs_getlease(vp, ND_READ, ap->a_cred,
482 ap->a_p);
483 } while (error == NQNFS_EXPIRED);
484 if (error)
485 return (error);
486 if (np->n_lrev != np->n_brev ||
487 (np->n_flag & NQNFSNONCACHE)) {
488 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
489 ap->a_p, 1)) == EINTR)
490 return (error);
491 np->n_brev = np->n_lrev;
492 }
493 }
494 } else {
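 /*
 * No leases: if we have dirty data, flush it and refetch the
 * attributes; otherwise compare the server's mtime with the cached
 * one and toss the cached data if the file changed behind our back.
 */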
495 if (np->n_flag & NMODIFIED) {
496 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
497 ap->a_p, 1)) == EINTR)
498 return (error);
499 np->n_attrstamp = 0;
500 if (vp->v_type == VDIR)
501 np->n_direofoffset = 0;
502 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
503 if (error)
504 return (error);
505 np->n_mtime = vattr.va_mtime.tv_sec;
506 } else {
507 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
508 if (error)
509 return (error);
510 if (np->n_mtime != vattr.va_mtime.tv_sec) {
511 if (vp->v_type == VDIR)
512 np->n_direofoffset = 0;
513 if ((error = nfs_vinvalbuf(vp, V_SAVE,
514 ap->a_cred, ap->a_p, 1)) == EINTR)
515 return (error);
516 np->n_mtime = vattr.va_mtime.tv_sec;
517 }
518 }
519 }
520 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
521 np->n_attrstamp = 0; /* For Open/Close consistency */
522 return (0);
523}
524
525/*
526 * nfs close vnode op
527 * What an NFS client should do upon close after writing is a debatable issue.
528 * Most NFS clients push delayed writes to the server upon close, basically for
529 * two reasons:
530 * 1 - So that any write errors may be reported back to the client process
531 * doing the close system call. By far the two most likely errors are
532 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
533 * 2 - To put a worst case upper bound on cache inconsistency between
534 * multiple clients for the file.
535 * There is also a consistency problem for Version 2 of the protocol w.r.t.
536 * not being able to tell if other clients are writing a file concurrently,
537 * since there is no way of knowing if the changed modify time in the reply
538 * is only due to the write for this client.
539 * (NFS Version 3 provides weak cache consistency data in the reply that
540 * should be sufficient to detect and handle this case.)
541 *
542 * The current code does the following:
543 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
544 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
545 * or commit them (this satisfies 1 and 2 except for the
546 * case where the server crashes after this close but
547 * before the commit RPC, which is felt to be "good
 548 * enough"). Changing the last argument to nfs_flush() to
549 * a 1 would force a commit operation, if it is felt a
550 * commit is necessary now.
551 * for NQNFS - do nothing now, since 2 is dealt with via leases and
552 * 1 should be dealt with via an fsync() system call for
553 * cases where write errors are important.
554 */
555/* ARGSUSED */
556static int
557nfs_close(ap)
558 struct vop_close_args /* {
559 struct vnodeop_desc *a_desc;
560 struct vnode *a_vp;
561 int a_fflag;
562 struct ucred *a_cred;
563 struct proc *a_p;
564 } */ *ap;
565{
566 register struct vnode *vp = ap->a_vp;
567 register struct nfsnode *np = VTONFS(vp);
568 int error = 0;
569
570 if (vp->v_type == VREG) {
571 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
572 (np->n_flag & NMODIFIED)) {
573 if (NFS_ISV3(vp)) {
574 /*
575 * Under NFSv3 we have dirty buffers to dispose of. We
576 * must flush them to the NFS server. We have the option
577 * of waiting all the way through the commit rpc or just
578 * waiting for the initial write. The default is to only
579 * wait through the initial write so the data is in the
580 * server's cache, which is roughly similar to the state
581 * a standard disk subsystem leaves the file in on close().
582 *
583 * We cannot clear the NMODIFIED bit in np->n_flag due to
584 * potential races with other processes, and certainly
585 * cannot clear it if we don't commit.
586 */
587 int cm = nfsv3_commit_on_close ? 1 : 0;
588 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, cm);
589 /* np->n_flag &= ~NMODIFIED; */
590 } else {
591 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
592 }
593 np->n_attrstamp = 0;
594 }
595 if (np->n_flag & NWRITEERR) {
596 np->n_flag &= ~NWRITEERR;
597 error = np->n_error;
598 }
599 }
600 return (error);
601}
602
603/*
604 * nfs getattr call from vfs.
605 */
606static int
607nfs_getattr(ap)
608 struct vop_getattr_args /* {
609 struct vnode *a_vp;
610 struct vattr *a_vap;
611 struct ucred *a_cred;
612 struct proc *a_p;
613 } */ *ap;
614{
615 register struct vnode *vp = ap->a_vp;
616 register struct nfsnode *np = VTONFS(vp);
617 register caddr_t cp;
618 register u_int32_t *tl;
619 register int32_t t1, t2;
620 caddr_t bpos, dpos;
621 int error = 0;
622 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
623 int v3 = NFS_ISV3(vp);
624
625 /*
626 * Update local times for special files.
627 */
628 if (np->n_flag & (NACC | NUPD))
629 np->n_flag |= NCHG;
630 /*
631 * First look in the cache.
632 */
633 if (nfs_getattrcache(vp, ap->a_vap) == 0)
634 return (0);
635
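 /*
 * For NFSv3, when access caching is enabled, try an ACCESS RPC first:
 * its post-op attributes refill the attribute cache and prime the
 * access cache at the same time, so the cache lookup is retried
 * before falling back to a plain GETATTR.
 */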
636 if (v3 && nfsaccess_cache_timeout > 0) {
637 nfsstats.accesscache_misses++;
638 nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_p, ap->a_cred);
639 if (nfs_getattrcache(vp, ap->a_vap) == 0)
640 return (0);
641 }
642
643 nfsstats.rpccnt[NFSPROC_GETATTR]++;
644 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
645 nfsm_fhtom(vp, v3);
646 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred);
647 if (!error) {
648 nfsm_loadattr(vp, ap->a_vap);
649 }
650 nfsm_reqdone;
651 return (error);
652}
653
654/*
655 * nfs setattr call.
656 */
657static int
658nfs_setattr(ap)
659 struct vop_setattr_args /* {
660 struct vnodeop_desc *a_desc;
661 struct vnode *a_vp;
662 struct vattr *a_vap;
663 struct ucred *a_cred;
664 struct proc *a_p;
665 } */ *ap;
666{
667 register struct vnode *vp = ap->a_vp;
668 register struct nfsnode *np = VTONFS(vp);
669 register struct vattr *vap = ap->a_vap;
670 int error = 0;
671 u_quad_t tsize;
672
673#ifndef nolint
674 tsize = (u_quad_t)0;
675#endif
676
677 /*
678 * Setting of flags is not supported.
679 */
680 if (vap->va_flags != VNOVAL)
681 return (EOPNOTSUPP);
682
683 /*
684 * Disallow write attempts if the filesystem is mounted read-only.
685 */
686 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
687 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
688 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
689 (vp->v_mount->mnt_flag & MNT_RDONLY))
690 return (EROFS);
691 if (vap->va_size != VNOVAL) {
692 switch (vp->v_type) {
693 case VDIR:
694 return (EISDIR);
695 case VCHR:
696 case VBLK:
697 case VSOCK:
698 case VFIFO:
699 if (vap->va_mtime.tv_sec == VNOVAL &&
700 vap->va_atime.tv_sec == VNOVAL &&
701 vap->va_mode == (mode_t)VNOVAL &&
702 vap->va_uid == (uid_t)VNOVAL &&
703 vap->va_gid == (gid_t)VNOVAL)
704 return (0);
705 vap->va_size = VNOVAL;
706 break;
707 default:
708 /*
709 * Disallow write attempts if the filesystem is
710 * mounted read-only.
711 */
712 if (vp->v_mount->mnt_flag & MNT_RDONLY)
713 return (EROFS);
714
715 /*
 716 * Because we run vnode_pager_setsize() early (why?),
 717 * we must set np->n_size now to avoid vinvalbuf
 718 * V_SAVE races that might set the size to a lower
 719 * value.
720 */
721
722 tsize = np->n_size;
723 error = nfs_meta_setsize(vp, ap->a_cred,
724 ap->a_p, vap->va_size);
725
726 if (np->n_flag & NMODIFIED) {
727 if (vap->va_size == 0)
728 error = nfs_vinvalbuf(vp, 0,
729 ap->a_cred, ap->a_p, 1);
730 else
731 error = nfs_vinvalbuf(vp, V_SAVE,
732 ap->a_cred, ap->a_p, 1);
733 if (error) {
734 np->n_size = tsize;
735 vnode_pager_setsize(vp, np->n_size);
736 return (error);
737 }
738 }
739 np->n_vattr.va_size = vap->va_size;
 740 }
741 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
742 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
743 vp->v_type == VREG &&
744 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
745 ap->a_p, 1)) == EINTR)
746 return (error);
747 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
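 /*
 * If a truncation was requested and the RPC failed, restore the saved
 * size so our notion of the file matches the server's again.
 */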
748 if (error && vap->va_size != VNOVAL) {
749 np->n_size = np->n_vattr.va_size = tsize;
750 vnode_pager_setsize(vp, np->n_size);
751 }
752 return (error);
753}
754
755/*
756 * Do an nfs setattr rpc.
757 */
758static int
759nfs_setattrrpc(vp, vap, cred, procp)
760 register struct vnode *vp;
761 register struct vattr *vap;
762 struct ucred *cred;
763 struct proc *procp;
764{
765 register struct nfsv2_sattr *sp;
766 register caddr_t cp;
767 register int32_t t1, t2;
768 caddr_t bpos, dpos, cp2;
769 u_int32_t *tl;
770 int error = 0, wccflag = NFSV3_WCCRATTR;
771 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
772 int v3 = NFS_ISV3(vp);
773
774 nfsstats.rpccnt[NFSPROC_SETATTR]++;
775 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
776 nfsm_fhtom(vp, v3);
777 if (v3) {
778 nfsm_v3attrbuild(vap, TRUE);
779 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
780 *tl = nfs_false;
781 } else {
782 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
783 if (vap->va_mode == (mode_t)VNOVAL)
784 sp->sa_mode = nfs_xdrneg1;
785 else
786 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
787 if (vap->va_uid == (uid_t)VNOVAL)
788 sp->sa_uid = nfs_xdrneg1;
789 else
790 sp->sa_uid = txdr_unsigned(vap->va_uid);
791 if (vap->va_gid == (gid_t)VNOVAL)
792 sp->sa_gid = nfs_xdrneg1;
793 else
794 sp->sa_gid = txdr_unsigned(vap->va_gid);
795 sp->sa_size = txdr_unsigned(vap->va_size);
796 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
797 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
798 }
799 nfsm_request(vp, NFSPROC_SETATTR, procp, cred);
800 if (v3) {
801 nfsm_wcc_data(vp, wccflag);
802 } else
803 nfsm_loadattr(vp, (struct vattr *)0);
804 nfsm_reqdone;
805 return (error);
806}
807
808/*
809 * nfs lookup call, one step at a time...
810 * First look in cache
811 * If not found, unlock the directory nfsnode and do the rpc
812 */
813static int
814nfs_lookup(ap)
815 struct vop_lookup_args /* {
816 struct vnodeop_desc *a_desc;
817 struct vnode *a_dvp;
818 struct vnode **a_vpp;
819 struct componentname *a_cnp;
820 } */ *ap;
821{
822 struct componentname *cnp = ap->a_cnp;
823 struct vnode *dvp = ap->a_dvp;
824 struct vnode **vpp = ap->a_vpp;
825 int flags = cnp->cn_flags;
826 struct vnode *newvp;
827 u_int32_t *tl;
828 caddr_t cp;
829 int32_t t1, t2;
830 struct nfsmount *nmp;
831 caddr_t bpos, dpos, cp2;
832 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
833 long len;
834 nfsfh_t *fhp;
835 struct nfsnode *np;
836 int lockparent, wantparent, error = 0, attrflag, fhsize;
837 int v3 = NFS_ISV3(dvp);
838 struct proc *p = cnp->cn_proc;
839
840 *vpp = NULLVP;
841 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
842 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
843 return (EROFS);
844 if (dvp->v_type != VDIR)
845 return (ENOTDIR);
846 lockparent = flags & LOCKPARENT;
847 wantparent = flags & (LOCKPARENT|WANTPARENT);
848 nmp = VFSTONFS(dvp->v_mount);
849 np = VTONFS(dvp);
850 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
851 struct vattr vattr;
852 int vpid;
853
854 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) != 0) {
855 *vpp = NULLVP;
856 return (error);
857 }
858
859 newvp = *vpp;
860 vpid = newvp->v_id;
861 /*
862 * See the comment starting `Step through' in ufs/ufs_lookup.c
863 * for an explanation of the locking protocol
864 */
865 if (dvp == newvp) {
866 VREF(newvp);
867 error = 0;
868 } else if (flags & ISDOTDOT) {
869 VOP_UNLOCK(dvp, 0, p);
870 error = vget(newvp, LK_EXCLUSIVE, p);
871 if (!error && lockparent && (flags & ISLASTCN))
872 error = vn_lock(dvp, LK_EXCLUSIVE, p);
873 } else {
874 error = vget(newvp, LK_EXCLUSIVE, p);
875 if (!lockparent || error || !(flags & ISLASTCN))
876 VOP_UNLOCK(dvp, 0, p);
877 }
878 if (!error) {
879 if (vpid == newvp->v_id) {
880 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
881 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
882 nfsstats.lookupcache_hits++;
883 if (cnp->cn_nameiop != LOOKUP &&
884 (flags & ISLASTCN))
885 cnp->cn_flags |= SAVENAME;
886 return (0);
887 }
888 cache_purge(newvp);
889 }
890 vput(newvp);
891 if (lockparent && dvp != newvp && (flags & ISLASTCN))
892 VOP_UNLOCK(dvp, 0, p);
893 }
894 error = vn_lock(dvp, LK_EXCLUSIVE, p);
895 *vpp = NULLVP;
896 if (error)
897 return (error);
898 }
899 error = 0;
900 newvp = NULLVP;
901 nfsstats.lookupcache_misses++;
902 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
903 len = cnp->cn_namelen;
904 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
905 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
906 nfsm_fhtom(dvp, v3);
907 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
908 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred);
909 if (error) {
910 nfsm_postop_attr(dvp, attrflag);
911 m_freem(mrep);
912 goto nfsmout;
913 }
914 nfsm_getfh(fhp, fhsize, v3);
915
916 /*
917 * Handle RENAME case...
918 */
919 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
920 if (NFS_CMPFH(np, fhp, fhsize)) {
921 m_freem(mrep);
922 return (EISDIR);
923 }
924 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
925 if (error) {
926 m_freem(mrep);
927 return (error);
928 }
929 newvp = NFSTOV(np);
930 if (v3) {
931 nfsm_postop_attr(newvp, attrflag);
932 nfsm_postop_attr(dvp, attrflag);
933 } else
934 nfsm_loadattr(newvp, (struct vattr *)0);
935 *vpp = newvp;
936 m_freem(mrep);
937 cnp->cn_flags |= SAVENAME;
938 if (!lockparent)
939 VOP_UNLOCK(dvp, 0, p);
940 return (0);
941 }
942
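 /*
 * For "..", unlock the parent before nfs_nget() to avoid deadlocking
 * against the normal parent-before-child lock order, then re-lock
 * dvp afterwards if the caller wants it locked.
 */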
943 if (flags & ISDOTDOT) {
944 VOP_UNLOCK(dvp, 0, p);
945 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
946 if (error) {
947 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
948 return (error);
949 }
950 newvp = NFSTOV(np);
951 if (lockparent && (flags & ISLASTCN) &&
952 (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
953 vput(newvp);
954 return (error);
955 }
956 } else if (NFS_CMPFH(np, fhp, fhsize)) {
957 VREF(dvp);
958 newvp = dvp;
959 } else {
960 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
961 if (error) {
962 m_freem(mrep);
963 return (error);
964 }
965 if (!lockparent || !(flags & ISLASTCN))
966 VOP_UNLOCK(dvp, 0, p);
967 newvp = NFSTOV(np);
968 }
969 if (v3) {
970 nfsm_postop_attr(newvp, attrflag);
971 nfsm_postop_attr(dvp, attrflag);
972 } else
973 nfsm_loadattr(newvp, (struct vattr *)0);
974 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
975 cnp->cn_flags |= SAVENAME;
976 if ((cnp->cn_flags & MAKEENTRY) &&
977 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
978 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
979 cache_enter(dvp, newvp, cnp);
980 }
981 *vpp = newvp;
982 nfsm_reqdone;
983 if (error) {
984 if (newvp != NULLVP) {
985 vrele(newvp);
986 *vpp = NULLVP;
987 }
988 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
989 (flags & ISLASTCN) && error == ENOENT) {
990 if (!lockparent)
991 VOP_UNLOCK(dvp, 0, p);
992 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
993 error = EROFS;
994 else
995 error = EJUSTRETURN;
996 }
997 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
998 cnp->cn_flags |= SAVENAME;
999 }
1000 return (error);
1001}
1002
1003/*
1004 * nfs read call.
1005 * Just call nfs_bioread() to do the work.
1006 */
1007static int
1008nfs_read(ap)
1009 struct vop_read_args /* {
1010 struct vnode *a_vp;
1011 struct uio *a_uio;
1012 int a_ioflag;
1013 struct ucred *a_cred;
1014 } */ *ap;
1015{
1016 register struct vnode *vp = ap->a_vp;
1017
1018 if (vp->v_type != VREG)
1019 return (EPERM);
1020 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1021}
1022
1023/*
1024 * nfs readlink call
1025 */
1026static int
1027nfs_readlink(ap)
1028 struct vop_readlink_args /* {
1029 struct vnode *a_vp;
1030 struct uio *a_uio;
1031 struct ucred *a_cred;
1032 } */ *ap;
1033{
1034 register struct vnode *vp = ap->a_vp;
1035
1036 if (vp->v_type != VLNK)
1037 return (EINVAL);
1038 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
1039}
1040
1041/*
1042 * Do a readlink rpc.
1043 * Called by nfs_doio() from below the buffer cache.
1044 */
1045int
1046nfs_readlinkrpc(vp, uiop, cred)
1047 register struct vnode *vp;
1048 struct uio *uiop;
1049 struct ucred *cred;
1050{
1051 register u_int32_t *tl;
1052 register caddr_t cp;
1053 register int32_t t1, t2;
1054 caddr_t bpos, dpos, cp2;
1055 int error = 0, len, attrflag;
1056 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1057 int v3 = NFS_ISV3(vp);
1058
1059 nfsstats.rpccnt[NFSPROC_READLINK]++;
1060 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1061 nfsm_fhtom(vp, v3);
1062 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred);
1063 if (v3)
1064 nfsm_postop_attr(vp, attrflag);
1065 if (!error) {
1066 nfsm_strsiz(len, NFS_MAXPATHLEN);
1067 if (len == NFS_MAXPATHLEN) {
1068 struct nfsnode *np = VTONFS(vp);
1069 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
1070 len = np->n_size;
1071 }
1072 nfsm_mtouio(uiop, len);
1073 }
1074 nfsm_reqdone;
1075 return (error);
1076}
1077
1078/*
1079 * nfs read rpc call
1080 * Ditto above
1081 */
1082int
1083nfs_readrpc(vp, uiop, cred)
1084 register struct vnode *vp;
1085 struct uio *uiop;
1086 struct ucred *cred;
1087{
1088 register u_int32_t *tl;
1089 register caddr_t cp;
1090 register int32_t t1, t2;
1091 caddr_t bpos, dpos, cp2;
1092 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1093 struct nfsmount *nmp;
1094 int error = 0, len, retlen, tsiz, eof, attrflag;
1095 int v3 = NFS_ISV3(vp);
1096
1097#ifndef nolint
1098 eof = 0;
1099#endif
1100 nmp = VFSTONFS(vp->v_mount);
1101 tsiz = uiop->uio_resid;
1102 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1103 return (EFBIG);
1104 while (tsiz > 0) {
1105 nfsstats.rpccnt[NFSPROC_READ]++;
1106 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1107 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1108 nfsm_fhtom(vp, v3);
1109 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1110 if (v3) {
1111 txdr_hyper(uiop->uio_offset, tl);
1112 *(tl + 2) = txdr_unsigned(len);
1113 } else {
1114 *tl++ = txdr_unsigned(uiop->uio_offset);
1115 *tl++ = txdr_unsigned(len);
1116 *tl = 0;
1117 }
1118 nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred);
1119 if (v3) {
1120 nfsm_postop_attr(vp, attrflag);
1121 if (error) {
1122 m_freem(mrep);
1123 goto nfsmout;
1124 }
1125 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1126 eof = fxdr_unsigned(int, *(tl + 1));
1127 } else
1128 nfsm_loadattr(vp, (struct vattr *)0);
1129 nfsm_strsiz(retlen, nmp->nm_rsize);
1130 nfsm_mtouio(uiop, retlen);
1131 m_freem(mrep);
1132 tsiz -= retlen;
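 /*
 * Terminate the loop at EOF: v3 reports it explicitly, for v2 it
 * is inferred from a short read.
 */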
1133 if (v3) {
1134 if (eof || retlen == 0) {
1135 tsiz = 0;
1136 }
1137 } else if (retlen < len) {
1138 tsiz = 0;
1139 }
1140 }
1141nfsmout:
1142 return (error);
1143}
1144
1145/*
1146 * nfs write call
1147 */
1148int
1149nfs_writerpc(vp, uiop, cred, iomode, must_commit)
1150 register struct vnode *vp;
1151 register struct uio *uiop;
1152 struct ucred *cred;
1153 int *iomode, *must_commit;
1154{
1155 register u_int32_t *tl;
1156 register caddr_t cp;
1157 register int32_t t1, t2, backup;
1158 caddr_t bpos, dpos, cp2;
1159 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1160 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1161 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1162 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1163
1164#ifndef DIAGNOSTIC
1165 if (uiop->uio_iovcnt != 1)
1166 panic("nfs: writerpc iovcnt > 1");
1167#endif
1168 *must_commit = 0;
1169 tsiz = uiop->uio_resid;
1170 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1171 return (EFBIG);
1172 while (tsiz > 0) {
1173 nfsstats.rpccnt[NFSPROC_WRITE]++;
1174 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1175 nfsm_reqhead(vp, NFSPROC_WRITE,
1176 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1177 nfsm_fhtom(vp, v3);
1178 if (v3) {
1179 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1180 txdr_hyper(uiop->uio_offset, tl);
1181 tl += 2;
1182 *tl++ = txdr_unsigned(len);
1183 *tl++ = txdr_unsigned(*iomode);
1184 *tl = txdr_unsigned(len);
1185 } else {
1186 register u_int32_t x;
1187
1188 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1189 /* Set both "begin" and "current" to non-garbage. */
1190 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1191 *tl++ = x; /* "begin offset" */
1192 *tl++ = x; /* "current offset" */
1193 x = txdr_unsigned(len);
1194 *tl++ = x; /* total to this offset */
1195 *tl = x; /* size of this write */
1196 }
1197 nfsm_uiotom(uiop, len);
1198 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred);
1199 if (v3) {
1200 wccflag = NFSV3_WCCCHK;
1201 nfsm_wcc_data(vp, wccflag);
1202 if (!error) {
1203 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1204 + NFSX_V3WRITEVERF);
1205 rlen = fxdr_unsigned(int, *tl++);
1206 if (rlen == 0) {
1207 error = NFSERR_IO;
1208 m_freem(mrep);
1209 break;
1210 } else if (rlen < len) {
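 /*
 * Short write: the server accepted fewer bytes than we sent.
 * Back the uio up over the unwritten bytes so the next pass
 * through the loop resends them.
 */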
1211 backup = len - rlen;
1212 uiop->uio_iov->iov_base -= backup;
1213 uiop->uio_iov->iov_len += backup;
1214 uiop->uio_offset -= backup;
1215 uiop->uio_resid += backup;
1216 len = rlen;
1217 }
1218 commit = fxdr_unsigned(int, *tl++);
1219
1220 /*
 1221 * Return the lowest commitment level
1222 * obtained by any of the RPCs.
1223 */
1224 if (committed == NFSV3WRITE_FILESYNC)
1225 committed = commit;
1226 else if (committed == NFSV3WRITE_DATASYNC &&
1227 commit == NFSV3WRITE_UNSTABLE)
1228 committed = commit;
1229 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1230 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1231 NFSX_V3WRITEVERF);
1232 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1233 } else if (bcmp((caddr_t)tl,
1234 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1235 *must_commit = 1;
1236 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1237 NFSX_V3WRITEVERF);
1238 }
1239 }
1240 } else
1241 nfsm_loadattr(vp, (struct vattr *)0);
1242 if (wccflag)
1243 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1244 m_freem(mrep);
1245 if (error)
1246 break;
1247 tsiz -= len;
1248 }
1249nfsmout:
1250 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1251 committed = NFSV3WRITE_FILESYNC;
1252 *iomode = committed;
1253 if (error)
1254 uiop->uio_resid = tsiz;
1255 return (error);
1256}
1257
1258/*
1259 * nfs mknod rpc
1260 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1261 * mode set to specify the file type and the size field for rdev.
1262 */
1263static int
1264nfs_mknodrpc(dvp, vpp, cnp, vap)
1265 register struct vnode *dvp;
1266 register struct vnode **vpp;
1267 register struct componentname *cnp;
1268 register struct vattr *vap;
1269{
1270 register struct nfsv2_sattr *sp;
1271 register u_int32_t *tl;
1272 register caddr_t cp;
1273 register int32_t t1, t2;
1274 struct vnode *newvp = (struct vnode *)0;
1275 struct nfsnode *np = (struct nfsnode *)0;
1276 struct vattr vattr;
1277 char *cp2;
1278 caddr_t bpos, dpos;
1279 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1280 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1281 u_int32_t rdev;
1282 int v3 = NFS_ISV3(dvp);
1283
1284 if (vap->va_type == VCHR || vap->va_type == VBLK)
1285 rdev = txdr_unsigned(vap->va_rdev);
1286 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1287 rdev = nfs_xdrneg1;
1288 else {
1289 return (EOPNOTSUPP);
1290 }
1291 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1292 return (error);
1293 }
1294 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1295 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1296 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1297 nfsm_fhtom(dvp, v3);
1298 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1299 if (v3) {
1300 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1301 *tl++ = vtonfsv3_type(vap->va_type);
1302 nfsm_v3attrbuild(vap, FALSE);
1303 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1304 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1305 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1306 *tl = txdr_unsigned(uminor(vap->va_rdev));
1307 }
1308 } else {
1309 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1310 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1311 sp->sa_uid = nfs_xdrneg1;
1312 sp->sa_gid = nfs_xdrneg1;
1313 sp->sa_size = rdev;
1314 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1315 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1316 }
1317 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred);
1318 if (!error) {
1319 nfsm_mtofh(dvp, newvp, v3, gotvp);
1320 if (!gotvp) {
1321 if (newvp) {
1322 vput(newvp);
1323 newvp = (struct vnode *)0;
1324 }
1325 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1326 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1327 if (!error)
1328 newvp = NFSTOV(np);
1329 }
1330 }
1331 if (v3)
1332 nfsm_wcc_data(dvp, wccflag);
1333 nfsm_reqdone;
1334 if (error) {
1335 if (newvp)
1336 vput(newvp);
1337 } else {
1338 if (cnp->cn_flags & MAKEENTRY)
1339 cache_enter(dvp, newvp, cnp);
1340 *vpp = newvp;
1341 }
1342 VTONFS(dvp)->n_flag |= NMODIFIED;
1343 if (!wccflag)
1344 VTONFS(dvp)->n_attrstamp = 0;
1345 return (error);
1346}
1347
1348/*
1349 * nfs mknod vop
1350 * just call nfs_mknodrpc() to do the work.
1351 */
1352/* ARGSUSED */
1353static int
1354nfs_mknod(ap)
1355 struct vop_mknod_args /* {
1356 struct vnode *a_dvp;
1357 struct vnode **a_vpp;
1358 struct componentname *a_cnp;
1359 struct vattr *a_vap;
1360 } */ *ap;
1361{
1362 return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1363}
1364
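/* Counter used to build the verifier for NFSv3 exclusive creates. */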
1365static u_long create_verf;
1366/*
1367 * nfs file create call
1368 */
1369static int
1370nfs_create(ap)
1371 struct vop_create_args /* {
1372 struct vnode *a_dvp;
1373 struct vnode **a_vpp;
1374 struct componentname *a_cnp;
1375 struct vattr *a_vap;
1376 } */ *ap;
1377{
1378 register struct vnode *dvp = ap->a_dvp;
1379 register struct vattr *vap = ap->a_vap;
1380 register struct componentname *cnp = ap->a_cnp;
1381 register struct nfsv2_sattr *sp;
1382 register u_int32_t *tl;
1383 register caddr_t cp;
1384 register int32_t t1, t2;
1385 struct nfsnode *np = (struct nfsnode *)0;
1386 struct vnode *newvp = (struct vnode *)0;
1387 caddr_t bpos, dpos, cp2;
1388 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1389 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1390 struct vattr vattr;
1391 int v3 = NFS_ISV3(dvp);
1392
1393 /*
1394 * Oops, not for me..
1395 */
1396 if (vap->va_type == VSOCK)
1397 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1398
1399 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1400 return (error);
1401 }
1402 if (vap->va_vaflags & VA_EXCLUSIVE)
1403 fmode |= O_EXCL;
1404again:
1405 nfsstats.rpccnt[NFSPROC_CREATE]++;
1406 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1407 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1408 nfsm_fhtom(dvp, v3);
1409 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1410 if (v3) {
1411 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1412 if (fmode & O_EXCL) {
1413 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1414 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
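 /*
 * Build the exclusive-create verifier from the primary IPv4
 * address (when one is configured) and an incrementing counter.
 */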
1415#ifdef INET
1416 if (!TAILQ_EMPTY(&in_ifaddrhead))
1417 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
1418 else
1419#endif
1420 *tl++ = create_verf;
1421 *tl = ++create_verf;
1422 } else {
1423 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1424 nfsm_v3attrbuild(vap, FALSE);
1425 }
1426 } else {
1427 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1428 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1429 sp->sa_uid = nfs_xdrneg1;
1430 sp->sa_gid = nfs_xdrneg1;
1431 sp->sa_size = 0;
1432 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1433 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1434 }
1435 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred);
1436 if (!error) {
1437 nfsm_mtofh(dvp, newvp, v3, gotvp);
1438 if (!gotvp) {
1439 if (newvp) {
1440 vput(newvp);
1441 newvp = (struct vnode *)0;
1442 }
1443 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1444 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1445 if (!error)
1446 newvp = NFSTOV(np);
1447 }
1448 }
1449 if (v3)
1450 nfsm_wcc_data(dvp, wccflag);
1451 nfsm_reqdone;
1452 if (error) {
1453 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1454 fmode &= ~O_EXCL;
1455 goto again;
1456 }
1457 if (newvp)
1458 vput(newvp);
1459 } else if (v3 && (fmode & O_EXCL)) {
1460 /*
1461 * We are normally called with only a partially initialized
 1462 * VAP. Since the NFSv3 spec says that the server may use the
1463 * file attributes to store the verifier, the spec requires
1464 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1465 * in atime, but we can't really assume that all servers will
1466 * so we ensure that our SETATTR sets both atime and mtime.
1467 */
1468 if (vap->va_mtime.tv_sec == VNOVAL)
1469 vfs_timestamp(&vap->va_mtime);
1470 if (vap->va_atime.tv_sec == VNOVAL)
1471 vap->va_atime = vap->va_mtime;
1472 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
1473 }
1474 if (!error) {
1475 if (cnp->cn_flags & MAKEENTRY)
1476 cache_enter(dvp, newvp, cnp);
1477 *ap->a_vpp = newvp;
1478 }
1479 VTONFS(dvp)->n_flag |= NMODIFIED;
1480 if (!wccflag)
1481 VTONFS(dvp)->n_attrstamp = 0;
1482 return (error);
1483}
1484
1485/*
1486 * nfs file remove call
1487 * To try and make nfs semantics closer to ufs semantics, a file that has
1488 * other processes using the vnode is renamed instead of removed and then
1489 * removed later on the last close.
1490 * - If v_usecount > 1
1491 * If a rename is not already in the works
1492 * call nfs_sillyrename() to set it up
1493 * else
1494 * do the remove rpc
1495 */
1496static int
1497nfs_remove(ap)
1498 struct vop_remove_args /* {
1499 struct vnodeop_desc *a_desc;
1500 struct vnode * a_dvp;
1501 struct vnode * a_vp;
1502 struct componentname * a_cnp;
1503 } */ *ap;
1504{
1505 register struct vnode *vp = ap->a_vp;
1506 register struct vnode *dvp = ap->a_dvp;
1507 register struct componentname *cnp = ap->a_cnp;
1508 register struct nfsnode *np = VTONFS(vp);
1509 int error = 0;
1510 struct vattr vattr;
1511
1512#ifndef DIAGNOSTIC
1513 if ((cnp->cn_flags & HASBUF) == 0)
1514 panic("nfs_remove: no name");
1515 if (vp->v_usecount < 1)
1516 panic("nfs_remove: bad v_usecount");
1517#endif
1518 if (vp->v_type == VDIR)
1519 error = EPERM;
1520 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1521 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
1522 vattr.va_nlink > 1)) {
1523 /*
1524 * Purge the name cache so that the chance of a lookup for
1525 * the name succeeding while the remove is in progress is
1526 * minimized. Without node locking it can still happen, such
 1527 * that an I/O op returns ESTALE, but you can get that anyway
 1528 * when another host removes the file, so it is tolerated.
1529 */
1530 cache_purge(vp);
1531 /*
1532 * throw away biocache buffers, mainly to avoid
1533 * unnecessary delayed writes later.
1534 */
1535 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
1536 /* Do the rpc */
1537 if (error != EINTR)
1538 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1539 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
1540 /*
1541 * Kludge City: If the first reply to the remove rpc is lost..
1542 * the reply to the retransmitted request will be ENOENT
 1543 * since the file was in fact removed.
1544 * Therefore, we cheat and return success.
1545 */
1546 if (error == ENOENT)
1547 error = 0;
1548 } else if (!np->n_sillyrename)
1549 error = nfs_sillyrename(dvp, vp, cnp);
1550 np->n_attrstamp = 0;
1551 return (error);
1552}
1553
1554/*
1555 * nfs file remove rpc called from nfs_inactive
1556 */
1557int
1558nfs_removeit(sp)
1559 register struct sillyrename *sp;
1560{
1561
1562 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1563 (struct proc *)0));
1564}
1565
1566/*
1567 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1568 */
1569static int
1570nfs_removerpc(dvp, name, namelen, cred, proc)
1571 register struct vnode *dvp;
1572 const char *name;
1573 int namelen;
1574 struct ucred *cred;
1575 struct proc *proc;
1576{
1577 register u_int32_t *tl;
1578 register caddr_t cp;
1579 register int32_t t1, t2;
1580 caddr_t bpos, dpos, cp2;
1581 int error = 0, wccflag = NFSV3_WCCRATTR;
1582 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1583 int v3 = NFS_ISV3(dvp);
1584
1585 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1586 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1587 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1588 nfsm_fhtom(dvp, v3);
1589 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1590 nfsm_request(dvp, NFSPROC_REMOVE, proc, cred);
1591 if (v3)
1592 nfsm_wcc_data(dvp, wccflag);
1593 nfsm_reqdone;
1594 VTONFS(dvp)->n_flag |= NMODIFIED;
1595 if (!wccflag)
1596 VTONFS(dvp)->n_attrstamp = 0;
1597 return (error);
1598}
1599
1600/*
1601 * nfs file rename call
1602 */
1603static int
1604nfs_rename(ap)
1605 struct vop_rename_args /* {
1606 struct vnode *a_fdvp;
1607 struct vnode *a_fvp;
1608 struct componentname *a_fcnp;
1609 struct vnode *a_tdvp;
1610 struct vnode *a_tvp;
1611 struct componentname *a_tcnp;
1612 } */ *ap;
1613{
1614 register struct vnode *fvp = ap->a_fvp;
1615 register struct vnode *tvp = ap->a_tvp;
1616 register struct vnode *fdvp = ap->a_fdvp;
1617 register struct vnode *tdvp = ap->a_tdvp;
1618 register struct componentname *tcnp = ap->a_tcnp;
1619 register struct componentname *fcnp = ap->a_fcnp;
1620 int error;
1621
1622#ifndef DIAGNOSTIC
1623 if ((tcnp->cn_flags & HASBUF) == 0 ||
1624 (fcnp->cn_flags & HASBUF) == 0)
1625 panic("nfs_rename: no name");
1626#endif
1627 /* Check for cross-device rename */
1628 if ((fvp->v_mount != tdvp->v_mount) ||
1629 (tvp && (fvp->v_mount != tvp->v_mount))) {
1630 error = EXDEV;
1631 goto out;
1632 }
1633
1634 /*
1635 * We have to flush B_DELWRI data prior to renaming
1636 * the file. If we don't, the delayed-write buffers
1637 * can be flushed out later after the file has gone stale
1638 * under NFSV3. NFSV2 does not have this problem because
1639 * ( as far as I can tell ) it flushes dirty buffers more
1640 * often.
1641 */
1642
1643 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_proc);
1644 if (tvp)
1645 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_proc);
1646
1647 /*
1648 * If the tvp exists and is in use, sillyrename it before doing the
1649 * rename of the new file over it.
1650 * XXX Can't sillyrename a directory.
1651 */
1652 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1653 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1654 vput(tvp);
1655 tvp = NULL;
1656 }
1657
1658 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1659 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1660 tcnp->cn_proc);
1661
1662 if (fvp->v_type == VDIR) {
1663 if (tvp != NULL && tvp->v_type == VDIR)
1664 cache_purge(tdvp);
1665 cache_purge(fdvp);
1666 }
1667
1668out:
1669 if (tdvp == tvp)
1670 vrele(tdvp);
1671 else
1672 vput(tdvp);
1673 if (tvp)
1674 vput(tvp);
1675 vrele(fdvp);
1676 vrele(fvp);
1677 /*
1678 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1679 */
1680 if (error == ENOENT)
1681 error = 0;
1682 return (error);
1683}
1684
1685/*
 1686 * nfs file rename rpc called from nfs_sillyrename()
1687 */
1688static int
1689nfs_renameit(sdvp, scnp, sp)
1690 struct vnode *sdvp;
1691 struct componentname *scnp;
1692 register struct sillyrename *sp;
1693{
1694 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1695 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc));
1696}
1697
1698/*
1699 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1700 */
1701static int
1702nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc)
1703 register struct vnode *fdvp;
1704 const char *fnameptr;
1705 int fnamelen;
1706 register struct vnode *tdvp;
1707 const char *tnameptr;
1708 int tnamelen;
1709 struct ucred *cred;
1710 struct proc *proc;
1711{
1712 register u_int32_t *tl;
1713 register caddr_t cp;
1714 register int32_t t1, t2;
1715 caddr_t bpos, dpos, cp2;
1716 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1717 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1718 int v3 = NFS_ISV3(fdvp);
1719
1720 nfsstats.rpccnt[NFSPROC_RENAME]++;
1721 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1722 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1723 nfsm_rndup(tnamelen));
1724 nfsm_fhtom(fdvp, v3);
1725 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1726 nfsm_fhtom(tdvp, v3);
1727 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1728 nfsm_request(fdvp, NFSPROC_RENAME, proc, cred);
1729 if (v3) {
1730 nfsm_wcc_data(fdvp, fwccflag);
1731 nfsm_wcc_data(tdvp, twccflag);
1732 }
1733 nfsm_reqdone;
1734 VTONFS(fdvp)->n_flag |= NMODIFIED;
1735 VTONFS(tdvp)->n_flag |= NMODIFIED;
1736 if (!fwccflag)
1737 VTONFS(fdvp)->n_attrstamp = 0;
1738 if (!twccflag)
1739 VTONFS(tdvp)->n_attrstamp = 0;
1740 return (error);
1741}
1742
1743/*
1744 * nfs hard link create call
1745 */
1746static int
1747nfs_link(ap)
1748 struct vop_link_args /* {
1749 struct vnode *a_tdvp;
1750 struct vnode *a_vp;
1751 struct componentname *a_cnp;
1752 } */ *ap;
1753{
1754 register struct vnode *vp = ap->a_vp;
1755 register struct vnode *tdvp = ap->a_tdvp;
1756 register struct componentname *cnp = ap->a_cnp;
1757 register u_int32_t *tl;
1758 register caddr_t cp;
1759 register int32_t t1, t2;
1760 caddr_t bpos, dpos, cp2;
1761 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1762 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1763 int v3;
1764
1765 if (vp->v_mount != tdvp->v_mount) {
1766 return (EXDEV);
1767 }
1768
1769 /*
1770 * Push all writes to the server, so that the attribute cache
1771 * doesn't get "out of sync" with the server.
1772 * XXX There should be a better way!
1773 */
1774 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);
1775
1776 v3 = NFS_ISV3(vp);
1777 nfsstats.rpccnt[NFSPROC_LINK]++;
1778 nfsm_reqhead(vp, NFSPROC_LINK,
1779 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1780 nfsm_fhtom(vp, v3);
1781 nfsm_fhtom(tdvp, v3);
1782 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1783 nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
1784 if (v3) {
1785 nfsm_postop_attr(vp, attrflag);
1786 nfsm_wcc_data(tdvp, wccflag);
1787 }
1788 nfsm_reqdone;
1789 VTONFS(tdvp)->n_flag |= NMODIFIED;
1790 if (!attrflag)
1791 VTONFS(vp)->n_attrstamp = 0;
1792 if (!wccflag)
1793 VTONFS(tdvp)->n_attrstamp = 0;
1794 /*
1795 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1796 */
1797 if (error == EEXIST)
1798 error = 0;
1799 return (error);
1800}
1801
1802/*
1803 * nfs symbolic link create call
1804 */
1805static int
1806nfs_symlink(ap)
1807 struct vop_symlink_args /* {
1808 struct vnode *a_dvp;
1809 struct vnode **a_vpp;
1810 struct componentname *a_cnp;
1811 struct vattr *a_vap;
1812 char *a_target;
1813 } */ *ap;
1814{
1815 register struct vnode *dvp = ap->a_dvp;
1816 register struct vattr *vap = ap->a_vap;
1817 register struct componentname *cnp = ap->a_cnp;
1818 register struct nfsv2_sattr *sp;
1819 register u_int32_t *tl;
1820 register caddr_t cp;
1821 register int32_t t1, t2;
1822 caddr_t bpos, dpos, cp2;
1823 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1824 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1825 struct vnode *newvp = (struct vnode *)0;
1826 int v3 = NFS_ISV3(dvp);
1827
1828 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1829 slen = strlen(ap->a_target);
1830 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1831 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1832 nfsm_fhtom(dvp, v3);
1833 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1834 if (v3) {
1835 nfsm_v3attrbuild(vap, FALSE);
1836 }
1837 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1838 if (!v3) {
1839 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1840 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1841 sp->sa_uid = nfs_xdrneg1;
1842 sp->sa_gid = nfs_xdrneg1;
1843 sp->sa_size = nfs_xdrneg1;
1844 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1845 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1846 }
1847
1848 /*
1849 * Issue the NFS request and get the rpc response.
1850 *
1851 * Only NFSv3 responses returning an error of 0 actually return
1852 * a file handle that can be converted into newvp without having
1853 * to do an extra lookup rpc.
1854 */
1855 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred);
1856 if (v3) {
1857 if (error == 0)
1858 nfsm_mtofh(dvp, newvp, v3, gotvp);
1859 nfsm_wcc_data(dvp, wccflag);
1860 }
1861
1862 /*
 1863	 * Error exits from the nfsm_* macros jump -> here; mrep is also freed.
1864 */
1865
1866 nfsm_reqdone;
1867
1868 /*
1869 * If we get an EEXIST error, silently convert it to no-error
1870 * in case of an NFS retry.
1871 */
1872 if (error == EEXIST)
1873 error = 0;
1874
1875 /*
 1876	 * If we do not have (or no longer have) an error, and we could
 1877	 * not extract the newvp from the response (because the request was
 1878	 * NFSv2 or the error was EEXIST), we have to do a lookup in order
 1879	 * to obtain a newvp to return.
1880 */
1881 if (error == 0 && newvp == NULL) {
1882 struct nfsnode *np = NULL;
1883
1884 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1885 cnp->cn_cred, cnp->cn_proc, &np);
1886 if (!error)
1887 newvp = NFSTOV(np);
1888 }
1889 if (error) {
1890 if (newvp)
1891 vput(newvp);
1892 } else {
1893 *ap->a_vpp = newvp;
1894 }
1895 VTONFS(dvp)->n_flag |= NMODIFIED;
1896 if (!wccflag)
1897 VTONFS(dvp)->n_attrstamp = 0;
1898 return (error);
1899}
1900
1901/*
1902 * nfs make dir call
1903 */
1904static int
1905nfs_mkdir(ap)
1906 struct vop_mkdir_args /* {
1907 struct vnode *a_dvp;
1908 struct vnode **a_vpp;
1909 struct componentname *a_cnp;
1910 struct vattr *a_vap;
1911 } */ *ap;
1912{
1913 register struct vnode *dvp = ap->a_dvp;
1914 register struct vattr *vap = ap->a_vap;
1915 register struct componentname *cnp = ap->a_cnp;
1916 register struct nfsv2_sattr *sp;
1917 register u_int32_t *tl;
1918 register caddr_t cp;
1919 register int32_t t1, t2;
1920 register int len;
1921 struct nfsnode *np = (struct nfsnode *)0;
1922 struct vnode *newvp = (struct vnode *)0;
1923 caddr_t bpos, dpos, cp2;
1924 int error = 0, wccflag = NFSV3_WCCRATTR;
1925 int gotvp = 0;
1926 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1927 struct vattr vattr;
1928 int v3 = NFS_ISV3(dvp);
1929
1930 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1931 return (error);
1932 }
1933 len = cnp->cn_namelen;
1934 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1935 nfsm_reqhead(dvp, NFSPROC_MKDIR,
1936 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1937 nfsm_fhtom(dvp, v3);
1938 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1939 if (v3) {
1940 nfsm_v3attrbuild(vap, FALSE);
1941 } else {
1942 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1943 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1944 sp->sa_uid = nfs_xdrneg1;
1945 sp->sa_gid = nfs_xdrneg1;
1946 sp->sa_size = nfs_xdrneg1;
1947 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1948 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1949 }
1950 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
1951 if (!error)
1952 nfsm_mtofh(dvp, newvp, v3, gotvp);
1953 if (v3)
1954 nfsm_wcc_data(dvp, wccflag);
1955 nfsm_reqdone;
1956 VTONFS(dvp)->n_flag |= NMODIFIED;
1957 if (!wccflag)
1958 VTONFS(dvp)->n_attrstamp = 0;
1959 /*
 1960	 * Kludge: Map EEXIST => 0, assuming that it is a reply to a retry,
 1961	 * provided we can succeed in looking up the directory.
1962 */
1963 if (error == EEXIST || (!error && !gotvp)) {
1964 if (newvp) {
1965 vrele(newvp);
1966 newvp = (struct vnode *)0;
1967 }
1968 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1969 cnp->cn_proc, &np);
1970 if (!error) {
1971 newvp = NFSTOV(np);
1972 if (newvp->v_type != VDIR)
1973 error = EEXIST;
1974 }
1975 }
1976 if (error) {
1977 if (newvp)
1978 vrele(newvp);
1979 } else
1980 *ap->a_vpp = newvp;
1981 return (error);
1982}
1983
1984/*
1985 * nfs remove directory call
1986 */
1987static int
1988nfs_rmdir(ap)
1989 struct vop_rmdir_args /* {
1990 struct vnode *a_dvp;
1991 struct vnode *a_vp;
1992 struct componentname *a_cnp;
1993 } */ *ap;
1994{
1995 register struct vnode *vp = ap->a_vp;
1996 register struct vnode *dvp = ap->a_dvp;
1997 register struct componentname *cnp = ap->a_cnp;
1998 register u_int32_t *tl;
1999 register caddr_t cp;
2000 register int32_t t1, t2;
2001 caddr_t bpos, dpos, cp2;
2002 int error = 0, wccflag = NFSV3_WCCRATTR;
2003 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2004 int v3 = NFS_ISV3(dvp);
2005
2006 if (dvp == vp)
2007 return (EINVAL);
2008 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2009 nfsm_reqhead(dvp, NFSPROC_RMDIR,
2010 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2011 nfsm_fhtom(dvp, v3);
2012 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2013 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
2014 if (v3)
2015 nfsm_wcc_data(dvp, wccflag);
2016 nfsm_reqdone;
2017 VTONFS(dvp)->n_flag |= NMODIFIED;
2018 if (!wccflag)
2019 VTONFS(dvp)->n_attrstamp = 0;
2020 cache_purge(dvp);
2021 cache_purge(vp);
2022 /*
 2023	 * Kludge: Map ENOENT => 0, assuming that it is a reply to a retry.
2024 */
2025 if (error == ENOENT)
2026 error = 0;
2027 return (error);
2028}
2029
2030/*
2031 * nfs readdir call
2032 */
2033static int
2034nfs_readdir(ap)
2035 struct vop_readdir_args /* {
2036 struct vnode *a_vp;
2037 struct uio *a_uio;
2038 struct ucred *a_cred;
2039 } */ *ap;
2040{
2041 register struct vnode *vp = ap->a_vp;
2042 register struct nfsnode *np = VTONFS(vp);
2043 register struct uio *uio = ap->a_uio;
2044 int tresid, error;
2045 struct vattr vattr;
2046
2047 if (vp->v_type != VDIR)
2048 return (EPERM);
2049 /*
2050 * First, check for hit on the EOF offset cache
2051 */
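	/*
	 * The cached EOF offset is only trusted if the directory has not
	 * been modified locally (NMODIFIED clear) and, for non-NQNFS
	 * mounts, if the server's modification time still matches the one
	 * recorded in the nfsnode (NQNFS mounts rely on the lease check
	 * instead); otherwise fall through and re-read the directory.
	 */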
2052 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
2053 (np->n_flag & NMODIFIED) == 0) {
2054 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
2055 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
2056 nfsstats.direofcache_hits++;
2057 return (0);
2058 }
2059 } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
2060 np->n_mtime == vattr.va_mtime.tv_sec) {
2061 nfsstats.direofcache_hits++;
2062 return (0);
2063 }
2064 }
2065
2066 /*
2067 * Call nfs_bioread() to do the real work.
2068 */
2069 tresid = uio->uio_resid;
2070 error = nfs_bioread(vp, uio, 0, ap->a_cred);
2071
2072 if (!error && uio->uio_resid == tresid)
2073 nfsstats.direofcache_misses++;
2074 return (error);
2075}
2076
2077/*
2078 * Readdir rpc call.
2079 * Called from below the buffer cache by nfs_doio().
2080 */
2081int
2082nfs_readdirrpc(vp, uiop, cred)
2083 struct vnode *vp;
2084 register struct uio *uiop;
2085 struct ucred *cred;
2086
2087{
2088 register int len, left;
2089 register struct dirent *dp = NULL;
2090 register u_int32_t *tl;
2091 register caddr_t cp;
2092 register int32_t t1, t2;
2093 register nfsuint64 *cookiep;
2094 caddr_t bpos, dpos, cp2;
2095 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2096 nfsuint64 cookie;
2097 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2098 struct nfsnode *dnp = VTONFS(vp);
2099 u_quad_t fileno;
2100 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2101 int attrflag;
2102 int v3 = NFS_ISV3(vp);
2103
2104#ifndef DIAGNOSTIC
2105 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2106 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2107 panic("nfs readdirrpc bad uio");
2108#endif
2109
2110 /*
2111 * If there is no cookie, assume directory was stale.
2112 */
2113 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2114 if (cookiep)
2115 cookie = *cookiep;
2116 else
2117 return (NFSERR_BAD_COOKIE);
2118 /*
2119 * Loop around doing readdir rpc's of size nm_readdirsize
2120 * truncated to a multiple of DIRBLKSIZ.
 2121	 * The stopping criterion is EOF or a full buffer.
2122 */
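	/*
	 * Each v3 request carries the 64-bit cookie plus the cookie
	 * verifier returned by the previous READDIR reply; v2 only has a
	 * 32-bit cookie.  The count asked for is nm_readdirsize in both
	 * cases.
	 */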
2123 while (more_dirs && bigenough) {
2124 nfsstats.rpccnt[NFSPROC_READDIR]++;
2125 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2126 NFSX_READDIR(v3));
2127 nfsm_fhtom(vp, v3);
2128 if (v3) {
2129 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2130 *tl++ = cookie.nfsuquad[0];
2131 *tl++ = cookie.nfsuquad[1];
2132 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2133 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2134 } else {
2135 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2136 *tl++ = cookie.nfsuquad[0];
2137 }
2138 *tl = txdr_unsigned(nmp->nm_readdirsize);
2139 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
2140 if (v3) {
2141 nfsm_postop_attr(vp, attrflag);
2142 if (!error) {
2143 nfsm_dissect(tl, u_int32_t *,
2144 2 * NFSX_UNSIGNED);
2145 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2146 dnp->n_cookieverf.nfsuquad[1] = *tl;
2147 } else {
2148 m_freem(mrep);
2149 goto nfsmout;
2150 }
2151 }
2152 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2153 more_dirs = fxdr_unsigned(int, *tl);
2154
2155 /* loop thru the dir entries, doctoring them to 4bsd form */
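		/*
		 * Each entry becomes a struct dirent: a DIRHDSIZ header is
		 * followed by the name, which is copied out of the mbufs and
		 * NUL terminated.  Records never straddle a DIRBLKSIZ
		 * boundary; when one would, the previous record's d_reclen is
		 * grown to pad out the block.  An entry that no longer fits
		 * in the caller's buffer clears bigenough so the loop ends.
		 */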
2156 while (more_dirs && bigenough) {
2157 if (v3) {
2158 nfsm_dissect(tl, u_int32_t *,
2159 3 * NFSX_UNSIGNED);
2160 fileno = fxdr_hyper(tl);
2161 len = fxdr_unsigned(int, *(tl + 2));
2162 } else {
2163 nfsm_dissect(tl, u_int32_t *,
2164 2 * NFSX_UNSIGNED);
2165 fileno = fxdr_unsigned(u_quad_t, *tl++);
2166 len = fxdr_unsigned(int, *tl);
2167 }
2168 if (len <= 0 || len > NFS_MAXNAMLEN) {
2169 error = EBADRPC;
2170 m_freem(mrep);
2171 goto nfsmout;
2172 }
2173 tlen = nfsm_rndup(len);
2174 if (tlen == len)
2175 tlen += 4; /* To ensure null termination */
2176 left = DIRBLKSIZ - blksiz;
2177 if ((tlen + DIRHDSIZ) > left) {
2178 dp->d_reclen += left;
2179 uiop->uio_iov->iov_base += left;
2180 uiop->uio_iov->iov_len -= left;
2181 uiop->uio_offset += left;
2182 uiop->uio_resid -= left;
2183 blksiz = 0;
2184 }
2185 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2186 bigenough = 0;
2187 if (bigenough) {
2188 dp = (struct dirent *)uiop->uio_iov->iov_base;
2189 dp->d_fileno = (int)fileno;
2190 dp->d_namlen = len;
2191 dp->d_reclen = tlen + DIRHDSIZ;
2192 dp->d_type = DT_UNKNOWN;
2193 blksiz += dp->d_reclen;
2194 if (blksiz == DIRBLKSIZ)
2195 blksiz = 0;
2196 uiop->uio_offset += DIRHDSIZ;
2197 uiop->uio_resid -= DIRHDSIZ;
2198 uiop->uio_iov->iov_base += DIRHDSIZ;
2199 uiop->uio_iov->iov_len -= DIRHDSIZ;
2200 nfsm_mtouio(uiop, len);
2201 cp = uiop->uio_iov->iov_base;
2202 tlen -= len;
2203 *cp = '\0'; /* null terminate */
2204 uiop->uio_iov->iov_base += tlen;
2205 uiop->uio_iov->iov_len -= tlen;
2206 uiop->uio_offset += tlen;
2207 uiop->uio_resid -= tlen;
2208 } else
2209 nfsm_adv(nfsm_rndup(len));
2210 if (v3) {
2211 nfsm_dissect(tl, u_int32_t *,
2212 3 * NFSX_UNSIGNED);
2213 } else {
2214 nfsm_dissect(tl, u_int32_t *,
2215 2 * NFSX_UNSIGNED);
2216 }
2217 if (bigenough) {
2218 cookie.nfsuquad[0] = *tl++;
2219 if (v3)
2220 cookie.nfsuquad[1] = *tl++;
2221 } else if (v3)
2222 tl += 2;
2223 else
2224 tl++;
2225 more_dirs = fxdr_unsigned(int, *tl);
2226 }
2227 /*
2228 * If at end of rpc data, get the eof boolean
2229 */
2230 if (!more_dirs) {
2231 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2232 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2233 }
2234 m_freem(mrep);
2235 }
2236 /*
2237 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2238 * by increasing d_reclen for the last record.
2239 */
2240 if (blksiz > 0) {
2241 left = DIRBLKSIZ - blksiz;
2242 dp->d_reclen += left;
2243 uiop->uio_iov->iov_base += left;
2244 uiop->uio_iov->iov_len -= left;
2245 uiop->uio_offset += left;
2246 uiop->uio_resid -= left;
2247 }
2248
2249 /*
2250 * We are now either at the end of the directory or have filled the
2251 * block.
2252 */
2253 if (bigenough)
2254 dnp->n_direofoffset = uiop->uio_offset;
2255 else {
2256 if (uiop->uio_resid > 0)
2257 printf("EEK! readdirrpc resid > 0\n");
2258 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2259 *cookiep = cookie;
2260 }
2261nfsmout:
2262 return (error);
2263}
2264
2265/*
2266 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2267 */
2268int
2269nfs_readdirplusrpc(vp, uiop, cred)
2270 struct vnode *vp;
2271 register struct uio *uiop;
2272 struct ucred *cred;
2273{
2274 register int len, left;
2275 register struct dirent *dp;
2276 register u_int32_t *tl;
2277 register caddr_t cp;
2278 register int32_t t1, t2;
2279 register struct vnode *newvp;
2280 register nfsuint64 *cookiep;
2281 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2282 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2283 struct nameidata nami, *ndp = &nami;
2284 struct componentname *cnp = &ndp->ni_cnd;
2285 nfsuint64 cookie;
2286 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2287 struct nfsnode *dnp = VTONFS(vp), *np;
2288 nfsfh_t *fhp;
2289 u_quad_t fileno;
2290 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2291 int attrflag, fhsize;
2292
2293#ifndef nolint
2294 dp = (struct dirent *)0;
2295#endif
2296#ifndef DIAGNOSTIC
2297 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2298 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2299 panic("nfs readdirplusrpc bad uio");
2300#endif
2301 ndp->ni_dvp = vp;
2302 newvp = NULLVP;
2303
2304 /*
2305 * If there is no cookie, assume directory was stale.
2306 */
2307 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2308 if (cookiep)
2309 cookie = *cookiep;
2310 else
2311 return (NFSERR_BAD_COOKIE);
2312 /*
2313 * Loop around doing readdir rpc's of size nm_readdirsize
2314 * truncated to a multiple of DIRBLKSIZ.
 2315	 * The stopping criterion is EOF or a full buffer.
2316 */
2317 while (more_dirs && bigenough) {
2318 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2319 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2320 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2321 nfsm_fhtom(vp, 1);
2322 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2323 *tl++ = cookie.nfsuquad[0];
2324 *tl++ = cookie.nfsuquad[1];
2325 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2326 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2327 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2328 *tl = txdr_unsigned(nmp->nm_rsize);
2329 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
2330 nfsm_postop_attr(vp, attrflag);
2331 if (error) {
2332 m_freem(mrep);
2333 goto nfsmout;
2334 }
2335 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2336 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2337 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2338 more_dirs = fxdr_unsigned(int, *tl);
2339
2340 /* loop thru the dir entries, doctoring them to 4bsd form */
2341 while (more_dirs && bigenough) {
2342 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2343 fileno = fxdr_hyper(tl);
2344 len = fxdr_unsigned(int, *(tl + 2));
2345 if (len <= 0 || len > NFS_MAXNAMLEN) {
2346 error = EBADRPC;
2347 m_freem(mrep);
2348 goto nfsmout;
2349 }
2350 tlen = nfsm_rndup(len);
2351 if (tlen == len)
 2352				tlen += 4;	/* To ensure null termination */
2353 left = DIRBLKSIZ - blksiz;
2354 if ((tlen + DIRHDSIZ) > left) {
2355 dp->d_reclen += left;
2356 uiop->uio_iov->iov_base += left;
2357 uiop->uio_iov->iov_len -= left;
2358 uiop->uio_offset += left;
2359 uiop->uio_resid -= left;
2360 blksiz = 0;
2361 }
2362 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2363 bigenough = 0;
2364 if (bigenough) {
2365 dp = (struct dirent *)uiop->uio_iov->iov_base;
2366 dp->d_fileno = (int)fileno;
2367 dp->d_namlen = len;
2368 dp->d_reclen = tlen + DIRHDSIZ;
2369 dp->d_type = DT_UNKNOWN;
2370 blksiz += dp->d_reclen;
2371 if (blksiz == DIRBLKSIZ)
2372 blksiz = 0;
2373 uiop->uio_offset += DIRHDSIZ;
2374 uiop->uio_resid -= DIRHDSIZ;
2375 uiop->uio_iov->iov_base += DIRHDSIZ;
2376 uiop->uio_iov->iov_len -= DIRHDSIZ;
2377 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2378 cnp->cn_namelen = len;
2379 nfsm_mtouio(uiop, len);
2380 cp = uiop->uio_iov->iov_base;
2381 tlen -= len;
2382 *cp = '\0';
2383 uiop->uio_iov->iov_base += tlen;
2384 uiop->uio_iov->iov_len -= tlen;
2385 uiop->uio_offset += tlen;
2386 uiop->uio_resid -= tlen;
2387 } else
2388 nfsm_adv(nfsm_rndup(len));
2389 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2390 if (bigenough) {
2391 cookie.nfsuquad[0] = *tl++;
2392 cookie.nfsuquad[1] = *tl++;
2393 } else
2394 tl += 2;
2395
2396 /*
2397 * Since the attributes are before the file handle
2398 * (sigh), we must skip over the attributes and then
2399 * come back and get them.
2400 */
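			/*
			 * dpos/md are saved before skipping the attributes so
			 * that, once the file handle has been read and turned
			 * into a vnode, the parse position can be rewound,
			 * the attributes loaded into that vnode with
			 * nfsm_loadattr(), and the position restored.  The
			 * vnode is then entered into the name cache.
			 */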
2401 attrflag = fxdr_unsigned(int, *tl);
2402 if (attrflag) {
2403 dpossav1 = dpos;
2404 mdsav1 = md;
2405 nfsm_adv(NFSX_V3FATTR);
2406 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2407 doit = fxdr_unsigned(int, *tl);
2408 if (doit) {
2409 nfsm_getfh(fhp, fhsize, 1);
2410 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2411 VREF(vp);
2412 newvp = vp;
2413 np = dnp;
2414 } else {
2415 error = nfs_nget(vp->v_mount, fhp,
2416 fhsize, &np);
2417 if (error)
2418 doit = 0;
2419 else
2420 newvp = NFSTOV(np);
2421 }
2422 }
2423 if (doit && bigenough) {
2424 dpossav2 = dpos;
2425 dpos = dpossav1;
2426 mdsav2 = md;
2427 md = mdsav1;
2428 nfsm_loadattr(newvp, (struct vattr *)0);
2429 dpos = dpossav2;
2430 md = mdsav2;
2431 dp->d_type =
2432 IFTODT(VTTOIF(np->n_vattr.va_type));
2433 ndp->ni_vp = newvp;
2434 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2435 }
2436 } else {
2437 /* Just skip over the file handle */
2438 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2439 i = fxdr_unsigned(int, *tl);
2440 nfsm_adv(nfsm_rndup(i));
2441 }
2442 if (newvp != NULLVP) {
2443 if (newvp == vp)
2444 vrele(newvp);
2445 else
2446 vput(newvp);
2447 newvp = NULLVP;
2448 }
2449 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2450 more_dirs = fxdr_unsigned(int, *tl);
2451 }
2452 /*
2453 * If at end of rpc data, get the eof boolean
2454 */
2455 if (!more_dirs) {
2456 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2457 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2458 }
2459 m_freem(mrep);
2460 }
2461 /*
2462 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2463 * by increasing d_reclen for the last record.
2464 */
2465 if (blksiz > 0) {
2466 left = DIRBLKSIZ - blksiz;
2467 dp->d_reclen += left;
2468 uiop->uio_iov->iov_base += left;
2469 uiop->uio_iov->iov_len -= left;
2470 uiop->uio_offset += left;
2471 uiop->uio_resid -= left;
2472 }
2473
2474 /*
2475 * We are now either at the end of the directory or have filled the
2476 * block.
2477 */
2478 if (bigenough)
2479 dnp->n_direofoffset = uiop->uio_offset;
2480 else {
2481 if (uiop->uio_resid > 0)
2482 printf("EEK! readdirplusrpc resid > 0\n");
2483 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2484 *cookiep = cookie;
2485 }
2486nfsmout:
2487 if (newvp != NULLVP) {
2488 if (newvp == vp)
2489 vrele(newvp);
2490 else
2491 vput(newvp);
2492 newvp = NULLVP;
2493 }
2494 return (error);
2495}
2496
2497/*
 2498 * Silly rename. To make the stateless NFS filesystem look a little more
 2499 * like "ufs", a remove of an active vnode is translated into a rename to a
 2500 * funny-looking filename that is later removed by nfs_inactive on the
 2501 * nfsnode. There is the potential for another process on a different client
 2502 * to create the same funny name between the time the nfs_lookitup() fails
 2503 * and the nfs_rename() completes, but...
2504 */
2505static int
2506nfs_sillyrename(dvp, vp, cnp)
2507 struct vnode *dvp, *vp;
2508 struct componentname *cnp;
2509{
2510 register struct sillyrename *sp;
2511 struct nfsnode *np;
2512 int error;
2513 short pid;
2514
2515 cache_purge(dvp);
2516 np = VTONFS(vp);
2517#ifndef DIAGNOSTIC
2518 if (vp->v_type == VDIR)
2519 panic("nfs: sillyrename dir");
2520#endif
2521 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2522 M_NFSREQ, M_WAITOK);
2523 sp->s_cred = crdup(cnp->cn_cred);
2524 sp->s_dvp = dvp;
2525 VREF(dvp);
2526
2527 /* Fudge together a funny name */
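	/*
	 * The name is ".nfsA<pid in hex>4.4"; the loop below bumps the
	 * fifth character ('A' upward) until the lookup fails, i.e. until
	 * a name is found that does not already exist in the directory.
	 */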
2528 pid = cnp->cn_proc->p_pid;
2529 sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);
2530
2531 /* Try lookitups until we get one that isn't there */
2532 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2533 cnp->cn_proc, (struct nfsnode **)0) == 0) {
2534 sp->s_name[4]++;
2535 if (sp->s_name[4] > 'z') {
2536 error = EINVAL;
2537 goto bad;
2538 }
2539 }
2540 error = nfs_renameit(dvp, cnp, sp);
2541 if (error)
2542 goto bad;
2543 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2544 cnp->cn_proc, &np);
2545 np->n_sillyrename = sp;
2546 return (0);
2547bad:
2548 vrele(sp->s_dvp);
2549 crfree(sp->s_cred);
2550 free((caddr_t)sp, M_NFSREQ);
2551 return (error);
2552}
2553
2554/*
2555 * Look up a file name and optionally either update the file handle or
2556 * allocate an nfsnode, depending on the value of npp.
2557 * npp == NULL --> just do the lookup
2558 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2559 * handled too
2560 * *npp != NULL --> update the file handle in the vnode
2561 */
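/*
 * In the *npp != NULL case the nfsnode's file handle storage may have to
 * change shape: handles no larger than NFS_SMALLFH live in the in-node
 * n_fh buffer, while bigger ones are kept in memory malloc'd from
 * M_NFSBIGFH, so the old buffer is freed or a new one allocated as the
 * size of the returned handle dictates.
 */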
2562static int
2563nfs_lookitup(dvp, name, len, cred, procp, npp)
2564 register struct vnode *dvp;
2565 const char *name;
2566 int len;
2567 struct ucred *cred;
2568 struct proc *procp;
2569 struct nfsnode **npp;
2570{
2571 register u_int32_t *tl;
2572 register caddr_t cp;
2573 register int32_t t1, t2;
2574 struct vnode *newvp = (struct vnode *)0;
2575 struct nfsnode *np, *dnp = VTONFS(dvp);
2576 caddr_t bpos, dpos, cp2;
2577 int error = 0, fhlen, attrflag;
2578 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2579 nfsfh_t *nfhp;
2580 int v3 = NFS_ISV3(dvp);
2581
2582 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2583 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2584 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2585 nfsm_fhtom(dvp, v3);
2586 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2587 nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred);
2588 if (npp && !error) {
2589 nfsm_getfh(nfhp, fhlen, v3);
2590 if (*npp) {
2591 np = *npp;
2592 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2593 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2594 np->n_fhp = &np->n_fh;
2595 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2596 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2597 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2598 np->n_fhsize = fhlen;
2599 newvp = NFSTOV(np);
2600 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2601 VREF(dvp);
2602 newvp = dvp;
2603 } else {
2604 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2605 if (error) {
2606 m_freem(mrep);
2607 return (error);
2608 }
2609 newvp = NFSTOV(np);
2610 }
2611 if (v3) {
2612 nfsm_postop_attr(newvp, attrflag);
2613 if (!attrflag && *npp == NULL) {
2614 m_freem(mrep);
2615 if (newvp == dvp)
2616 vrele(newvp);
2617 else
2618 vput(newvp);
2619 return (ENOENT);
2620 }
2621 } else
2622 nfsm_loadattr(newvp, (struct vattr *)0);
2623 }
2624 nfsm_reqdone;
2625 if (npp && *npp == NULL) {
2626 if (error) {
2627 if (newvp) {
2628 if (newvp == dvp)
2629 vrele(newvp);
2630 else
2631 vput(newvp);
2632 }
2633 } else
2634 *npp = np;
2635 }
2636 return (error);
2637}
2638
2639/*
2640 * Nfs Version 3 commit rpc
2641 */
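/*
 * The COMMIT reply carries the server's write verifier.  If it differs
 * from the verifier cached in the mount structure, the server has lost
 * uncommitted data (typically after a reboot); the new verifier is
 * recorded and NFSERR_STALEWRITEVERF is returned so the caller can
 * rewrite the affected buffers.
 */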
2642int
2643nfs_commit(vp, offset, cnt, cred, procp)
2644 struct vnode *vp;
2645 u_quad_t offset;
2646 int cnt;
2647 struct ucred *cred;
2648 struct proc *procp;
2649{
2650 register caddr_t cp;
2651 register u_int32_t *tl;
2652 register int32_t t1, t2;
2653 register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2654 caddr_t bpos, dpos, cp2;
2655 int error = 0, wccflag = NFSV3_WCCRATTR;
2656 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2657
2658 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2659 return (0);
2660 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2661 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2662 nfsm_fhtom(vp, 1);
2663 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2664 txdr_hyper(offset, tl);
2665 tl += 2;
2666 *tl = txdr_unsigned(cnt);
2667 nfsm_request(vp, NFSPROC_COMMIT, procp, cred);
2668 nfsm_wcc_data(vp, wccflag);
2669 if (!error) {
2670 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2671 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2672 NFSX_V3WRITEVERF)) {
2673 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2674 NFSX_V3WRITEVERF);
2675 error = NFSERR_STALEWRITEVERF;
2676 }
2677 }
2678 nfsm_reqdone;
2679 return (error);
2680}
2681
2682/*
2683 * Kludge City..
2684 * - make nfs_bmap() essentially a no-op that does no translation
2685 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2686 * (Maybe I could use the process's page mapping, but I was concerned that
2687 * Kernel Write might not be enabled and also figured copyout() would do
2688 * a lot more work than bcopy() and also it currently happens in the
 2689 * context of the swapper process (2).)
2690 */
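/*
 * The "translation" nfs_bmap() does is purely arithmetic: the logical
 * block number is scaled by the mount's f_iosize (expressed in DEV_BSIZE
 * units), which lets the buffer cache treat the file as if it were a
 * contiguous device.
 */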
2691static int
2692nfs_bmap(ap)
2693 struct vop_bmap_args /* {
2694 struct vnode *a_vp;
2695 daddr_t a_bn;
2696 struct vnode **a_vpp;
2697 daddr_t *a_bnp;
2698 int *a_runp;
2699 int *a_runb;
2700 } */ *ap;
2701{
2702 register struct vnode *vp = ap->a_vp;
2703
2704 if (ap->a_vpp != NULL)
2705 *ap->a_vpp = vp;
2706 if (ap->a_bnp != NULL)
2707 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2708 if (ap->a_runp != NULL)
2709 *ap->a_runp = 0;
2710 if (ap->a_runb != NULL)
2711 *ap->a_runb = 0;
2712 return (0);
2713}
2714
2715/*
2716 * Strategy routine.
2717 * For async requests when nfsiod(s) are running, queue the request by
 2718 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2719 * request.
2720 */
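/*
 * Read requests use the credentials saved in b_rcred, writes those in
 * b_wcred.  Async buffers are handed to nfs_asyncio() with a NULL proc
 * pointer since they will complete in the context of an nfsiod; if no
 * daemon can take the request, nfs_doio() runs it synchronously here.
 */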
2721static int
2722nfs_strategy(ap)
2723 struct vop_strategy_args *ap;
2724{
2725 register struct buf *bp = ap->a_bp;
2726 struct ucred *cr;
2727 struct proc *p;
2728 int error = 0;
2729
2730 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2731 KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
2732
2733 if (bp->b_flags & B_PHYS)
2734 panic("nfs physio");
2735
2736 if (bp->b_flags & B_ASYNC)
2737 p = (struct proc *)0;
2738 else
2739 p = curproc; /* XXX */
2740
2741 if (bp->b_flags & B_READ)
2742 cr = bp->b_rcred;
2743 else
2744 cr = bp->b_wcred;
2745
2746 /*
 2747	 * If the op is asynchronous and an i/o daemon is waiting,
 2748	 * queue the request, wake it up and wait for completion;
 2749	 * otherwise just do it ourselves.
2750 */
2751 if ((bp->b_flags & B_ASYNC) == 0 ||
2752 nfs_asyncio(bp, NOCRED, p))
2753 error = nfs_doio(bp, cr, p);
2754 return (error);
2755}
2756
2757/*
2758 * Mmap a file
2759 *
2760 * NB Currently unsupported.
2761 */
2762/* ARGSUSED */
2763static int
2764nfs_mmap(ap)
2765 struct vop_mmap_args /* {
2766 struct vnode *a_vp;
2767 int a_fflags;
2768 struct ucred *a_cred;
2769 struct proc *a_p;
2770 } */ *ap;
2771{
2772
2773 return (EINVAL);
2774}
2775
2776/*
2777 * fsync vnode op. Just call nfs_flush() with commit == 1.
2778 */
2779/* ARGSUSED */
2780static int
2781nfs_fsync(ap)
2782 struct vop_fsync_args /* {
2783 struct vnodeop_desc *a_desc;
2784 struct vnode * a_vp;
2785 struct ucred * a_cred;
2786 int a_waitfor;
2787 struct proc * a_p;
2788 } */ *ap;
2789{
2790
2791 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2792}
2793
2794/*
2795 * Flush all the blocks associated with a vnode.
2796 * Walk through the buffer pool and push any dirty pages
2797 * associated with the vnode.
2798 */
2799static int
2800nfs_flush(vp, cred, waitfor, p, commit)
2801 register struct vnode *vp;
2802 struct ucred *cred;
2803 int waitfor;
2804 struct proc *p;
2805 int commit;
2806{
2807 register struct nfsnode *np = VTONFS(vp);
2808 register struct buf *bp;
2809 register int i;
2810 struct buf *nbp;
2811 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2812 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2813 int passone = 1;
2814 u_quad_t off, endoff, toff;
2815 struct ucred* wcred = NULL;
2816 struct buf **bvec = NULL;
2817#ifndef NFS_COMMITBVECSIZ
2818#define NFS_COMMITBVECSIZ 20
2819#endif
2820 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2821 int bvecsize = 0, bveccount;
2822
2823 if (nmp->nm_flag & NFSMNT_INT)
2824 slpflag = PCATCH;
2825 if (!commit)
2826 passone = 0;
2827 /*
2828 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
 2829	 * server, but has not been committed to stable storage on the server
2830 * yet. On the first pass, the byte range is worked out and the commit
2831 * rpc is done. On the second pass, nfs_writebp() is called to do the
2832 * job.
2833 */
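	/*
	 * Pass one gathers up to bvecsize such buffers, works out the byte
	 * range [off, endoff) covering them and commits it (with a single
	 * rpc if every buffer shares the same write credential, otherwise
	 * one rpc per buffer).  Buffers the commit succeeded for are
	 * finished off with biodone(); on a commit error (including a
	 * stale write verifier) they are left dirty so they get rewritten.
	 * The second pass then writes out whatever is still dirty, and for
	 * MNT_WAIT callers the routine waits for the vnode's pending
	 * output to drain before returning.
	 */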
2834again:
2835 off = (u_quad_t)-1;
2836 endoff = 0;
2837 bvecpos = 0;
2838 if (NFS_ISV3(vp) && commit) {
2839 s = splbio();
2840 /*
2841 * Count up how many buffers waiting for a commit.
2842 */
2843 bveccount = 0;
2844 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2845 nbp = TAILQ_NEXT(bp, b_vnbufs);
2846 if (BUF_REFCNT(bp) == 0 &&
2847 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2848 == (B_DELWRI | B_NEEDCOMMIT))
2849 bveccount++;
2850 }
2851 /*
2852 * Allocate space to remember the list of bufs to commit. It is
2853 * important to use M_NOWAIT here to avoid a race with nfs_write.
2854 * If we can't get memory (for whatever reason), we will end up
2855 * committing the buffers one-by-one in the loop below.
2856 */
2857 if (bvec != NULL && bvec != bvec_on_stack)
2858 free(bvec, M_TEMP);
2859 if (bveccount > NFS_COMMITBVECSIZ) {
2860 bvec = (struct buf **)
2861 malloc(bveccount * sizeof(struct buf *),
2862 M_TEMP, M_NOWAIT);
2863 if (bvec == NULL) {
2864 bvec = bvec_on_stack;
2865 bvecsize = NFS_COMMITBVECSIZ;
2866 } else
2867 bvecsize = bveccount;
2868 } else {
2869 bvec = bvec_on_stack;
2870 bvecsize = NFS_COMMITBVECSIZ;
2871 }
2872 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2873 nbp = TAILQ_NEXT(bp, b_vnbufs);
2874 if (bvecpos >= bvecsize)
2875 break;
2876 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2877 (B_DELWRI | B_NEEDCOMMIT) ||
2878 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2879 continue;
2880 bremfree(bp);
2881 /*
2882 * Work out if all buffers are using the same cred
2883 * so we can deal with them all with one commit.
2884 *
2885 * NOTE: we are not clearing B_DONE here, so we have
2886 * to do it later on in this routine if we intend to
2887 * initiate I/O on the bp.
2888 *
2889 * Note: to avoid loopback deadlocks, we do not
2890 * assign b_runningbufspace.
2891 */
2892 if (wcred == NULL)
2893 wcred = bp->b_wcred;
2894 else if (wcred != bp->b_wcred)
2895 wcred = NOCRED;
2896 bp->b_flags |= B_WRITEINPROG;
2897 vfs_busy_pages(bp, 1);
2898
2899 /*
2900 * bp is protected by being locked, but nbp is not
2901 * and vfs_busy_pages() may sleep. We have to
2902 * recalculate nbp.
2903 */
2904 nbp = TAILQ_NEXT(bp, b_vnbufs);
2905
2906 /*
2907 * A list of these buffers is kept so that the
2908 * second loop knows which buffers have actually
2909 * been committed. This is necessary, since there
2910 * may be a race between the commit rpc and new
2911 * uncommitted writes on the file.
2912 */
2913 bvec[bvecpos++] = bp;
2914 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2915 bp->b_dirtyoff;
2916 if (toff < off)
2917 off = toff;
2918 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2919 if (toff > endoff)
2920 endoff = toff;
2921 }
2922 splx(s);
2923 }
2924 if (bvecpos > 0) {
2925 /*
2926 * Commit data on the server, as required.
2927 * If all bufs are using the same wcred, then use that with
2928 * one call for all of them, otherwise commit each one
2929 * separately.
2930 */
2931 if (wcred != NOCRED)
2932 retv = nfs_commit(vp, off, (int)(endoff - off),
2933 wcred, p);
2934 else {
2935 retv = 0;
2936 for (i = 0; i < bvecpos; i++) {
2937 off_t off, size;
2938 bp = bvec[i];
2939 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2940 bp->b_dirtyoff;
2941 size = (u_quad_t)(bp->b_dirtyend
2942 - bp->b_dirtyoff);
2943 retv = nfs_commit(vp, off, (int)size,
2944 bp->b_wcred, p);
2945 if (retv) break;
2946 }
2947 }
2948
2949 if (retv == NFSERR_STALEWRITEVERF)
2950 nfs_clearcommit(vp->v_mount);
2951
2952 /*
2953 * Now, either mark the blocks I/O done or mark the
2954 * blocks dirty, depending on whether the commit
2955 * succeeded.
2956 */
2957 for (i = 0; i < bvecpos; i++) {
2958 bp = bvec[i];
2959 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2960 if (retv) {
2961 /*
2962 * Error, leave B_DELWRI intact
2963 */
2964 vfs_unbusy_pages(bp);
2965 brelse(bp);
2966 } else {
2967 /*
2968 * Success, remove B_DELWRI ( bundirty() ).
2969 *
2970 * b_dirtyoff/b_dirtyend seem to be NFS
2971 * specific. We should probably move that
2972 * into bundirty(). XXX
2973 */
2974 s = splbio();
2975 vp->v_numoutput++;
2976 bp->b_flags |= B_ASYNC;
2977 bundirty(bp);
2978 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2979 bp->b_dirtyoff = bp->b_dirtyend = 0;
2980 splx(s);
2981 biodone(bp);
2982 }
2983 }
2984 }
2985
2986 /*
2987 * Start/do any write(s) that are required.
2988 */
2989loop:
2990 s = splbio();
2991 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2992 nbp = TAILQ_NEXT(bp, b_vnbufs);
2993 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2994 if (waitfor != MNT_WAIT || passone)
2995 continue;
2996 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2997 "nfsfsync", slpflag, slptimeo);
2998 splx(s);
2999 if (error == 0)
3000 panic("nfs_fsync: inconsistent lock");
3001 if (error == ENOLCK)
3002 goto loop;
3003 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
3004 error = EINTR;
3005 goto done;
3006 }
3007 if (slpflag == PCATCH) {
3008 slpflag = 0;
3009 slptimeo = 2 * hz;
3010 }
3011 goto loop;
3012 }
3013 if ((bp->b_flags & B_DELWRI) == 0)
3014 panic("nfs_fsync: not dirty");
3015 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
3016 BUF_UNLOCK(bp);
3017 continue;
3018 }
3019 bremfree(bp);
3020 if (passone || !commit)
3021 bp->b_flags |= B_ASYNC;
3022 else
3023 bp->b_flags |= B_ASYNC | B_WRITEINPROG;
3024 splx(s);
3025 VOP_BWRITE(bp->b_vp, bp);
3026 goto loop;
3027 }
3028 splx(s);
3029 if (passone) {
3030 passone = 0;
3031 goto again;
3032 }
3033 if (waitfor == MNT_WAIT) {
3034 while (vp->v_numoutput) {
3035 vp->v_flag |= VBWAIT;
3036 error = tsleep((caddr_t)&vp->v_numoutput,
3037 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
3038 if (error) {
3039 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
3040 error = EINTR;
3041 goto done;
3042 }
3043 if (slpflag == PCATCH) {
3044 slpflag = 0;
3045 slptimeo = 2 * hz;
3046 }
3047 }
3048 }
3049 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
3050 goto loop;
3051 }
3052 }
3053 if (np->n_flag & NWRITEERR) {
3054 error = np->n_error;
3055 np->n_flag &= ~NWRITEERR;
3056 }
3057done:
3058 if (bvec != NULL && bvec != bvec_on_stack)
3059 free(bvec, M_TEMP);
3060 return (error);
3061}
3062
3063/*
3064 * NFS advisory byte-level locks.
3065 * Currently unsupported.
3066 */
3067static int
3068nfs_advlock(ap)
3069 struct vop_advlock_args /* {
3070 struct vnode *a_vp;
3071 caddr_t a_id;
3072 int a_op;
3073 struct flock *a_fl;
3074 int a_flags;
3075 } */ *ap;
3076{
3077 register struct nfsnode *np = VTONFS(ap->a_vp);
3078
3079 /*
3080 * The following kludge is to allow diskless support to work
3081 * until a real NFS lockd is implemented. Basically, just pretend
3082 * that this is a local lock.
3083 */
3084 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
3085}
3086
3087/*
3088 * Print out the contents of an nfsnode.
3089 */
3090static int
3091nfs_print(ap)
3092 struct vop_print_args /* {
3093 struct vnode *a_vp;
3094 } */ *ap;
3095{
3096 register struct vnode *vp = ap->a_vp;
3097 register struct nfsnode *np = VTONFS(vp);
3098
3099 printf("tag VT_NFS, fileid %ld fsid 0x%x",
3100 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3101 if (vp->v_type == VFIFO)
3102 fifo_printinfo(vp);
3103 printf("\n");
3104 return (0);
3105}
3106
3107/*
3108 * Just call nfs_writebp() with the force argument set to 1.
3109 *
3110 * NOTE: B_DONE may or may not be set in a_bp on call.
3111 */
3112static int
3113nfs_bwrite(ap)
3114 struct vop_bwrite_args /* {
3115 struct vnode *a_bp;
3116 } */ *ap;
3117{
3118 return (nfs_writebp(ap->a_bp, 1, curproc));
3119}
3120
3121/*
3122 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
 3123 * the force flag is set, and it also handles the B_NEEDCOMMIT flag. We set
3124 * B_CACHE if this is a VMIO buffer.
3125 */
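/*
 * The buffer is undirtied before the I/O is started and is only
 * redirtied if the write later fails; for synchronous (non-B_ASYNC)
 * callers the routine itself waits with biowait() and releases the
 * buffer, returning the I/O status.
 */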
3126int
3127nfs_writebp(bp, force, procp)
3128 register struct buf *bp;
3129 int force;
3130 struct proc *procp;
3131{
3132 int s;
3133 int oldflags = bp->b_flags;
3134#if 0
3135 int retv = 1;
3136 off_t off;
3137#endif
3138
3139 if (BUF_REFCNT(bp) == 0)
3140 panic("bwrite: buffer is not locked???");
3141
3142 if (bp->b_flags & B_INVAL) {
3143 brelse(bp);
3144 return(0);
3145 }
3146
3147 bp->b_flags |= B_CACHE;
3148
3149 /*
3150 * Undirty the bp. We will redirty it later if the I/O fails.
3151 */
3152
3153 s = splbio();
3154 bundirty(bp);
3155 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3156
3157 bp->b_vp->v_numoutput++;
3158 curproc->p_stats->p_ru.ru_oublock++;
3159 splx(s);
3160
3161 /*
3162 * Note: to avoid loopback deadlocks, we do not
3163 * assign b_runningbufspace.
3164 */
3165 vfs_busy_pages(bp, 1);
3166
3167 if (force)
3168 bp->b_flags |= B_WRITEINPROG;
3169 BUF_KERNPROC(bp);
3170 VOP_STRATEGY(bp->b_vp, bp);
3171
3172 if( (oldflags & B_ASYNC) == 0) {
3173 int rtval = biowait(bp);
3174
3175 if (oldflags & B_DELWRI) {
3176 s = splbio();
3177 reassignbuf(bp, bp->b_vp);
3178 splx(s);
3179 }
3180
3181 brelse(bp);
3182 return (rtval);
3183 }
3184
3185 return (0);
3186}
3187
3188/*
3189 * nfs special file access vnode op.
3190 * Essentially just get vattr and then imitate iaccess() since the device is
3191 * local to the client.
3192 */
3193static int
3194nfsspec_access(ap)
3195 struct vop_access_args /* {
3196 struct vnode *a_vp;
3197 int a_mode;
3198 struct ucred *a_cred;
3199 struct proc *a_p;
3200 } */ *ap;
3201{
3202 register struct vattr *vap;
3203 register gid_t *gp;
3204 register struct ucred *cred = ap->a_cred;
3205 struct vnode *vp = ap->a_vp;
3206 mode_t mode = ap->a_mode;
3207 struct vattr vattr;
3208 register int i;
3209 int error;
3210
3211 /*
3212 * Disallow write attempts on filesystems mounted read-only;
3213 * unless the file is a socket, fifo, or a block or character
3214 * device resident on the filesystem.
3215 */
3216 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3217 switch (vp->v_type) {
3218 case VREG:
3219 case VDIR:
3220 case VLNK:
3221 return (EROFS);
3222 default:
3223 break;
3224 }
3225 }
3226 /*
3227 * If you're the super-user,
3228 * you always get access.
3229 */
3230 if (cred->cr_uid == 0)
3231 return (0);
3232 vap = &vattr;
3233 error = VOP_GETATTR(vp, vap, cred, ap->a_p);
3234 if (error)
3235 return (error);
3236 /*
3237 * Access check is based on only one of owner, group, public.
3238 * If not owner, then check group. If not a member of the
3239 * group, then check public access.
3240 */
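	/*
	 * The requested VREAD/VWRITE/VEXEC bits arrive in the "owner"
	 * position; each mode >>= 3 below slides the check down to the
	 * group bits and then to the "other" bits, mirroring the classic
	 * Unix rwxrwxrwx layout.
	 */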
3241 if (cred->cr_uid != vap->va_uid) {
3242 mode >>= 3;
3243 gp = cred->cr_groups;
3244 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3245 if (vap->va_gid == *gp)
3246 goto found;
3247 mode >>= 3;
3248found:
3249 ;
3250 }
3251 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3252 return (error);
3253}
3254
3255/*
3256 * Read wrapper for special devices.
3257 */
3258static int
3259nfsspec_read(ap)
3260 struct vop_read_args /* {
3261 struct vnode *a_vp;
3262 struct uio *a_uio;
3263 int a_ioflag;
3264 struct ucred *a_cred;
3265 } */ *ap;
3266{
3267 register struct nfsnode *np = VTONFS(ap->a_vp);
3268
3269 /*
3270 * Set access flag.
3271 */
3272 np->n_flag |= NACC;
3273 getnanotime(&np->n_atim);
3274 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3275}
3276
3277/*
3278 * Write wrapper for special devices.
3279 */
3280static int
3281nfsspec_write(ap)
3282 struct vop_write_args /* {
3283 struct vnode *a_vp;
3284 struct uio *a_uio;
3285 int a_ioflag;
3286 struct ucred *a_cred;
3287 } */ *ap;
3288{
3289 register struct nfsnode *np = VTONFS(ap->a_vp);
3290
3291 /*
3292 * Set update flag.
3293 */
3294 np->n_flag |= NUPD;
3295 getnanotime(&np->n_mtim);
3296 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3297}
3298
3299/*
3300 * Close wrapper for special devices.
3301 *
3302 * Update the times on the nfsnode then do device close.
3303 */
3304static int
3305nfsspec_close(ap)
3306 struct vop_close_args /* {
3307 struct vnode *a_vp;
3308 int a_fflag;
3309 struct ucred *a_cred;
3310 struct proc *a_p;
3311 } */ *ap;
3312{
3313 register struct vnode *vp = ap->a_vp;
3314 register struct nfsnode *np = VTONFS(vp);
3315 struct vattr vattr;
3316
3317 if (np->n_flag & (NACC | NUPD)) {
3318 np->n_flag |= NCHG;
3319 if (vp->v_usecount == 1 &&
3320 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3321 VATTR_NULL(&vattr);
3322 if (np->n_flag & NACC)
3323 vattr.va_atime = np->n_atim;
3324 if (np->n_flag & NUPD)
3325 vattr.va_mtime = np->n_mtim;
3326 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3327 }
3328 }
3329 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3330}
3331
3332/*
3333 * Read wrapper for fifos.
3334 */
3335static int
3336nfsfifo_read(ap)
3337 struct vop_read_args /* {
3338 struct vnode *a_vp;
3339 struct uio *a_uio;
3340 int a_ioflag;
3341 struct ucred *a_cred;
3342 } */ *ap;
3343{
3344 register struct nfsnode *np = VTONFS(ap->a_vp);
3345
3346 /*
3347 * Set access flag.
3348 */
3349 np->n_flag |= NACC;
3350 getnanotime(&np->n_atim);
3351 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3352}
3353
3354/*
3355 * Write wrapper for fifos.
3356 */
3357static int
3358nfsfifo_write(ap)
3359 struct vop_write_args /* {
3360 struct vnode *a_vp;
3361 struct uio *a_uio;
3362 int a_ioflag;
3363 struct ucred *a_cred;
3364 } */ *ap;
3365{
3366 register struct nfsnode *np = VTONFS(ap->a_vp);
3367
3368 /*
3369 * Set update flag.
3370 */
3371 np->n_flag |= NUPD;
3372 getnanotime(&np->n_mtim);
3373 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3374}
3375
3376/*
3377 * Close wrapper for fifos.
3378 *
3379 * Update the times on the nfsnode then do fifo close.
3380 */
3381static int
3382nfsfifo_close(ap)
3383 struct vop_close_args /* {
3384 struct vnode *a_vp;
3385 int a_fflag;
3386 struct ucred *a_cred;
3387 struct proc *a_p;
3388 } */ *ap;
3389{
3390 register struct vnode *vp = ap->a_vp;
3391 register struct nfsnode *np = VTONFS(vp);
3392 struct vattr vattr;
3393 struct timespec ts;
3394
3395 if (np->n_flag & (NACC | NUPD)) {
3396 getnanotime(&ts);
3397 if (np->n_flag & NACC)
3398 np->n_atim = ts;
3399 if (np->n_flag & NUPD)
3400 np->n_mtim = ts;
3401 np->n_flag |= NCHG;
3402 if (vp->v_usecount == 1 &&
3403 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3404 VATTR_NULL(&vattr);
3405 if (np->n_flag & NACC)
3406 vattr.va_atime = np->n_atim;
3407 if (np->n_flag & NUPD)
3408 vattr.va_mtime = np->n_mtim;
3409 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3410 }
3411 }
3412 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3413}
3414