Initial import from FreeBSD RELENG_4:
[dragonfly.git] sys/vfs/nfs/nfs_vnops.c
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $FreeBSD: src/sys/nfs/nfs_vnops.c,v 1.150.2.5 2001/12/20 19:56:28 dillon Exp $
38 */
39
40
41/*
42 * vnode op calls for Sun NFS version 2 and 3
43 */
44
45#include "opt_inet.h"
46
47#include <sys/param.h>
48#include <sys/kernel.h>
49#include <sys/systm.h>
50#include <sys/resourcevar.h>
51#include <sys/proc.h>
52#include <sys/mount.h>
53#include <sys/buf.h>
54#include <sys/malloc.h>
55#include <sys/mbuf.h>
56#include <sys/namei.h>
57#include <sys/socket.h>
58#include <sys/vnode.h>
59#include <sys/dirent.h>
60#include <sys/fcntl.h>
61#include <sys/lockf.h>
62#include <sys/stat.h>
63#include <sys/sysctl.h>
64#include <sys/conf.h>
65
66#include <vm/vm.h>
67#include <vm/vm_extern.h>
68#include <vm/vm_zone.h>
69
70#include <miscfs/fifofs/fifo.h>
71
72#include <nfs/rpcv2.h>
73#include <nfs/nfsproto.h>
74#include <nfs/nfs.h>
75#include <nfs/nfsnode.h>
76#include <nfs/nfsmount.h>
77#include <nfs/xdr_subs.h>
78#include <nfs/nfsm_subs.h>
79#include <nfs/nqnfs.h>
80
81#include <net/if.h>
82#include <netinet/in.h>
83#include <netinet/in_var.h>
84
85/* Defs */
86#define TRUE 1
87#define FALSE 0
88
89/*
90 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
91 * calls are not in getblk() and brelse() so that they would not be necessary
92 * here.
93 */
94#ifndef B_VMIO
95#define vfs_busy_pages(bp, f)
96#endif
97
98static int nfsspec_read __P((struct vop_read_args *));
99static int nfsspec_write __P((struct vop_write_args *));
100static int nfsfifo_read __P((struct vop_read_args *));
101static int nfsfifo_write __P((struct vop_write_args *));
102static int nfsspec_close __P((struct vop_close_args *));
103static int nfsfifo_close __P((struct vop_close_args *));
104#define nfs_poll vop_nopoll
105static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int));
106static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *));
107static int nfs_lookup __P((struct vop_lookup_args *));
108static int nfs_create __P((struct vop_create_args *));
109static int nfs_mknod __P((struct vop_mknod_args *));
110static int nfs_open __P((struct vop_open_args *));
111static int nfs_close __P((struct vop_close_args *));
112static int nfs_access __P((struct vop_access_args *));
113static int nfs_getattr __P((struct vop_getattr_args *));
114static int nfs_setattr __P((struct vop_setattr_args *));
115static int nfs_read __P((struct vop_read_args *));
116static int nfs_mmap __P((struct vop_mmap_args *));
117static int nfs_fsync __P((struct vop_fsync_args *));
118static int nfs_remove __P((struct vop_remove_args *));
119static int nfs_link __P((struct vop_link_args *));
120static int nfs_rename __P((struct vop_rename_args *));
121static int nfs_mkdir __P((struct vop_mkdir_args *));
122static int nfs_rmdir __P((struct vop_rmdir_args *));
123static int nfs_symlink __P((struct vop_symlink_args *));
124static int nfs_readdir __P((struct vop_readdir_args *));
125static int nfs_bmap __P((struct vop_bmap_args *));
126static int nfs_strategy __P((struct vop_strategy_args *));
127static int nfs_lookitup __P((struct vnode *, const char *, int,
128 struct ucred *, struct proc *, struct nfsnode **));
129static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *));
130static int nfsspec_access __P((struct vop_access_args *));
131static int nfs_readlink __P((struct vop_readlink_args *));
132static int nfs_print __P((struct vop_print_args *));
133static int nfs_advlock __P((struct vop_advlock_args *));
134static int nfs_bwrite __P((struct vop_bwrite_args *));
135/*
136 * Global vfs data structures for nfs
137 */
138vop_t **nfsv2_vnodeop_p;
139static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
140 { &vop_default_desc, (vop_t *) vop_defaultop },
141 { &vop_access_desc, (vop_t *) nfs_access },
142 { &vop_advlock_desc, (vop_t *) nfs_advlock },
143 { &vop_bmap_desc, (vop_t *) nfs_bmap },
144 { &vop_bwrite_desc, (vop_t *) nfs_bwrite },
145 { &vop_close_desc, (vop_t *) nfs_close },
146 { &vop_create_desc, (vop_t *) nfs_create },
147 { &vop_fsync_desc, (vop_t *) nfs_fsync },
148 { &vop_getattr_desc, (vop_t *) nfs_getattr },
149 { &vop_getpages_desc, (vop_t *) nfs_getpages },
150 { &vop_putpages_desc, (vop_t *) nfs_putpages },
151 { &vop_inactive_desc, (vop_t *) nfs_inactive },
152 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
153 { &vop_lease_desc, (vop_t *) vop_null },
154 { &vop_link_desc, (vop_t *) nfs_link },
155 { &vop_lock_desc, (vop_t *) vop_sharedlock },
156 { &vop_lookup_desc, (vop_t *) nfs_lookup },
157 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
158 { &vop_mknod_desc, (vop_t *) nfs_mknod },
159 { &vop_mmap_desc, (vop_t *) nfs_mmap },
160 { &vop_open_desc, (vop_t *) nfs_open },
161 { &vop_poll_desc, (vop_t *) nfs_poll },
162 { &vop_print_desc, (vop_t *) nfs_print },
163 { &vop_read_desc, (vop_t *) nfs_read },
164 { &vop_readdir_desc, (vop_t *) nfs_readdir },
165 { &vop_readlink_desc, (vop_t *) nfs_readlink },
166 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
167 { &vop_remove_desc, (vop_t *) nfs_remove },
168 { &vop_rename_desc, (vop_t *) nfs_rename },
169 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
170 { &vop_setattr_desc, (vop_t *) nfs_setattr },
171 { &vop_strategy_desc, (vop_t *) nfs_strategy },
172 { &vop_symlink_desc, (vop_t *) nfs_symlink },
173 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
174 { &vop_write_desc, (vop_t *) nfs_write },
175 { NULL, NULL }
176};
177static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
178 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
179VNODEOP_SET(nfsv2_vnodeop_opv_desc);
180
181/*
182 * Special device vnode ops
183 */
184vop_t **spec_nfsv2nodeop_p;
185static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
186 { &vop_default_desc, (vop_t *) spec_vnoperate },
187 { &vop_access_desc, (vop_t *) nfsspec_access },
188 { &vop_close_desc, (vop_t *) nfsspec_close },
189 { &vop_fsync_desc, (vop_t *) nfs_fsync },
190 { &vop_getattr_desc, (vop_t *) nfs_getattr },
191 { &vop_inactive_desc, (vop_t *) nfs_inactive },
192 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
193 { &vop_lock_desc, (vop_t *) vop_sharedlock },
194 { &vop_print_desc, (vop_t *) nfs_print },
195 { &vop_read_desc, (vop_t *) nfsspec_read },
196 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
197 { &vop_setattr_desc, (vop_t *) nfs_setattr },
198 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
199 { &vop_write_desc, (vop_t *) nfsspec_write },
200 { NULL, NULL }
201};
202static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
203 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
204VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
205
206vop_t **fifo_nfsv2nodeop_p;
207static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
208 { &vop_default_desc, (vop_t *) fifo_vnoperate },
209 { &vop_access_desc, (vop_t *) nfsspec_access },
210 { &vop_close_desc, (vop_t *) nfsfifo_close },
211 { &vop_fsync_desc, (vop_t *) nfs_fsync },
212 { &vop_getattr_desc, (vop_t *) nfs_getattr },
213 { &vop_inactive_desc, (vop_t *) nfs_inactive },
214 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
215 { &vop_lock_desc, (vop_t *) vop_sharedlock },
216 { &vop_print_desc, (vop_t *) nfs_print },
217 { &vop_read_desc, (vop_t *) nfsfifo_read },
218 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
219 { &vop_setattr_desc, (vop_t *) nfs_setattr },
220 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
221 { &vop_write_desc, (vop_t *) nfsfifo_write },
222 { NULL, NULL }
223};
224static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
225 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
226VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
227
228static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp,
229 struct componentname *cnp,
230 struct vattr *vap));
231static int nfs_removerpc __P((struct vnode *dvp, const char *name,
232 int namelen,
233 struct ucred *cred, struct proc *proc));
234static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr,
235 int fnamelen, struct vnode *tdvp,
236 const char *tnameptr, int tnamelen,
237 struct ucred *cred, struct proc *proc));
238static int nfs_renameit __P((struct vnode *sdvp,
239 struct componentname *scnp,
240 struct sillyrename *sp));
241
242/*
243 * Global variables
244 */
245extern u_int32_t nfs_true, nfs_false;
246extern u_int32_t nfs_xdrneg1;
247extern struct nfsstats nfsstats;
248extern nfstype nfsv3_type[9];
249struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
250struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
251int nfs_numasync = 0;
252#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
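/*
 * DIRHDSIZ is the size of the fixed header of a struct dirent, i.e.
 * sizeof(struct dirent) minus the d_name[MAXNAMLEN + 1] array; directory
 * entries are packed as this header followed by the (padded) name.
 */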
253
254SYSCTL_DECL(_vfs_nfs);
255
256static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
257SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
258 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
259
260static int nfsv3_commit_on_close = 0;
261SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
262 &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
263#if 0
264SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
265 &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
266
267SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
268 &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
269#endif
270
271#define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
272 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
273 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
274static int
275nfs3_access_otw(struct vnode *vp,
276 int wmode,
277 struct proc *p,
278 struct ucred *cred)
279{
280 const int v3 = 1;
281 u_int32_t *tl;
282 int error = 0, attrflag;
283
284 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
285 caddr_t bpos, dpos, cp2;
286 register int32_t t1, t2;
287 register caddr_t cp;
288 u_int32_t rmode;
289 struct nfsnode *np = VTONFS(vp);
290
291 nfsstats.rpccnt[NFSPROC_ACCESS]++;
292 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
293 nfsm_fhtom(vp, v3);
294 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
295 *tl = txdr_unsigned(wmode);
296 nfsm_request(vp, NFSPROC_ACCESS, p, cred);
297 nfsm_postop_attr(vp, attrflag);
298 if (!error) {
299 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
300 rmode = fxdr_unsigned(u_int32_t, *tl);
301 np->n_mode = rmode;
302 np->n_modeuid = cred->cr_uid;
303 np->n_modestamp = time_second;
304 }
305 nfsm_reqdone;
306 return error;
307}
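/*
 * nfs3_access_otw() is what fills the small per-nfsnode access cache
 * (n_mode, n_modeuid, n_modestamp) consulted by nfs_access() below:
 * a cached answer is only trusted for nfsaccess_cache_timeout seconds
 * and only for the credential uid that issued the original request.
 */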
308
309/*
310 * nfs access vnode op.
311 * For nfs version 2, just return ok. File accesses may fail later.
312 * For nfs version 3, use the access rpc to check accessibility. If file modes
313 * are changed on the server, accesses might still fail later.
314 */
315static int
316nfs_access(ap)
317 struct vop_access_args /* {
318 struct vnode *a_vp;
319 int a_mode;
320 struct ucred *a_cred;
321 struct proc *a_p;
322 } */ *ap;
323{
324 register struct vnode *vp = ap->a_vp;
325 int error = 0;
326 u_int32_t mode, wmode;
327 int v3 = NFS_ISV3(vp);
328 struct nfsnode *np = VTONFS(vp);
329
330 /*
331 * Disallow write attempts on filesystems mounted read-only;
332 * unless the file is a socket, fifo, or a block or character
333 * device resident on the filesystem.
334 */
335 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
336 switch (vp->v_type) {
337 case VREG:
338 case VDIR:
339 case VLNK:
340 return (EROFS);
341 default:
342 break;
343 }
344 }
345 /*
346 * For nfs v3, check to see if we have done this recently, and if
347 * so return our cached result instead of making an ACCESS call.
 348 * If not, do an access rpc.  For nfs v2 we are stuck emulating
 349 * ufs_access() locally using the vattr, which may not be correct,
350 * since the server may apply other access criteria such as
351 * client uid-->server uid mapping that we do not know about.
352 */
353 if (v3) {
354 if (ap->a_mode & VREAD)
355 mode = NFSV3ACCESS_READ;
356 else
357 mode = 0;
358 if (vp->v_type != VDIR) {
359 if (ap->a_mode & VWRITE)
360 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
361 if (ap->a_mode & VEXEC)
362 mode |= NFSV3ACCESS_EXECUTE;
363 } else {
364 if (ap->a_mode & VWRITE)
365 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
366 NFSV3ACCESS_DELETE);
367 if (ap->a_mode & VEXEC)
368 mode |= NFSV3ACCESS_LOOKUP;
369 }
370 /* XXX safety belt, only make blanket request if caching */
371 if (nfsaccess_cache_timeout > 0) {
372 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
373 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
374 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
375 } else {
376 wmode = mode;
377 }
378
379 /*
380 * Does our cached result allow us to give a definite yes to
381 * this request?
382 */
383 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
384 (ap->a_cred->cr_uid == np->n_modeuid) &&
385 ((np->n_mode & mode) == mode)) {
386 nfsstats.accesscache_hits++;
387 } else {
388 /*
389 * Either a no, or a don't know. Go to the wire.
390 */
391 nfsstats.accesscache_misses++;
392 error = nfs3_access_otw(vp, wmode, ap->a_p,ap->a_cred);
393 if (!error) {
394 if ((np->n_mode & mode) != mode) {
395 error = EACCES;
396 }
397 }
398 }
399 return (error);
400 } else {
401 if ((error = nfsspec_access(ap)) != 0)
402 return (error);
403
404 /*
405 * Attempt to prevent a mapped root from accessing a file
406 * which it shouldn't. We try to read a byte from the file
407 * if the user is root and the file is not zero length.
408 * After calling nfsspec_access, we should have the correct
409 * file size cached.
410 */
411 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
412 && VTONFS(vp)->n_size > 0) {
413 struct iovec aiov;
414 struct uio auio;
415 char buf[1];
416
417 aiov.iov_base = buf;
418 aiov.iov_len = 1;
419 auio.uio_iov = &aiov;
420 auio.uio_iovcnt = 1;
421 auio.uio_offset = 0;
422 auio.uio_resid = 1;
423 auio.uio_segflg = UIO_SYSSPACE;
424 auio.uio_rw = UIO_READ;
425 auio.uio_procp = ap->a_p;
426
427 if (vp->v_type == VREG)
428 error = nfs_readrpc(vp, &auio, ap->a_cred);
429 else if (vp->v_type == VDIR) {
430 char* bp;
431 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
432 aiov.iov_base = bp;
433 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
434 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
435 free(bp, M_TEMP);
436 } else if (vp->v_type == VLNK)
437 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
438 else
439 error = EACCES;
440 }
441 return (error);
442 }
443}
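/*
 * Rough summary of the VOP_ACCESS -> NFSv3 ACCESS bit mapping used above
 * (the authoritative logic is in nfs_access() itself):
 *
 *	VREAD	-> NFSV3ACCESS_READ
 *	VWRITE	-> NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND
 *		   (plus NFSV3ACCESS_DELETE for directories)
 *	VEXEC	-> NFSV3ACCESS_EXECUTE for files,
 *		   NFSV3ACCESS_LOOKUP for directories
 *
 * When caching is enabled (nfsaccess_cache_timeout > 0) the RPC asks for
 * all of the bits at once so one reply can satisfy later checks from the
 * cache.
 */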
444
445/*
446 * nfs open vnode op
447 * Check to see if the type is ok
448 * and that deletion is not in progress.
449 * For paged in text files, you will need to flush the page cache
450 * if consistency is lost.
451 */
452/* ARGSUSED */
453static int
454nfs_open(ap)
455 struct vop_open_args /* {
456 struct vnode *a_vp;
457 int a_mode;
458 struct ucred *a_cred;
459 struct proc *a_p;
460 } */ *ap;
461{
462 register struct vnode *vp = ap->a_vp;
463 struct nfsnode *np = VTONFS(vp);
464 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
465 struct vattr vattr;
466 int error;
467
468 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
469#ifdef DIAGNOSTIC
470 printf("open eacces vtyp=%d\n",vp->v_type);
471#endif
472 return (EACCES);
473 }
474 /*
475 * Get a valid lease. If cached data is stale, flush it.
476 */
477 if (nmp->nm_flag & NFSMNT_NQNFS) {
478 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
479 do {
480 error = nqnfs_getlease(vp, ND_READ, ap->a_cred,
481 ap->a_p);
482 } while (error == NQNFS_EXPIRED);
483 if (error)
484 return (error);
485 if (np->n_lrev != np->n_brev ||
486 (np->n_flag & NQNFSNONCACHE)) {
487 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
488 ap->a_p, 1)) == EINTR)
489 return (error);
490 np->n_brev = np->n_lrev;
491 }
492 }
493 } else {
494 if (np->n_flag & NMODIFIED) {
495 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
496 ap->a_p, 1)) == EINTR)
497 return (error);
498 np->n_attrstamp = 0;
499 if (vp->v_type == VDIR)
500 np->n_direofoffset = 0;
501 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
502 if (error)
503 return (error);
504 np->n_mtime = vattr.va_mtime.tv_sec;
505 } else {
506 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
507 if (error)
508 return (error);
509 if (np->n_mtime != vattr.va_mtime.tv_sec) {
510 if (vp->v_type == VDIR)
511 np->n_direofoffset = 0;
512 if ((error = nfs_vinvalbuf(vp, V_SAVE,
513 ap->a_cred, ap->a_p, 1)) == EINTR)
514 return (error);
515 np->n_mtime = vattr.va_mtime.tv_sec;
516 }
517 }
518 }
519 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
520 np->n_attrstamp = 0; /* For Open/Close consistency */
521 return (0);
522}
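/*
 * The non-NQNFS logic above implements close-to-open consistency: the
 * attribute cache is timed out on open and close so attributes are
 * refetched from the server, and a change in the server's modify time
 * relative to the locally recorded n_mtime invalidates the vnode's
 * cached buffers, roughly:
 *
 *	VOP_GETATTR(vp, &vattr, cred, p);
 *	if (np->n_mtime != vattr.va_mtime.tv_sec) {
 *		nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
 *		np->n_mtime = vattr.va_mtime.tv_sec;
 *	}
 */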
523
524/*
525 * nfs close vnode op
526 * What an NFS client should do upon close after writing is a debatable issue.
527 * Most NFS clients push delayed writes to the server upon close, basically for
528 * two reasons:
529 * 1 - So that any write errors may be reported back to the client process
530 * doing the close system call. By far the two most likely errors are
531 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
532 * 2 - To put a worst case upper bound on cache inconsistency between
533 * multiple clients for the file.
534 * There is also a consistency problem for Version 2 of the protocol w.r.t.
535 * not being able to tell if other clients are writing a file concurrently,
536 * since there is no way of knowing if the changed modify time in the reply
537 * is only due to the write for this client.
538 * (NFS Version 3 provides weak cache consistency data in the reply that
539 * should be sufficient to detect and handle this case.)
540 *
541 * The current code does the following:
542 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
543 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
544 * or commit them (this satisfies 1 and 2 except for the
545 * case where the server crashes after this close but
546 * before the commit RPC, which is felt to be "good
 547 * enough").  Changing the last argument to nfs_flush() to
 548 * a 1 would force a commit operation, if a commit is felt
 549 * to be necessary now.
550 * for NQNFS - do nothing now, since 2 is dealt with via leases and
551 * 1 should be dealt with via an fsync() system call for
552 * cases where write errors are important.
553 */
554/* ARGSUSED */
555static int
556nfs_close(ap)
557 struct vop_close_args /* {
558 struct vnodeop_desc *a_desc;
559 struct vnode *a_vp;
560 int a_fflag;
561 struct ucred *a_cred;
562 struct proc *a_p;
563 } */ *ap;
564{
565 register struct vnode *vp = ap->a_vp;
566 register struct nfsnode *np = VTONFS(vp);
567 int error = 0;
568
569 if (vp->v_type == VREG) {
570 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
571 (np->n_flag & NMODIFIED)) {
572 if (NFS_ISV3(vp)) {
573 /*
574 * Under NFSv3 we have dirty buffers to dispose of. We
575 * must flush them to the NFS server. We have the option
576 * of waiting all the way through the commit rpc or just
577 * waiting for the initial write. The default is to only
578 * wait through the initial write so the data is in the
579 * server's cache, which is roughly similar to the state
580 * a standard disk subsystem leaves the file in on close().
581 *
582 * We cannot clear the NMODIFIED bit in np->n_flag due to
583 * potential races with other processes, and certainly
584 * cannot clear it if we don't commit.
585 */
586 int cm = nfsv3_commit_on_close ? 1 : 0;
587 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, cm);
588 /* np->n_flag &= ~NMODIFIED; */
589 } else {
590 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
591 }
592 np->n_attrstamp = 0;
593 }
594 if (np->n_flag & NWRITEERR) {
595 np->n_flag &= ~NWRITEERR;
596 error = np->n_error;
597 }
598 }
599 return (error);
600}
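/*
 * For NFSv3 the choice above between a plain write-behind flush and a
 * full write+commit on close is run-time tunable through the
 * vfs.nfs.nfsv3_commit_on_close sysctl declared near the top of this
 * file; the default of 0 only pushes the data to the server, without
 * forcing it to stable storage.
 */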
601
602/*
603 * nfs getattr call from vfs.
604 */
605static int
606nfs_getattr(ap)
607 struct vop_getattr_args /* {
608 struct vnode *a_vp;
609 struct vattr *a_vap;
610 struct ucred *a_cred;
611 struct proc *a_p;
612 } */ *ap;
613{
614 register struct vnode *vp = ap->a_vp;
615 register struct nfsnode *np = VTONFS(vp);
616 register caddr_t cp;
617 register u_int32_t *tl;
618 register int32_t t1, t2;
619 caddr_t bpos, dpos;
620 int error = 0;
621 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
622 int v3 = NFS_ISV3(vp);
623
624 /*
625 * Update local times for special files.
626 */
627 if (np->n_flag & (NACC | NUPD))
628 np->n_flag |= NCHG;
629 /*
630 * First look in the cache.
631 */
632 if (nfs_getattrcache(vp, ap->a_vap) == 0)
633 return (0);
634
635 if (v3 && nfsaccess_cache_timeout > 0) {
636 nfsstats.accesscache_misses++;
637 nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_p, ap->a_cred);
638 if (nfs_getattrcache(vp, ap->a_vap) == 0)
639 return (0);
640 }
641
642 nfsstats.rpccnt[NFSPROC_GETATTR]++;
643 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
644 nfsm_fhtom(vp, v3);
645 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred);
646 if (!error) {
647 nfsm_loadattr(vp, ap->a_vap);
648 }
649 nfsm_reqdone;
650 return (error);
651}
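/*
 * Note on the v3 path above: on an attribute cache miss with access
 * caching enabled, an ACCESS RPC for all rights is tried before a plain
 * GETATTR.  The post-op attributes piggybacked on the ACCESS reply
 * usually refill the attribute cache, and the access bits prime the
 * access cache, so one RPC can stand in for two.
 */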
652
653/*
654 * nfs setattr call.
655 */
656static int
657nfs_setattr(ap)
658 struct vop_setattr_args /* {
659 struct vnodeop_desc *a_desc;
660 struct vnode *a_vp;
661 struct vattr *a_vap;
662 struct ucred *a_cred;
663 struct proc *a_p;
664 } */ *ap;
665{
666 register struct vnode *vp = ap->a_vp;
667 register struct nfsnode *np = VTONFS(vp);
668 register struct vattr *vap = ap->a_vap;
669 int error = 0;
670 u_quad_t tsize;
671
672#ifndef nolint
673 tsize = (u_quad_t)0;
674#endif
675
676 /*
677 * Setting of flags is not supported.
678 */
679 if (vap->va_flags != VNOVAL)
680 return (EOPNOTSUPP);
681
682 /*
683 * Disallow write attempts if the filesystem is mounted read-only.
684 */
685 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
686 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
687 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
688 (vp->v_mount->mnt_flag & MNT_RDONLY))
689 return (EROFS);
690 if (vap->va_size != VNOVAL) {
691 switch (vp->v_type) {
692 case VDIR:
693 return (EISDIR);
694 case VCHR:
695 case VBLK:
696 case VSOCK:
697 case VFIFO:
698 if (vap->va_mtime.tv_sec == VNOVAL &&
699 vap->va_atime.tv_sec == VNOVAL &&
700 vap->va_mode == (mode_t)VNOVAL &&
701 vap->va_uid == (uid_t)VNOVAL &&
702 vap->va_gid == (gid_t)VNOVAL)
703 return (0);
704 vap->va_size = VNOVAL;
705 break;
706 default:
707 /*
708 * Disallow write attempts if the filesystem is
709 * mounted read-only.
710 */
711 if (vp->v_mount->mnt_flag & MNT_RDONLY)
712 return (EROFS);
713
714 /*
 715 * We run vnode_pager_setsize() early (why?);
 716 * we must set np->n_size now to avoid vinvalbuf
 717 * V_SAVE races that might set the size to a
 718 * lower value.
719 */
720
721 tsize = np->n_size;
722 error = nfs_meta_setsize(vp, ap->a_cred,
723 ap->a_p, vap->va_size);
724
725 if (np->n_flag & NMODIFIED) {
726 if (vap->va_size == 0)
727 error = nfs_vinvalbuf(vp, 0,
728 ap->a_cred, ap->a_p, 1);
729 else
730 error = nfs_vinvalbuf(vp, V_SAVE,
731 ap->a_cred, ap->a_p, 1);
732 if (error) {
733 np->n_size = tsize;
734 vnode_pager_setsize(vp, np->n_size);
735 return (error);
736 }
737 }
738 np->n_vattr.va_size = vap->va_size;
 739 }
740 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
741 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
742 vp->v_type == VREG &&
743 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
744 ap->a_p, 1)) == EINTR)
745 return (error);
746 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
747 if (error && vap->va_size != VNOVAL) {
748 np->n_size = np->n_vattr.va_size = tsize;
749 vnode_pager_setsize(vp, np->n_size);
750 }
751 return (error);
752}
753
754/*
755 * Do an nfs setattr rpc.
756 */
757static int
758nfs_setattrrpc(vp, vap, cred, procp)
759 register struct vnode *vp;
760 register struct vattr *vap;
761 struct ucred *cred;
762 struct proc *procp;
763{
764 register struct nfsv2_sattr *sp;
765 register caddr_t cp;
766 register int32_t t1, t2;
767 caddr_t bpos, dpos, cp2;
768 u_int32_t *tl;
769 int error = 0, wccflag = NFSV3_WCCRATTR;
770 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
771 int v3 = NFS_ISV3(vp);
772
773 nfsstats.rpccnt[NFSPROC_SETATTR]++;
774 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
775 nfsm_fhtom(vp, v3);
776 if (v3) {
777 nfsm_v3attrbuild(vap, TRUE);
778 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
779 *tl = nfs_false;
780 } else {
781 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
782 if (vap->va_mode == (mode_t)VNOVAL)
783 sp->sa_mode = nfs_xdrneg1;
784 else
785 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
786 if (vap->va_uid == (uid_t)VNOVAL)
787 sp->sa_uid = nfs_xdrneg1;
788 else
789 sp->sa_uid = txdr_unsigned(vap->va_uid);
790 if (vap->va_gid == (gid_t)VNOVAL)
791 sp->sa_gid = nfs_xdrneg1;
792 else
793 sp->sa_gid = txdr_unsigned(vap->va_gid);
794 sp->sa_size = txdr_unsigned(vap->va_size);
795 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
796 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
797 }
798 nfsm_request(vp, NFSPROC_SETATTR, procp, cred);
799 if (v3) {
800 nfsm_wcc_data(vp, wccflag);
801 } else
802 nfsm_loadattr(vp, (struct vattr *)0);
803 nfsm_reqdone;
804 return (error);
805}
806
807/*
808 * nfs lookup call, one step at a time...
809 * First look in cache
810 * If not found, unlock the directory nfsnode and do the rpc
811 */
812static int
813nfs_lookup(ap)
814 struct vop_lookup_args /* {
815 struct vnodeop_desc *a_desc;
816 struct vnode *a_dvp;
817 struct vnode **a_vpp;
818 struct componentname *a_cnp;
819 } */ *ap;
820{
821 struct componentname *cnp = ap->a_cnp;
822 struct vnode *dvp = ap->a_dvp;
823 struct vnode **vpp = ap->a_vpp;
824 int flags = cnp->cn_flags;
825 struct vnode *newvp;
826 u_int32_t *tl;
827 caddr_t cp;
828 int32_t t1, t2;
829 struct nfsmount *nmp;
830 caddr_t bpos, dpos, cp2;
831 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
832 long len;
833 nfsfh_t *fhp;
834 struct nfsnode *np;
835 int lockparent, wantparent, error = 0, attrflag, fhsize;
836 int v3 = NFS_ISV3(dvp);
837 struct proc *p = cnp->cn_proc;
838
839 *vpp = NULLVP;
840 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
841 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
842 return (EROFS);
843 if (dvp->v_type != VDIR)
844 return (ENOTDIR);
845 lockparent = flags & LOCKPARENT;
846 wantparent = flags & (LOCKPARENT|WANTPARENT);
847 nmp = VFSTONFS(dvp->v_mount);
848 np = VTONFS(dvp);
849 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
850 struct vattr vattr;
851 int vpid;
852
853 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) != 0) {
854 *vpp = NULLVP;
855 return (error);
856 }
857
858 newvp = *vpp;
859 vpid = newvp->v_id;
860 /*
861 * See the comment starting `Step through' in ufs/ufs_lookup.c
862 * for an explanation of the locking protocol
863 */
864 if (dvp == newvp) {
865 VREF(newvp);
866 error = 0;
867 } else if (flags & ISDOTDOT) {
868 VOP_UNLOCK(dvp, 0, p);
869 error = vget(newvp, LK_EXCLUSIVE, p);
870 if (!error && lockparent && (flags & ISLASTCN))
871 error = vn_lock(dvp, LK_EXCLUSIVE, p);
872 } else {
873 error = vget(newvp, LK_EXCLUSIVE, p);
874 if (!lockparent || error || !(flags & ISLASTCN))
875 VOP_UNLOCK(dvp, 0, p);
876 }
877 if (!error) {
878 if (vpid == newvp->v_id) {
879 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
880 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
881 nfsstats.lookupcache_hits++;
882 if (cnp->cn_nameiop != LOOKUP &&
883 (flags & ISLASTCN))
884 cnp->cn_flags |= SAVENAME;
885 return (0);
886 }
887 cache_purge(newvp);
888 }
889 vput(newvp);
890 if (lockparent && dvp != newvp && (flags & ISLASTCN))
891 VOP_UNLOCK(dvp, 0, p);
892 }
893 error = vn_lock(dvp, LK_EXCLUSIVE, p);
894 *vpp = NULLVP;
895 if (error)
896 return (error);
897 }
898 error = 0;
899 newvp = NULLVP;
900 nfsstats.lookupcache_misses++;
901 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
902 len = cnp->cn_namelen;
903 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
904 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
905 nfsm_fhtom(dvp, v3);
906 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
907 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred);
908 if (error) {
909 nfsm_postop_attr(dvp, attrflag);
910 m_freem(mrep);
911 goto nfsmout;
912 }
913 nfsm_getfh(fhp, fhsize, v3);
914
915 /*
916 * Handle RENAME case...
917 */
918 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
919 if (NFS_CMPFH(np, fhp, fhsize)) {
920 m_freem(mrep);
921 return (EISDIR);
922 }
923 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
924 if (error) {
925 m_freem(mrep);
926 return (error);
927 }
928 newvp = NFSTOV(np);
929 if (v3) {
930 nfsm_postop_attr(newvp, attrflag);
931 nfsm_postop_attr(dvp, attrflag);
932 } else
933 nfsm_loadattr(newvp, (struct vattr *)0);
934 *vpp = newvp;
935 m_freem(mrep);
936 cnp->cn_flags |= SAVENAME;
937 if (!lockparent)
938 VOP_UNLOCK(dvp, 0, p);
939 return (0);
940 }
941
942 if (flags & ISDOTDOT) {
943 VOP_UNLOCK(dvp, 0, p);
944 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
945 if (error) {
946 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
947 return (error);
948 }
949 newvp = NFSTOV(np);
950 if (lockparent && (flags & ISLASTCN) &&
951 (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
952 vput(newvp);
953 return (error);
954 }
955 } else if (NFS_CMPFH(np, fhp, fhsize)) {
956 VREF(dvp);
957 newvp = dvp;
958 } else {
959 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
960 if (error) {
961 m_freem(mrep);
962 return (error);
963 }
964 if (!lockparent || !(flags & ISLASTCN))
965 VOP_UNLOCK(dvp, 0, p);
966 newvp = NFSTOV(np);
967 }
968 if (v3) {
969 nfsm_postop_attr(newvp, attrflag);
970 nfsm_postop_attr(dvp, attrflag);
971 } else
972 nfsm_loadattr(newvp, (struct vattr *)0);
973 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
974 cnp->cn_flags |= SAVENAME;
975 if ((cnp->cn_flags & MAKEENTRY) &&
976 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
977 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
978 cache_enter(dvp, newvp, cnp);
979 }
980 *vpp = newvp;
981 nfsm_reqdone;
982 if (error) {
983 if (newvp != NULLVP) {
984 vrele(newvp);
985 *vpp = NULLVP;
986 }
987 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
988 (flags & ISLASTCN) && error == ENOENT) {
989 if (!lockparent)
990 VOP_UNLOCK(dvp, 0, p);
991 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
992 error = EROFS;
993 else
994 error = EJUSTRETURN;
995 }
996 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
997 cnp->cn_flags |= SAVENAME;
998 }
999 return (error);
1000}
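/*
 * Name cache hits in nfs_lookup() are only believed if a GETATTR shows
 * that the file's ctime still matches the n_ctime recorded when the
 * cache entry was made; otherwise the entry is purged and a LOOKUP RPC
 * is sent, and its result re-entered in the cache when MAKEENTRY is set.
 */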
1001
1002/*
1003 * nfs read call.
1004 * Just call nfs_bioread() to do the work.
1005 */
1006static int
1007nfs_read(ap)
1008 struct vop_read_args /* {
1009 struct vnode *a_vp;
1010 struct uio *a_uio;
1011 int a_ioflag;
1012 struct ucred *a_cred;
1013 } */ *ap;
1014{
1015 register struct vnode *vp = ap->a_vp;
1016
1017 if (vp->v_type != VREG)
1018 return (EPERM);
1019 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1020}
1021
1022/*
1023 * nfs readlink call
1024 */
1025static int
1026nfs_readlink(ap)
1027 struct vop_readlink_args /* {
1028 struct vnode *a_vp;
1029 struct uio *a_uio;
1030 struct ucred *a_cred;
1031 } */ *ap;
1032{
1033 register struct vnode *vp = ap->a_vp;
1034
1035 if (vp->v_type != VLNK)
1036 return (EINVAL);
1037 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
1038}
1039
1040/*
1041 * Do a readlink rpc.
1042 * Called by nfs_doio() from below the buffer cache.
1043 */
1044int
1045nfs_readlinkrpc(vp, uiop, cred)
1046 register struct vnode *vp;
1047 struct uio *uiop;
1048 struct ucred *cred;
1049{
1050 register u_int32_t *tl;
1051 register caddr_t cp;
1052 register int32_t t1, t2;
1053 caddr_t bpos, dpos, cp2;
1054 int error = 0, len, attrflag;
1055 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1056 int v3 = NFS_ISV3(vp);
1057
1058 nfsstats.rpccnt[NFSPROC_READLINK]++;
1059 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1060 nfsm_fhtom(vp, v3);
1061 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred);
1062 if (v3)
1063 nfsm_postop_attr(vp, attrflag);
1064 if (!error) {
1065 nfsm_strsiz(len, NFS_MAXPATHLEN);
1066 if (len == NFS_MAXPATHLEN) {
1067 struct nfsnode *np = VTONFS(vp);
1068 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
1069 len = np->n_size;
1070 }
1071 nfsm_mtouio(uiop, len);
1072 }
1073 nfsm_reqdone;
1074 return (error);
1075}
1076
1077/*
1078 * nfs read rpc call
1079 * Ditto above
1080 */
1081int
1082nfs_readrpc(vp, uiop, cred)
1083 register struct vnode *vp;
1084 struct uio *uiop;
1085 struct ucred *cred;
1086{
1087 register u_int32_t *tl;
1088 register caddr_t cp;
1089 register int32_t t1, t2;
1090 caddr_t bpos, dpos, cp2;
1091 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1092 struct nfsmount *nmp;
1093 int error = 0, len, retlen, tsiz, eof, attrflag;
1094 int v3 = NFS_ISV3(vp);
1095
1096#ifndef nolint
1097 eof = 0;
1098#endif
1099 nmp = VFSTONFS(vp->v_mount);
1100 tsiz = uiop->uio_resid;
1101 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1102 return (EFBIG);
1103 while (tsiz > 0) {
1104 nfsstats.rpccnt[NFSPROC_READ]++;
1105 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1106 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1107 nfsm_fhtom(vp, v3);
1108 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1109 if (v3) {
1110 txdr_hyper(uiop->uio_offset, tl);
1111 *(tl + 2) = txdr_unsigned(len);
1112 } else {
1113 *tl++ = txdr_unsigned(uiop->uio_offset);
1114 *tl++ = txdr_unsigned(len);
1115 *tl = 0;
1116 }
1117 nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred);
1118 if (v3) {
1119 nfsm_postop_attr(vp, attrflag);
1120 if (error) {
1121 m_freem(mrep);
1122 goto nfsmout;
1123 }
1124 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1125 eof = fxdr_unsigned(int, *(tl + 1));
1126 } else
1127 nfsm_loadattr(vp, (struct vattr *)0);
1128 nfsm_strsiz(retlen, nmp->nm_rsize);
1129 nfsm_mtouio(uiop, retlen);
1130 m_freem(mrep);
1131 tsiz -= retlen;
1132 if (v3) {
1133 if (eof || retlen == 0) {
1134 tsiz = 0;
1135 }
1136 } else if (retlen < len) {
1137 tsiz = 0;
1138 }
1139 }
1140nfsmout:
1141 return (error);
1142}
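/*
 * nfs_readrpc() above issues READ RPCs in chunks of at most nm_rsize
 * bytes until the uio is exhausted.  For v3 the loop stops early when
 * the server reports eof or returns zero bytes; for v2 a short reply
 * (retlen < len) is taken as end of file.
 */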
1143
1144/*
1145 * nfs write call
1146 */
1147int
1148nfs_writerpc(vp, uiop, cred, iomode, must_commit)
1149 register struct vnode *vp;
1150 register struct uio *uiop;
1151 struct ucred *cred;
1152 int *iomode, *must_commit;
1153{
1154 register u_int32_t *tl;
1155 register caddr_t cp;
1156 register int32_t t1, t2, backup;
1157 caddr_t bpos, dpos, cp2;
1158 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1159 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1160 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1161 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1162
1163#ifndef DIAGNOSTIC
1164 if (uiop->uio_iovcnt != 1)
1165 panic("nfs: writerpc iovcnt > 1");
1166#endif
1167 *must_commit = 0;
1168 tsiz = uiop->uio_resid;
1169 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1170 return (EFBIG);
1171 while (tsiz > 0) {
1172 nfsstats.rpccnt[NFSPROC_WRITE]++;
1173 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1174 nfsm_reqhead(vp, NFSPROC_WRITE,
1175 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1176 nfsm_fhtom(vp, v3);
1177 if (v3) {
1178 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1179 txdr_hyper(uiop->uio_offset, tl);
1180 tl += 2;
1181 *tl++ = txdr_unsigned(len);
1182 *tl++ = txdr_unsigned(*iomode);
1183 *tl = txdr_unsigned(len);
1184 } else {
1185 register u_int32_t x;
1186
1187 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1188 /* Set both "begin" and "current" to non-garbage. */
1189 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1190 *tl++ = x; /* "begin offset" */
1191 *tl++ = x; /* "current offset" */
1192 x = txdr_unsigned(len);
1193 *tl++ = x; /* total to this offset */
1194 *tl = x; /* size of this write */
1195 }
1196 nfsm_uiotom(uiop, len);
1197 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred);
1198 if (v3) {
1199 wccflag = NFSV3_WCCCHK;
1200 nfsm_wcc_data(vp, wccflag);
1201 if (!error) {
1202 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1203 + NFSX_V3WRITEVERF);
1204 rlen = fxdr_unsigned(int, *tl++);
1205 if (rlen == 0) {
1206 error = NFSERR_IO;
1207 m_freem(mrep);
1208 break;
1209 } else if (rlen < len) {
1210 backup = len - rlen;
1211 uiop->uio_iov->iov_base -= backup;
1212 uiop->uio_iov->iov_len += backup;
1213 uiop->uio_offset -= backup;
1214 uiop->uio_resid += backup;
1215 len = rlen;
1216 }
1217 commit = fxdr_unsigned(int, *tl++);
1218
1219 /*
 1220 * Return the lowest commitment level
1221 * obtained by any of the RPCs.
1222 */
1223 if (committed == NFSV3WRITE_FILESYNC)
1224 committed = commit;
1225 else if (committed == NFSV3WRITE_DATASYNC &&
1226 commit == NFSV3WRITE_UNSTABLE)
1227 committed = commit;
1228 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1229 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1230 NFSX_V3WRITEVERF);
1231 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1232 } else if (bcmp((caddr_t)tl,
1233 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1234 *must_commit = 1;
1235 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1236 NFSX_V3WRITEVERF);
1237 }
1238 }
1239 } else
1240 nfsm_loadattr(vp, (struct vattr *)0);
1241 if (wccflag)
1242 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1243 m_freem(mrep);
1244 if (error)
1245 break;
1246 tsiz -= len;
1247 }
1248nfsmout:
1249 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1250 committed = NFSV3WRITE_FILESYNC;
1251 *iomode = committed;
1252 if (error)
1253 uiop->uio_resid = tsiz;
1254 return (error);
1255}
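/*
 * NFSv3 write stability levels, strongest to weakest, as merged by the
 * loop above (the weakest level returned for any chunk wins):
 *
 *	NFSV3WRITE_FILESYNC - data and metadata committed to stable storage
 *	NFSV3WRITE_DATASYNC - data committed to stable storage
 *	NFSV3WRITE_UNSTABLE - may only be cached; needs a later COMMIT
 *
 * A change in the server's write verifier between replies indicates a
 * server reboot, so *must_commit is set to tell the caller that any
 * uncommitted data must be written again.
 */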
1256
1257/*
1258 * nfs mknod rpc
1259 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1260 * mode set to specify the file type and the size field for rdev.
1261 */
1262static int
1263nfs_mknodrpc(dvp, vpp, cnp, vap)
1264 register struct vnode *dvp;
1265 register struct vnode **vpp;
1266 register struct componentname *cnp;
1267 register struct vattr *vap;
1268{
1269 register struct nfsv2_sattr *sp;
1270 register u_int32_t *tl;
1271 register caddr_t cp;
1272 register int32_t t1, t2;
1273 struct vnode *newvp = (struct vnode *)0;
1274 struct nfsnode *np = (struct nfsnode *)0;
1275 struct vattr vattr;
1276 char *cp2;
1277 caddr_t bpos, dpos;
1278 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1279 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1280 u_int32_t rdev;
1281 int v3 = NFS_ISV3(dvp);
1282
1283 if (vap->va_type == VCHR || vap->va_type == VBLK)
1284 rdev = txdr_unsigned(vap->va_rdev);
1285 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1286 rdev = nfs_xdrneg1;
1287 else {
1288 return (EOPNOTSUPP);
1289 }
1290 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1291 return (error);
1292 }
1293 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1294 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1295 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1296 nfsm_fhtom(dvp, v3);
1297 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1298 if (v3) {
1299 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1300 *tl++ = vtonfsv3_type(vap->va_type);
1301 nfsm_v3attrbuild(vap, FALSE);
1302 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1303 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1304 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1305 *tl = txdr_unsigned(uminor(vap->va_rdev));
1306 }
1307 } else {
1308 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1309 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1310 sp->sa_uid = nfs_xdrneg1;
1311 sp->sa_gid = nfs_xdrneg1;
1312 sp->sa_size = rdev;
1313 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1314 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1315 }
1316 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred);
1317 if (!error) {
1318 nfsm_mtofh(dvp, newvp, v3, gotvp);
1319 if (!gotvp) {
1320 if (newvp) {
1321 vput(newvp);
1322 newvp = (struct vnode *)0;
1323 }
1324 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1325 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1326 if (!error)
1327 newvp = NFSTOV(np);
1328 }
1329 }
1330 if (v3)
1331 nfsm_wcc_data(dvp, wccflag);
1332 nfsm_reqdone;
1333 if (error) {
1334 if (newvp)
1335 vput(newvp);
1336 } else {
1337 if (cnp->cn_flags & MAKEENTRY)
1338 cache_enter(dvp, newvp, cnp);
1339 *vpp = newvp;
1340 }
1341 VTONFS(dvp)->n_flag |= NMODIFIED;
1342 if (!wccflag)
1343 VTONFS(dvp)->n_attrstamp = 0;
1344 return (error);
1345}
1346
1347/*
1348 * nfs mknod vop
1349 * just call nfs_mknodrpc() to do the work.
1350 */
1351/* ARGSUSED */
1352static int
1353nfs_mknod(ap)
1354 struct vop_mknod_args /* {
1355 struct vnode *a_dvp;
1356 struct vnode **a_vpp;
1357 struct componentname *a_cnp;
1358 struct vattr *a_vap;
1359 } */ *ap;
1360{
1361 return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1362}
1363
1364static u_long create_verf;
1365/*
1366 * nfs file create call
1367 */
1368static int
1369nfs_create(ap)
1370 struct vop_create_args /* {
1371 struct vnode *a_dvp;
1372 struct vnode **a_vpp;
1373 struct componentname *a_cnp;
1374 struct vattr *a_vap;
1375 } */ *ap;
1376{
1377 register struct vnode *dvp = ap->a_dvp;
1378 register struct vattr *vap = ap->a_vap;
1379 register struct componentname *cnp = ap->a_cnp;
1380 register struct nfsv2_sattr *sp;
1381 register u_int32_t *tl;
1382 register caddr_t cp;
1383 register int32_t t1, t2;
1384 struct nfsnode *np = (struct nfsnode *)0;
1385 struct vnode *newvp = (struct vnode *)0;
1386 caddr_t bpos, dpos, cp2;
1387 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1388 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1389 struct vattr vattr;
1390 int v3 = NFS_ISV3(dvp);
1391
1392 /*
1393 * Oops, not for me..
1394 */
1395 if (vap->va_type == VSOCK)
1396 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1397
1398 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1399 return (error);
1400 }
1401 if (vap->va_vaflags & VA_EXCLUSIVE)
1402 fmode |= O_EXCL;
1403again:
1404 nfsstats.rpccnt[NFSPROC_CREATE]++;
1405 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1406 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1407 nfsm_fhtom(dvp, v3);
1408 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1409 if (v3) {
1410 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1411 if (fmode & O_EXCL) {
1412 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1413 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1414#ifdef INET
1415 if (!TAILQ_EMPTY(&in_ifaddrhead))
1416 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
1417 else
1418#endif
1419 *tl++ = create_verf;
1420 *tl = ++create_verf;
1421 } else {
1422 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1423 nfsm_v3attrbuild(vap, FALSE);
1424 }
1425 } else {
1426 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1427 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1428 sp->sa_uid = nfs_xdrneg1;
1429 sp->sa_gid = nfs_xdrneg1;
1430 sp->sa_size = 0;
1431 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1432 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1433 }
1434 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred);
1435 if (!error) {
1436 nfsm_mtofh(dvp, newvp, v3, gotvp);
1437 if (!gotvp) {
1438 if (newvp) {
1439 vput(newvp);
1440 newvp = (struct vnode *)0;
1441 }
1442 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1443 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1444 if (!error)
1445 newvp = NFSTOV(np);
1446 }
1447 }
1448 if (v3)
1449 nfsm_wcc_data(dvp, wccflag);
1450 nfsm_reqdone;
1451 if (error) {
1452 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1453 fmode &= ~O_EXCL;
1454 goto again;
1455 }
1456 if (newvp)
1457 vput(newvp);
1458 } else if (v3 && (fmode & O_EXCL)) {
1459 /*
1460 * We are normally called with only a partially initialized
 1461 * VAP. Since the NFSv3 spec says that the server may use the
 1462 * file attributes to store the verifier, the spec requires
 1463 * us to do a SETATTR RPC. FreeBSD servers store the verifier
 1464 * in atime, but we can't really assume that all servers will,
 1465 * so we ensure that our SETATTR sets both atime and mtime.
1466 */
1467 if (vap->va_mtime.tv_sec == VNOVAL)
1468 vfs_timestamp(&vap->va_mtime);
1469 if (vap->va_atime.tv_sec == VNOVAL)
1470 vap->va_atime = vap->va_mtime;
1471 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
1472 }
1473 if (!error) {
1474 if (cnp->cn_flags & MAKEENTRY)
1475 cache_enter(dvp, newvp, cnp);
1476 *ap->a_vpp = newvp;
1477 }
1478 VTONFS(dvp)->n_flag |= NMODIFIED;
1479 if (!wccflag)
1480 VTONFS(dvp)->n_attrstamp = 0;
1481 return (error);
1482}
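/*
 * For v3 exclusive creates the 8 byte verifier built above is the
 * host's primary IP address (when one is configured) plus a simple
 * counter, which lets the server distinguish a retransmitted CREATE
 * from a real name collision.  Servers that reject exclusive mode with
 * NFSERR_NOTSUPP cause a retry as an unchecked create.
 */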
1483
1484/*
1485 * nfs file remove call
1486 * To try and make nfs semantics closer to ufs semantics, a file that has
1487 * other processes using the vnode is renamed instead of removed and then
1488 * removed later on the last close.
1489 * - If v_usecount > 1
1490 * If a rename is not already in the works
1491 * call nfs_sillyrename() to set it up
1492 * else
1493 * do the remove rpc
1494 */
1495static int
1496nfs_remove(ap)
1497 struct vop_remove_args /* {
1498 struct vnodeop_desc *a_desc;
1499 struct vnode * a_dvp;
1500 struct vnode * a_vp;
1501 struct componentname * a_cnp;
1502 } */ *ap;
1503{
1504 register struct vnode *vp = ap->a_vp;
1505 register struct vnode *dvp = ap->a_dvp;
1506 register struct componentname *cnp = ap->a_cnp;
1507 register struct nfsnode *np = VTONFS(vp);
1508 int error = 0;
1509 struct vattr vattr;
1510
1511#ifndef DIAGNOSTIC
1512 if ((cnp->cn_flags & HASBUF) == 0)
1513 panic("nfs_remove: no name");
1514 if (vp->v_usecount < 1)
1515 panic("nfs_remove: bad v_usecount");
1516#endif
1517 if (vp->v_type == VDIR)
1518 error = EPERM;
1519 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1520 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
1521 vattr.va_nlink > 1)) {
1522 /*
1523 * Purge the name cache so that the chance of a lookup for
1524 * the name succeeding while the remove is in progress is
1525 * minimized. Without node locking it can still happen, such
 1526 * that an I/O op returns ESTALE, but the same thing can
 1527 * happen anyway if another host removes the file.
1528 */
1529 cache_purge(vp);
1530 /*
1531 * throw away biocache buffers, mainly to avoid
1532 * unnecessary delayed writes later.
1533 */
1534 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
1535 /* Do the rpc */
1536 if (error != EINTR)
1537 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1538 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
1539 /*
 1540 * Kludge City: if the first reply to the remove rpc is lost,
 1541 * the reply to the retransmitted request will be ENOENT
 1542 * since the file was in fact removed.
1543 * Therefore, we cheat and return success.
1544 */
1545 if (error == ENOENT)
1546 error = 0;
1547 } else if (!np->n_sillyrename)
1548 error = nfs_sillyrename(dvp, vp, cnp);
1549 np->n_attrstamp = 0;
1550 return (error);
1551}
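/*
 * The sillyrename path above renames a file that is still in use to a
 * temporary name in the same directory instead of removing it.  The
 * state is remembered in np->n_sillyrename and the real REMOVE is done
 * later, from nfs_inactive() via nfs_removeit(), when the last
 * reference to the vnode goes away.
 */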
1552
1553/*
1554 * nfs file remove rpc called from nfs_inactive
1555 */
1556int
1557nfs_removeit(sp)
1558 register struct sillyrename *sp;
1559{
1560
1561 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1562 (struct proc *)0));
1563}
1564
1565/*
1566 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1567 */
1568static int
1569nfs_removerpc(dvp, name, namelen, cred, proc)
1570 register struct vnode *dvp;
1571 const char *name;
1572 int namelen;
1573 struct ucred *cred;
1574 struct proc *proc;
1575{
1576 register u_int32_t *tl;
1577 register caddr_t cp;
1578 register int32_t t1, t2;
1579 caddr_t bpos, dpos, cp2;
1580 int error = 0, wccflag = NFSV3_WCCRATTR;
1581 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1582 int v3 = NFS_ISV3(dvp);
1583
1584 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1585 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1586 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1587 nfsm_fhtom(dvp, v3);
1588 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1589 nfsm_request(dvp, NFSPROC_REMOVE, proc, cred);
1590 if (v3)
1591 nfsm_wcc_data(dvp, wccflag);
1592 nfsm_reqdone;
1593 VTONFS(dvp)->n_flag |= NMODIFIED;
1594 if (!wccflag)
1595 VTONFS(dvp)->n_attrstamp = 0;
1596 return (error);
1597}
1598
1599/*
1600 * nfs file rename call
1601 */
1602static int
1603nfs_rename(ap)
1604 struct vop_rename_args /* {
1605 struct vnode *a_fdvp;
1606 struct vnode *a_fvp;
1607 struct componentname *a_fcnp;
1608 struct vnode *a_tdvp;
1609 struct vnode *a_tvp;
1610 struct componentname *a_tcnp;
1611 } */ *ap;
1612{
1613 register struct vnode *fvp = ap->a_fvp;
1614 register struct vnode *tvp = ap->a_tvp;
1615 register struct vnode *fdvp = ap->a_fdvp;
1616 register struct vnode *tdvp = ap->a_tdvp;
1617 register struct componentname *tcnp = ap->a_tcnp;
1618 register struct componentname *fcnp = ap->a_fcnp;
1619 int error;
1620
1621#ifndef DIAGNOSTIC
1622 if ((tcnp->cn_flags & HASBUF) == 0 ||
1623 (fcnp->cn_flags & HASBUF) == 0)
1624 panic("nfs_rename: no name");
1625#endif
1626 /* Check for cross-device rename */
1627 if ((fvp->v_mount != tdvp->v_mount) ||
1628 (tvp && (fvp->v_mount != tvp->v_mount))) {
1629 error = EXDEV;
1630 goto out;
1631 }
1632
1633 /*
1634 * We have to flush B_DELWRI data prior to renaming
1635 * the file. If we don't, the delayed-write buffers
1636 * can be flushed out later after the file has gone stale
1637 * under NFSV3. NFSV2 does not have this problem because
 1638 * (as far as I can tell) it flushes dirty buffers more
1639 * often.
1640 */
1641
1642 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_proc);
1643 if (tvp)
1644 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_proc);
1645
1646 /*
1647 * If the tvp exists and is in use, sillyrename it before doing the
1648 * rename of the new file over it.
1649 * XXX Can't sillyrename a directory.
1650 */
1651 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1652 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1653 vput(tvp);
1654 tvp = NULL;
1655 }
1656
1657 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1658 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1659 tcnp->cn_proc);
1660
1661 if (fvp->v_type == VDIR) {
1662 if (tvp != NULL && tvp->v_type == VDIR)
1663 cache_purge(tdvp);
1664 cache_purge(fdvp);
1665 }
1666
1667out:
1668 if (tdvp == tvp)
1669 vrele(tdvp);
1670 else
1671 vput(tdvp);
1672 if (tvp)
1673 vput(tvp);
1674 vrele(fdvp);
1675 vrele(fvp);
1676 /*
1677 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1678 */
1679 if (error == ENOENT)
1680 error = 0;
1681 return (error);
1682}
1683
1684/*
1685 * nfs file rename rpc called from nfs_remove() above
1686 */
1687static int
1688nfs_renameit(sdvp, scnp, sp)
1689 struct vnode *sdvp;
1690 struct componentname *scnp;
1691 register struct sillyrename *sp;
1692{
1693 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1694 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc));
1695}
1696
1697/*
1698 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1699 */
1700static int
1701nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc)
1702 register struct vnode *fdvp;
1703 const char *fnameptr;
1704 int fnamelen;
1705 register struct vnode *tdvp;
1706 const char *tnameptr;
1707 int tnamelen;
1708 struct ucred *cred;
1709 struct proc *proc;
1710{
1711 register u_int32_t *tl;
1712 register caddr_t cp;
1713 register int32_t t1, t2;
1714 caddr_t bpos, dpos, cp2;
1715 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1716 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1717 int v3 = NFS_ISV3(fdvp);
1718
1719 nfsstats.rpccnt[NFSPROC_RENAME]++;
1720 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1721 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1722 nfsm_rndup(tnamelen));
1723 nfsm_fhtom(fdvp, v3);
1724 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1725 nfsm_fhtom(tdvp, v3);
1726 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1727 nfsm_request(fdvp, NFSPROC_RENAME, proc, cred);
1728 if (v3) {
1729 nfsm_wcc_data(fdvp, fwccflag);
1730 nfsm_wcc_data(tdvp, twccflag);
1731 }
1732 nfsm_reqdone;
1733 VTONFS(fdvp)->n_flag |= NMODIFIED;
1734 VTONFS(tdvp)->n_flag |= NMODIFIED;
1735 if (!fwccflag)
1736 VTONFS(fdvp)->n_attrstamp = 0;
1737 if (!twccflag)
1738 VTONFS(tdvp)->n_attrstamp = 0;
1739 return (error);
1740}
1741
1742/*
1743 * nfs hard link create call
1744 */
1745static int
1746nfs_link(ap)
1747 struct vop_link_args /* {
1748 struct vnode *a_tdvp;
1749 struct vnode *a_vp;
1750 struct componentname *a_cnp;
1751 } */ *ap;
1752{
1753 register struct vnode *vp = ap->a_vp;
1754 register struct vnode *tdvp = ap->a_tdvp;
1755 register struct componentname *cnp = ap->a_cnp;
1756 register u_int32_t *tl;
1757 register caddr_t cp;
1758 register int32_t t1, t2;
1759 caddr_t bpos, dpos, cp2;
1760 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1761 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1762 int v3;
1763
1764 if (vp->v_mount != tdvp->v_mount) {
1765 return (EXDEV);
1766 }
1767
1768 /*
1769 * Push all writes to the server, so that the attribute cache
1770 * doesn't get "out of sync" with the server.
1771 * XXX There should be a better way!
1772 */
1773 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);
1774
1775 v3 = NFS_ISV3(vp);
1776 nfsstats.rpccnt[NFSPROC_LINK]++;
1777 nfsm_reqhead(vp, NFSPROC_LINK,
1778 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1779 nfsm_fhtom(vp, v3);
1780 nfsm_fhtom(tdvp, v3);
1781 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1782 nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
1783 if (v3) {
1784 nfsm_postop_attr(vp, attrflag);
1785 nfsm_wcc_data(tdvp, wccflag);
1786 }
1787 nfsm_reqdone;
1788 VTONFS(tdvp)->n_flag |= NMODIFIED;
1789 if (!attrflag)
1790 VTONFS(vp)->n_attrstamp = 0;
1791 if (!wccflag)
1792 VTONFS(tdvp)->n_attrstamp = 0;
1793 /*
1794 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1795 */
1796 if (error == EEXIST)
1797 error = 0;
1798 return (error);
1799}
1800
1801/*
1802 * nfs symbolic link create call
1803 */
1804static int
1805nfs_symlink(ap)
1806 struct vop_symlink_args /* {
1807 struct vnode *a_dvp;
1808 struct vnode **a_vpp;
1809 struct componentname *a_cnp;
1810 struct vattr *a_vap;
1811 char *a_target;
1812 } */ *ap;
1813{
1814 register struct vnode *dvp = ap->a_dvp;
1815 register struct vattr *vap = ap->a_vap;
1816 register struct componentname *cnp = ap->a_cnp;
1817 register struct nfsv2_sattr *sp;
1818 register u_int32_t *tl;
1819 register caddr_t cp;
1820 register int32_t t1, t2;
1821 caddr_t bpos, dpos, cp2;
1822 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1823 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1824 struct vnode *newvp = (struct vnode *)0;
1825 int v3 = NFS_ISV3(dvp);
1826
1827 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1828 slen = strlen(ap->a_target);
1829 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1830 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1831 nfsm_fhtom(dvp, v3);
1832 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1833 if (v3) {
1834 nfsm_v3attrbuild(vap, FALSE);
1835 }
1836 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1837 if (!v3) {
1838 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1839 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1840 sp->sa_uid = nfs_xdrneg1;
1841 sp->sa_gid = nfs_xdrneg1;
1842 sp->sa_size = nfs_xdrneg1;
1843 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1844 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1845 }
1846
1847 /*
1848 * Issue the NFS request and get the rpc response.
1849 *
1850 * Only NFSv3 responses returning an error of 0 actually return
1851 * a file handle that can be converted into newvp without having
1852 * to do an extra lookup rpc.
1853 */
1854 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred);
1855 if (v3) {
1856 if (error == 0)
1857 nfsm_mtofh(dvp, newvp, v3, gotvp);
1858 nfsm_wcc_data(dvp, wccflag);
1859 }
1860
1861 /*
1862	 * Error exits in the nfsm_* macros jump to here; mrep is also freed.
1863 */
1864
1865 nfsm_reqdone;
1866
1867 /*
1868 * If we get an EEXIST error, silently convert it to no-error
1869 * in case of an NFS retry.
1870 */
1871 if (error == EEXIST)
1872 error = 0;
1873
1874 /*
1875 * If we do not have (or no longer have) an error, and we could
1876 * not extract the newvp from the response due to the request being
1877	 * NFSv2 or the error being EEXIST, we have to do a lookup in order
1878	 * to obtain a newvp to return.
1879 */
1880 if (error == 0 && newvp == NULL) {
1881 struct nfsnode *np = NULL;
1882
1883 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1884 cnp->cn_cred, cnp->cn_proc, &np);
1885 if (!error)
1886 newvp = NFSTOV(np);
1887 }
1888 if (error) {
1889 if (newvp)
1890 vput(newvp);
1891 } else {
1892 *ap->a_vpp = newvp;
1893 }
1894 VTONFS(dvp)->n_flag |= NMODIFIED;
1895 if (!wccflag)
1896 VTONFS(dvp)->n_attrstamp = 0;
1897 return (error);
1898}
1899
1900/*
1901 * nfs make dir call
1902 */
1903static int
1904nfs_mkdir(ap)
1905 struct vop_mkdir_args /* {
1906 struct vnode *a_dvp;
1907 struct vnode **a_vpp;
1908 struct componentname *a_cnp;
1909 struct vattr *a_vap;
1910 } */ *ap;
1911{
1912 register struct vnode *dvp = ap->a_dvp;
1913 register struct vattr *vap = ap->a_vap;
1914 register struct componentname *cnp = ap->a_cnp;
1915 register struct nfsv2_sattr *sp;
1916 register u_int32_t *tl;
1917 register caddr_t cp;
1918 register int32_t t1, t2;
1919 register int len;
1920 struct nfsnode *np = (struct nfsnode *)0;
1921 struct vnode *newvp = (struct vnode *)0;
1922 caddr_t bpos, dpos, cp2;
1923 int error = 0, wccflag = NFSV3_WCCRATTR;
1924 int gotvp = 0;
1925 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1926 struct vattr vattr;
1927 int v3 = NFS_ISV3(dvp);
1928
1929 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1930 return (error);
1931 }
1932 len = cnp->cn_namelen;
1933 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1934 nfsm_reqhead(dvp, NFSPROC_MKDIR,
1935 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1936 nfsm_fhtom(dvp, v3);
1937 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1938 if (v3) {
1939 nfsm_v3attrbuild(vap, FALSE);
1940 } else {
1941 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1942 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1943 sp->sa_uid = nfs_xdrneg1;
1944 sp->sa_gid = nfs_xdrneg1;
1945 sp->sa_size = nfs_xdrneg1;
1946 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1947 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1948 }
1949 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
1950 if (!error)
1951 nfsm_mtofh(dvp, newvp, v3, gotvp);
1952 if (v3)
1953 nfsm_wcc_data(dvp, wccflag);
1954 nfsm_reqdone;
1955 VTONFS(dvp)->n_flag |= NMODIFIED;
1956 if (!wccflag)
1957 VTONFS(dvp)->n_attrstamp = 0;
1958 /*
1959	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry
1960 * if we can succeed in looking up the directory.
1961 */
1962 if (error == EEXIST || (!error && !gotvp)) {
1963 if (newvp) {
1964 vrele(newvp);
1965 newvp = (struct vnode *)0;
1966 }
1967 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1968 cnp->cn_proc, &np);
1969 if (!error) {
1970 newvp = NFSTOV(np);
1971 if (newvp->v_type != VDIR)
1972 error = EEXIST;
1973 }
1974 }
1975 if (error) {
1976 if (newvp)
1977 vrele(newvp);
1978 } else
1979 *ap->a_vpp = newvp;
1980 return (error);
1981}
1982
1983/*
1984 * nfs remove directory call
1985 */
1986static int
1987nfs_rmdir(ap)
1988 struct vop_rmdir_args /* {
1989 struct vnode *a_dvp;
1990 struct vnode *a_vp;
1991 struct componentname *a_cnp;
1992 } */ *ap;
1993{
1994 register struct vnode *vp = ap->a_vp;
1995 register struct vnode *dvp = ap->a_dvp;
1996 register struct componentname *cnp = ap->a_cnp;
1997 register u_int32_t *tl;
1998 register caddr_t cp;
1999 register int32_t t1, t2;
2000 caddr_t bpos, dpos, cp2;
2001 int error = 0, wccflag = NFSV3_WCCRATTR;
2002 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2003 int v3 = NFS_ISV3(dvp);
2004
2005 if (dvp == vp)
2006 return (EINVAL);
2007 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2008 nfsm_reqhead(dvp, NFSPROC_RMDIR,
2009 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2010 nfsm_fhtom(dvp, v3);
2011 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2012 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
2013 if (v3)
2014 nfsm_wcc_data(dvp, wccflag);
2015 nfsm_reqdone;
2016 VTONFS(dvp)->n_flag |= NMODIFIED;
2017 if (!wccflag)
2018 VTONFS(dvp)->n_attrstamp = 0;
2019 cache_purge(dvp);
2020 cache_purge(vp);
2021 /*
2022	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2023 */
2024 if (error == ENOENT)
2025 error = 0;
2026 return (error);
2027}
2028
2029/*
2030 * nfs readdir call
2031 */
2032static int
2033nfs_readdir(ap)
2034 struct vop_readdir_args /* {
2035 struct vnode *a_vp;
2036 struct uio *a_uio;
2037 struct ucred *a_cred;
2038 } */ *ap;
2039{
2040 register struct vnode *vp = ap->a_vp;
2041 register struct nfsnode *np = VTONFS(vp);
2042 register struct uio *uio = ap->a_uio;
2043 int tresid, error;
2044 struct vattr vattr;
2045
2046 if (vp->v_type != VDIR)
2047 return (EPERM);
2048 /*
2049 * First, check for hit on the EOF offset cache
2050 */
2051 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
2052 (np->n_flag & NMODIFIED) == 0) {
2053 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
2054 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
2055 nfsstats.direofcache_hits++;
2056 return (0);
2057 }
2058 } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
2059 np->n_mtime == vattr.va_mtime.tv_sec) {
2060 nfsstats.direofcache_hits++;
2061 return (0);
2062 }
2063 }
2064
2065 /*
2066 * Call nfs_bioread() to do the real work.
2067 */
2068 tresid = uio->uio_resid;
2069 error = nfs_bioread(vp, uio, 0, ap->a_cred);
2070
2071 if (!error && uio->uio_resid == tresid)
2072 nfsstats.direofcache_misses++;
2073 return (error);
2074}
2075
2076/*
2077 * Readdir rpc call.
2078 * Called from below the buffer cache by nfs_doio().
2079 */
2080int
2081nfs_readdirrpc(vp, uiop, cred)
2082 struct vnode *vp;
2083 register struct uio *uiop;
2084 struct ucred *cred;
2085
2086{
2087 register int len, left;
2088 register struct dirent *dp = NULL;
2089 register u_int32_t *tl;
2090 register caddr_t cp;
2091 register int32_t t1, t2;
2092 register nfsuint64 *cookiep;
2093 caddr_t bpos, dpos, cp2;
2094 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2095 nfsuint64 cookie;
2096 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2097 struct nfsnode *dnp = VTONFS(vp);
2098 u_quad_t fileno;
2099 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2100 int attrflag;
2101 int v3 = NFS_ISV3(vp);
2102
2103#ifndef DIAGNOSTIC
2104 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2105 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2106 panic("nfs readdirrpc bad uio");
2107#endif
2108
2109 /*
2110 * If there is no cookie, assume directory was stale.
2111 */
2112 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2113 if (cookiep)
2114 cookie = *cookiep;
2115 else
2116 return (NFSERR_BAD_COOKIE);
2117 /*
2118 * Loop around doing readdir rpc's of size nm_readdirsize
2119 * truncated to a multiple of DIRBLKSIZ.
2120	 * The stopping criterion is EOF or a full buffer.
2121 */
2122 while (more_dirs && bigenough) {
2123 nfsstats.rpccnt[NFSPROC_READDIR]++;
2124 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2125 NFSX_READDIR(v3));
2126 nfsm_fhtom(vp, v3);
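		/*
		 * A v3 request carries the 64-bit cookie plus the cookie
		 * verifier returned by the previous reply; v2 carries only
		 * a 32-bit cookie.  Both are followed by the byte count.
		 */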
2127 if (v3) {
2128 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2129 *tl++ = cookie.nfsuquad[0];
2130 *tl++ = cookie.nfsuquad[1];
2131 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2132 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2133 } else {
2134 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2135 *tl++ = cookie.nfsuquad[0];
2136 }
2137 *tl = txdr_unsigned(nmp->nm_readdirsize);
2138 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
2139 if (v3) {
2140 nfsm_postop_attr(vp, attrflag);
2141 if (!error) {
2142 nfsm_dissect(tl, u_int32_t *,
2143 2 * NFSX_UNSIGNED);
2144 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2145 dnp->n_cookieverf.nfsuquad[1] = *tl;
2146 } else {
2147 m_freem(mrep);
2148 goto nfsmout;
2149 }
2150 }
2151 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2152 more_dirs = fxdr_unsigned(int, *tl);
2153
2154 /* loop thru the dir entries, doctoring them to 4bsd form */
2155 while (more_dirs && bigenough) {
2156 if (v3) {
2157 nfsm_dissect(tl, u_int32_t *,
2158 3 * NFSX_UNSIGNED);
2159 fileno = fxdr_hyper(tl);
2160 len = fxdr_unsigned(int, *(tl + 2));
2161 } else {
2162 nfsm_dissect(tl, u_int32_t *,
2163 2 * NFSX_UNSIGNED);
2164 fileno = fxdr_unsigned(u_quad_t, *tl++);
2165 len = fxdr_unsigned(int, *tl);
2166 }
2167 if (len <= 0 || len > NFS_MAXNAMLEN) {
2168 error = EBADRPC;
2169 m_freem(mrep);
2170 goto nfsmout;
2171 }
2172 tlen = nfsm_rndup(len);
2173 if (tlen == len)
2174 tlen += 4; /* To ensure null termination */
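			/*
			 * A dirent is never allowed to straddle a DIRBLKSIZ
			 * boundary.  If this one will not fit in what is left
			 * of the current block, pad the previous record's
			 * d_reclen out to the end of the block.
			 */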
2175 left = DIRBLKSIZ - blksiz;
2176 if ((tlen + DIRHDSIZ) > left) {
2177 dp->d_reclen += left;
2178 uiop->uio_iov->iov_base += left;
2179 uiop->uio_iov->iov_len -= left;
2180 uiop->uio_offset += left;
2181 uiop->uio_resid -= left;
2182 blksiz = 0;
2183 }
2184 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2185 bigenough = 0;
2186 if (bigenough) {
2187 dp = (struct dirent *)uiop->uio_iov->iov_base;
2188 dp->d_fileno = (int)fileno;
2189 dp->d_namlen = len;
2190 dp->d_reclen = tlen + DIRHDSIZ;
2191 dp->d_type = DT_UNKNOWN;
2192 blksiz += dp->d_reclen;
2193 if (blksiz == DIRBLKSIZ)
2194 blksiz = 0;
2195 uiop->uio_offset += DIRHDSIZ;
2196 uiop->uio_resid -= DIRHDSIZ;
2197 uiop->uio_iov->iov_base += DIRHDSIZ;
2198 uiop->uio_iov->iov_len -= DIRHDSIZ;
2199 nfsm_mtouio(uiop, len);
2200 cp = uiop->uio_iov->iov_base;
2201 tlen -= len;
2202 *cp = '\0'; /* null terminate */
2203 uiop->uio_iov->iov_base += tlen;
2204 uiop->uio_iov->iov_len -= tlen;
2205 uiop->uio_offset += tlen;
2206 uiop->uio_resid -= tlen;
2207 } else
2208 nfsm_adv(nfsm_rndup(len));
2209 if (v3) {
2210 nfsm_dissect(tl, u_int32_t *,
2211 3 * NFSX_UNSIGNED);
2212 } else {
2213 nfsm_dissect(tl, u_int32_t *,
2214 2 * NFSX_UNSIGNED);
2215 }
2216 if (bigenough) {
2217 cookie.nfsuquad[0] = *tl++;
2218 if (v3)
2219 cookie.nfsuquad[1] = *tl++;
2220 } else if (v3)
2221 tl += 2;
2222 else
2223 tl++;
2224 more_dirs = fxdr_unsigned(int, *tl);
2225 }
2226 /*
2227 * If at end of rpc data, get the eof boolean
2228 */
2229 if (!more_dirs) {
2230 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2231 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2232 }
2233 m_freem(mrep);
2234 }
2235 /*
2236	 * Fill the last record, if any, out to a multiple of DIRBLKSIZ
2237 * by increasing d_reclen for the last record.
2238 */
2239 if (blksiz > 0) {
2240 left = DIRBLKSIZ - blksiz;
2241 dp->d_reclen += left;
2242 uiop->uio_iov->iov_base += left;
2243 uiop->uio_iov->iov_len -= left;
2244 uiop->uio_offset += left;
2245 uiop->uio_resid -= left;
2246 }
2247
2248 /*
2249 * We are now either at the end of the directory or have filled the
2250 * block.
2251 */
2252 if (bigenough)
2253 dnp->n_direofoffset = uiop->uio_offset;
2254 else {
2255 if (uiop->uio_resid > 0)
2256 printf("EEK! readdirrpc resid > 0\n");
2257 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2258 *cookiep = cookie;
2259 }
2260nfsmout:
2261 return (error);
2262}
2263
2264/*
2265 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2266 */
2267int
2268nfs_readdirplusrpc(vp, uiop, cred)
2269 struct vnode *vp;
2270 register struct uio *uiop;
2271 struct ucred *cred;
2272{
2273 register int len, left;
2274 register struct dirent *dp;
2275 register u_int32_t *tl;
2276 register caddr_t cp;
2277 register int32_t t1, t2;
2278 register struct vnode *newvp;
2279 register nfsuint64 *cookiep;
2280 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2281 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2282 struct nameidata nami, *ndp = &nami;
2283 struct componentname *cnp = &ndp->ni_cnd;
2284 nfsuint64 cookie;
2285 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2286 struct nfsnode *dnp = VTONFS(vp), *np;
2287 nfsfh_t *fhp;
2288 u_quad_t fileno;
2289 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2290 int attrflag, fhsize;
2291
2292#ifndef nolint
2293 dp = (struct dirent *)0;
2294#endif
2295#ifndef DIAGNOSTIC
2296 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2297 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2298 panic("nfs readdirplusrpc bad uio");
2299#endif
2300 ndp->ni_dvp = vp;
2301 newvp = NULLVP;
2302
2303 /*
2304 * If there is no cookie, assume directory was stale.
2305 */
2306 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2307 if (cookiep)
2308 cookie = *cookiep;
2309 else
2310 return (NFSERR_BAD_COOKIE);
2311 /*
2312 * Loop around doing readdir rpc's of size nm_readdirsize
2313 * truncated to a multiple of DIRBLKSIZ.
2314	 * The stopping criterion is EOF or a full buffer.
2315 */
2316 while (more_dirs && bigenough) {
2317 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2318 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2319 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2320 nfsm_fhtom(vp, 1);
2321 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2322 *tl++ = cookie.nfsuquad[0];
2323 *tl++ = cookie.nfsuquad[1];
2324 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2325 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2326 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2327 *tl = txdr_unsigned(nmp->nm_rsize);
2328 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
2329 nfsm_postop_attr(vp, attrflag);
2330 if (error) {
2331 m_freem(mrep);
2332 goto nfsmout;
2333 }
2334 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2335 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2336 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2337 more_dirs = fxdr_unsigned(int, *tl);
2338
2339 /* loop thru the dir entries, doctoring them to 4bsd form */
2340 while (more_dirs && bigenough) {
2341 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2342 fileno = fxdr_hyper(tl);
2343 len = fxdr_unsigned(int, *(tl + 2));
2344 if (len <= 0 || len > NFS_MAXNAMLEN) {
2345 error = EBADRPC;
2346 m_freem(mrep);
2347 goto nfsmout;
2348 }
2349 tlen = nfsm_rndup(len);
2350 if (tlen == len)
2351				tlen += 4;	/* To ensure null termination */
2352 left = DIRBLKSIZ - blksiz;
2353 if ((tlen + DIRHDSIZ) > left) {
2354 dp->d_reclen += left;
2355 uiop->uio_iov->iov_base += left;
2356 uiop->uio_iov->iov_len -= left;
2357 uiop->uio_offset += left;
2358 uiop->uio_resid -= left;
2359 blksiz = 0;
2360 }
2361 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2362 bigenough = 0;
2363 if (bigenough) {
2364 dp = (struct dirent *)uiop->uio_iov->iov_base;
2365 dp->d_fileno = (int)fileno;
2366 dp->d_namlen = len;
2367 dp->d_reclen = tlen + DIRHDSIZ;
2368 dp->d_type = DT_UNKNOWN;
2369 blksiz += dp->d_reclen;
2370 if (blksiz == DIRBLKSIZ)
2371 blksiz = 0;
2372 uiop->uio_offset += DIRHDSIZ;
2373 uiop->uio_resid -= DIRHDSIZ;
2374 uiop->uio_iov->iov_base += DIRHDSIZ;
2375 uiop->uio_iov->iov_len -= DIRHDSIZ;
2376 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2377 cnp->cn_namelen = len;
2378 nfsm_mtouio(uiop, len);
2379 cp = uiop->uio_iov->iov_base;
2380 tlen -= len;
2381 *cp = '\0';
2382 uiop->uio_iov->iov_base += tlen;
2383 uiop->uio_iov->iov_len -= tlen;
2384 uiop->uio_offset += tlen;
2385 uiop->uio_resid -= tlen;
2386 } else
2387 nfsm_adv(nfsm_rndup(len));
2388 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2389 if (bigenough) {
2390 cookie.nfsuquad[0] = *tl++;
2391 cookie.nfsuquad[1] = *tl++;
2392 } else
2393 tl += 2;
2394
2395 /*
2396 * Since the attributes are before the file handle
2397 * (sigh), we must skip over the attributes and then
2398 * come back and get them.
2399 */
2400 attrflag = fxdr_unsigned(int, *tl);
2401 if (attrflag) {
2402 dpossav1 = dpos;
2403 mdsav1 = md;
2404 nfsm_adv(NFSX_V3FATTR);
2405 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2406 doit = fxdr_unsigned(int, *tl);
2407 if (doit) {
2408 nfsm_getfh(fhp, fhsize, 1);
2409 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2410 VREF(vp);
2411 newvp = vp;
2412 np = dnp;
2413 } else {
2414 error = nfs_nget(vp->v_mount, fhp,
2415 fhsize, &np);
2416 if (error)
2417 doit = 0;
2418 else
2419 newvp = NFSTOV(np);
2420 }
2421 }
2422 if (doit && bigenough) {
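				/*
				 * Temporarily rewind the mbuf parse position
				 * to the saved spot so the attributes can be
				 * loaded into the nfsnode, then restore it.
				 */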
2423 dpossav2 = dpos;
2424 dpos = dpossav1;
2425 mdsav2 = md;
2426 md = mdsav1;
2427 nfsm_loadattr(newvp, (struct vattr *)0);
2428 dpos = dpossav2;
2429 md = mdsav2;
2430 dp->d_type =
2431 IFTODT(VTTOIF(np->n_vattr.va_type));
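					/*
					 * Enter the name and vnode into the
					 * name cache so a later lookup of this
					 * entry can be satisfied without an rpc.
					 */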
2432 ndp->ni_vp = newvp;
2433 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2434 }
2435 } else {
2436 /* Just skip over the file handle */
2437 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2438 i = fxdr_unsigned(int, *tl);
2439 nfsm_adv(nfsm_rndup(i));
2440 }
2441 if (newvp != NULLVP) {
2442 if (newvp == vp)
2443 vrele(newvp);
2444 else
2445 vput(newvp);
2446 newvp = NULLVP;
2447 }
2448 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2449 more_dirs = fxdr_unsigned(int, *tl);
2450 }
2451 /*
2452 * If at end of rpc data, get the eof boolean
2453 */
2454 if (!more_dirs) {
2455 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2456 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2457 }
2458 m_freem(mrep);
2459 }
2460 /*
2461	 * Fill the last record, if any, out to a multiple of DIRBLKSIZ
2462 * by increasing d_reclen for the last record.
2463 */
2464 if (blksiz > 0) {
2465 left = DIRBLKSIZ - blksiz;
2466 dp->d_reclen += left;
2467 uiop->uio_iov->iov_base += left;
2468 uiop->uio_iov->iov_len -= left;
2469 uiop->uio_offset += left;
2470 uiop->uio_resid -= left;
2471 }
2472
2473 /*
2474 * We are now either at the end of the directory or have filled the
2475 * block.
2476 */
2477 if (bigenough)
2478 dnp->n_direofoffset = uiop->uio_offset;
2479 else {
2480 if (uiop->uio_resid > 0)
2481 printf("EEK! readdirplusrpc resid > 0\n");
2482 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2483 *cookiep = cookie;
2484 }
2485nfsmout:
2486 if (newvp != NULLVP) {
2487 if (newvp == vp)
2488 vrele(newvp);
2489 else
2490 vput(newvp);
2491 newvp = NULLVP;
2492 }
2493 return (error);
2494}
2495
2496/*
2497 * Silly rename.  To make the stateless NFS filesystem look a little more
2498 * like "ufs", a remove of an active vnode is translated into a rename to
2499 * a funny looking filename that is removed by nfs_inactive on the
2500 * nfsnode.  There is the potential for another process on a different
2501 * client to create the same funny name between the time nfs_lookitup()
2502 * fails and nfs_rename() completes, but...
2503 */
2504static int
2505nfs_sillyrename(dvp, vp, cnp)
2506 struct vnode *dvp, *vp;
2507 struct componentname *cnp;
2508{
2509 register struct sillyrename *sp;
2510 struct nfsnode *np;
2511 int error;
2512 short pid;
2513
2514 cache_purge(dvp);
2515 np = VTONFS(vp);
2516#ifndef DIAGNOSTIC
2517 if (vp->v_type == VDIR)
2518 panic("nfs: sillyrename dir");
2519#endif
2520 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2521 M_NFSREQ, M_WAITOK);
2522 sp->s_cred = crdup(cnp->cn_cred);
2523 sp->s_dvp = dvp;
2524 VREF(dvp);
2525
2526 /* Fudge together a funny name */
2527 pid = cnp->cn_proc->p_pid;
2528 sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);
2529
2530 /* Try lookitups until we get one that isn't there */
2531 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2532 cnp->cn_proc, (struct nfsnode **)0) == 0) {
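		/*
		 * The name already exists; bump the character after ".nfs"
		 * (initially 'A') and try again, giving up past 'z'.
		 */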
2533 sp->s_name[4]++;
2534 if (sp->s_name[4] > 'z') {
2535 error = EINVAL;
2536 goto bad;
2537 }
2538 }
2539 error = nfs_renameit(dvp, cnp, sp);
2540 if (error)
2541 goto bad;
2542 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2543 cnp->cn_proc, &np);
2544 np->n_sillyrename = sp;
2545 return (0);
2546bad:
2547 vrele(sp->s_dvp);
2548 crfree(sp->s_cred);
2549 free((caddr_t)sp, M_NFSREQ);
2550 return (error);
2551}
2552
2553/*
2554 * Look up a file name and optionally either update the file handle or
2555 * allocate an nfsnode, depending on the value of npp.
2556 * npp == NULL --> just do the lookup
2557 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2558 * handled too
2559 * *npp != NULL --> update the file handle in the vnode
2560 */
2561static int
2562nfs_lookitup(dvp, name, len, cred, procp, npp)
2563 register struct vnode *dvp;
2564 const char *name;
2565 int len;
2566 struct ucred *cred;
2567 struct proc *procp;
2568 struct nfsnode **npp;
2569{
2570 register u_int32_t *tl;
2571 register caddr_t cp;
2572 register int32_t t1, t2;
2573 struct vnode *newvp = (struct vnode *)0;
2574 struct nfsnode *np, *dnp = VTONFS(dvp);
2575 caddr_t bpos, dpos, cp2;
2576 int error = 0, fhlen, attrflag;
2577 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2578 nfsfh_t *nfhp;
2579 int v3 = NFS_ISV3(dvp);
2580
2581 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2582 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2583 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2584 nfsm_fhtom(dvp, v3);
2585 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2586 nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred);
2587 if (npp && !error) {
2588 nfsm_getfh(nfhp, fhlen, v3);
2589 if (*npp) {
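			/*
			 * Update the file handle in the existing nfsnode,
			 * switching between the embedded small handle and a
			 * malloc'd big one as the new handle size requires.
			 */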
2590 np = *npp;
2591 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2592 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2593 np->n_fhp = &np->n_fh;
2594			} else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
2595				np->n_fhp = (nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
2596 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2597 np->n_fhsize = fhlen;
2598 newvp = NFSTOV(np);
2599 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2600 VREF(dvp);
2601 newvp = dvp;
2602 } else {
2603 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2604 if (error) {
2605 m_freem(mrep);
2606 return (error);
2607 }
2608 newvp = NFSTOV(np);
2609 }
2610 if (v3) {
2611 nfsm_postop_attr(newvp, attrflag);
2612 if (!attrflag && *npp == NULL) {
2613 m_freem(mrep);
2614 if (newvp == dvp)
2615 vrele(newvp);
2616 else
2617 vput(newvp);
2618 return (ENOENT);
2619 }
2620 } else
2621 nfsm_loadattr(newvp, (struct vattr *)0);
2622 }
2623 nfsm_reqdone;
2624 if (npp && *npp == NULL) {
2625 if (error) {
2626 if (newvp) {
2627 if (newvp == dvp)
2628 vrele(newvp);
2629 else
2630 vput(newvp);
2631 }
2632 } else
2633 *npp = np;
2634 }
2635 return (error);
2636}
2637
2638/*
2639 * Nfs Version 3 commit rpc
2640 */
2641int
2642nfs_commit(vp, offset, cnt, cred, procp)
2643 struct vnode *vp;
2644 u_quad_t offset;
2645 int cnt;
2646 struct ucred *cred;
2647 struct proc *procp;
2648{
2649 register caddr_t cp;
2650 register u_int32_t *tl;
2651 register int32_t t1, t2;
2652 register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2653 caddr_t bpos, dpos, cp2;
2654 int error = 0, wccflag = NFSV3_WCCRATTR;
2655 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2656
2657 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2658 return (0);
2659 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2660 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2661 nfsm_fhtom(vp, 1);
2662 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2663 txdr_hyper(offset, tl);
2664 tl += 2;
2665 *tl = txdr_unsigned(cnt);
2666 nfsm_request(vp, NFSPROC_COMMIT, procp, cred);
2667 nfsm_wcc_data(vp, wccflag);
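	/*
	 * Compare the write verifier returned by the server with the one
	 * cached in the mount point.  A mismatch means the server may have
	 * rebooted and lost unstable writes, so record the new verifier and
	 * return NFSERR_STALEWRITEVERF so the caller can redirty and rewrite
	 * the uncommitted buffers.
	 */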
2668 if (!error) {
2669 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2670 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2671 NFSX_V3WRITEVERF)) {
2672 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2673 NFSX_V3WRITEVERF);
2674 error = NFSERR_STALEWRITEVERF;
2675 }
2676 }
2677 nfsm_reqdone;
2678 return (error);
2679}
2680
2681/*
2682 * Kludge City..
2683 * - make nfs_bmap() essentially a no-op that does no translation
2684 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2685 * (Maybe I could use the process's page mapping, but I was concerned that
2686 * Kernel Write might not be enabled and also figured copyout() would do
2687 * a lot more work than bcopy() and also it currently happens in the
2688 *   context of the swapper process (2).)
2689 */
2690static int
2691nfs_bmap(ap)
2692 struct vop_bmap_args /* {
2693 struct vnode *a_vp;
2694 daddr_t a_bn;
2695 struct vnode **a_vpp;
2696 daddr_t *a_bnp;
2697 int *a_runp;
2698 int *a_runb;
2699 } */ *ap;
2700{
2701 register struct vnode *vp = ap->a_vp;
2702
2703 if (ap->a_vpp != NULL)
2704 *ap->a_vpp = vp;
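	/*
	 * Convert the logical block number to DEV_BSIZE units using the
	 * mount's I/O size; no real physical translation is performed.
	 */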
2705 if (ap->a_bnp != NULL)
2706 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2707 if (ap->a_runp != NULL)
2708 *ap->a_runp = 0;
2709 if (ap->a_runb != NULL)
2710 *ap->a_runb = 0;
2711 return (0);
2712}
2713
2714/*
2715 * Strategy routine.
2716 * For async requests when nfsiod(s) are running, queue the request by
2717 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2718 * request.
2719 */
2720static int
2721nfs_strategy(ap)
2722 struct vop_strategy_args *ap;
2723{
2724 register struct buf *bp = ap->a_bp;
2725 struct ucred *cr;
2726 struct proc *p;
2727 int error = 0;
2728
2729 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2730 KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
2731
2732 if (bp->b_flags & B_PHYS)
2733 panic("nfs physio");
2734
2735 if (bp->b_flags & B_ASYNC)
2736 p = (struct proc *)0;
2737 else
2738 p = curproc; /* XXX */
2739
2740 if (bp->b_flags & B_READ)
2741 cr = bp->b_rcred;
2742 else
2743 cr = bp->b_wcred;
2744
2745 /*
2746	 * If the op is asynchronous and an i/o daemon is waiting,
2747	 * queue the request and wake up a daemon to service it;
2748 * otherwise just do it ourselves.
2749 */
2750 if ((bp->b_flags & B_ASYNC) == 0 ||
2751 nfs_asyncio(bp, NOCRED, p))
2752 error = nfs_doio(bp, cr, p);
2753 return (error);
2754}
2755
2756/*
2757 * Mmap a file
2758 *
2759 * NB Currently unsupported.
2760 */
2761/* ARGSUSED */
2762static int
2763nfs_mmap(ap)
2764 struct vop_mmap_args /* {
2765 struct vnode *a_vp;
2766 int a_fflags;
2767 struct ucred *a_cred;
2768 struct proc *a_p;
2769 } */ *ap;
2770{
2771
2772 return (EINVAL);
2773}
2774
2775/*
2776 * fsync vnode op. Just call nfs_flush() with commit == 1.
2777 */
2778/* ARGSUSED */
2779static int
2780nfs_fsync(ap)
2781 struct vop_fsync_args /* {
2782 struct vnodeop_desc *a_desc;
2783 struct vnode * a_vp;
2784 struct ucred * a_cred;
2785 int a_waitfor;
2786 struct proc * a_p;
2787 } */ *ap;
2788{
2789
2790 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2791}
2792
2793/*
2794 * Flush all the blocks associated with a vnode.
2795 * Walk through the buffer pool and push any dirty pages
2796 * associated with the vnode.
2797 */
2798static int
2799nfs_flush(vp, cred, waitfor, p, commit)
2800 register struct vnode *vp;
2801 struct ucred *cred;
2802 int waitfor;
2803 struct proc *p;
2804 int commit;
2805{
2806 register struct nfsnode *np = VTONFS(vp);
2807 register struct buf *bp;
2808 register int i;
2809 struct buf *nbp;
2810 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2811 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2812 int passone = 1;
2813 u_quad_t off, endoff, toff;
2814 struct ucred* wcred = NULL;
2815 struct buf **bvec = NULL;
2816#ifndef NFS_COMMITBVECSIZ
2817#define NFS_COMMITBVECSIZ 20
2818#endif
2819 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2820 int bvecsize = 0, bveccount;
2821
2822 if (nmp->nm_flag & NFSMNT_INT)
2823 slpflag = PCATCH;
2824 if (!commit)
2825 passone = 0;
2826 /*
2827 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2828	 * server, but has not been committed to stable storage on the server
2829 * yet. On the first pass, the byte range is worked out and the commit
2830 * rpc is done. On the second pass, nfs_writebp() is called to do the
2831 * job.
2832 */
2833again:
2834 off = (u_quad_t)-1;
2835 endoff = 0;
2836 bvecpos = 0;
2837 if (NFS_ISV3(vp) && commit) {
2838 s = splbio();
2839 /*
2840		 * Count up how many buffers are waiting for a commit.
2841 */
2842 bveccount = 0;
2843 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2844 nbp = TAILQ_NEXT(bp, b_vnbufs);
2845 if (BUF_REFCNT(bp) == 0 &&
2846 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2847 == (B_DELWRI | B_NEEDCOMMIT))
2848 bveccount++;
2849 }
2850 /*
2851 * Allocate space to remember the list of bufs to commit. It is
2852 * important to use M_NOWAIT here to avoid a race with nfs_write.
2853 * If we can't get memory (for whatever reason), we will end up
2854 * committing the buffers one-by-one in the loop below.
2855 */
2856 if (bvec != NULL && bvec != bvec_on_stack)
2857 free(bvec, M_TEMP);
2858 if (bveccount > NFS_COMMITBVECSIZ) {
2859 bvec = (struct buf **)
2860 malloc(bveccount * sizeof(struct buf *),
2861 M_TEMP, M_NOWAIT);
2862 if (bvec == NULL) {
2863 bvec = bvec_on_stack;
2864 bvecsize = NFS_COMMITBVECSIZ;
2865 } else
2866 bvecsize = bveccount;
2867 } else {
2868 bvec = bvec_on_stack;
2869 bvecsize = NFS_COMMITBVECSIZ;
2870 }
2871 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2872 nbp = TAILQ_NEXT(bp, b_vnbufs);
2873 if (bvecpos >= bvecsize)
2874 break;
2875 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2876 (B_DELWRI | B_NEEDCOMMIT) ||
2877 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2878 continue;
2879 bremfree(bp);
2880 /*
2881 * Work out if all buffers are using the same cred
2882 * so we can deal with them all with one commit.
2883 *
2884 * NOTE: we are not clearing B_DONE here, so we have
2885 * to do it later on in this routine if we intend to
2886 * initiate I/O on the bp.
2887 *
2888 * Note: to avoid loopback deadlocks, we do not
2889 * assign b_runningbufspace.
2890 */
2891 if (wcred == NULL)
2892 wcred = bp->b_wcred;
2893 else if (wcred != bp->b_wcred)
2894 wcred = NOCRED;
2895 bp->b_flags |= B_WRITEINPROG;
2896 vfs_busy_pages(bp, 1);
2897
2898 /*
2899 * bp is protected by being locked, but nbp is not
2900 * and vfs_busy_pages() may sleep. We have to
2901 * recalculate nbp.
2902 */
2903 nbp = TAILQ_NEXT(bp, b_vnbufs);
2904
2905 /*
2906 * A list of these buffers is kept so that the
2907 * second loop knows which buffers have actually
2908 * been committed. This is necessary, since there
2909 * may be a race between the commit rpc and new
2910 * uncommitted writes on the file.
2911 */
2912 bvec[bvecpos++] = bp;
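			/*
			 * Track the lowest and highest dirty byte offsets
			 * seen so far so that a single commit rpc can cover
			 * the whole range when all buffers share one cred.
			 */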
2913 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2914 bp->b_dirtyoff;
2915 if (toff < off)
2916 off = toff;
2917 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2918 if (toff > endoff)
2919 endoff = toff;
2920 }
2921 splx(s);
2922 }
2923 if (bvecpos > 0) {
2924 /*
2925 * Commit data on the server, as required.
2926 * If all bufs are using the same wcred, then use that with
2927 * one call for all of them, otherwise commit each one
2928 * separately.
2929 */
2930 if (wcred != NOCRED)
2931 retv = nfs_commit(vp, off, (int)(endoff - off),
2932 wcred, p);
2933 else {
2934 retv = 0;
2935 for (i = 0; i < bvecpos; i++) {
2936 off_t off, size;
2937 bp = bvec[i];
2938 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2939 bp->b_dirtyoff;
2940 size = (u_quad_t)(bp->b_dirtyend
2941 - bp->b_dirtyoff);
2942 retv = nfs_commit(vp, off, (int)size,
2943 bp->b_wcred, p);
2944 if (retv) break;
2945 }
2946 }
2947
2948 if (retv == NFSERR_STALEWRITEVERF)
2949 nfs_clearcommit(vp->v_mount);
2950
2951 /*
2952 * Now, either mark the blocks I/O done or mark the
2953 * blocks dirty, depending on whether the commit
2954 * succeeded.
2955 */
2956 for (i = 0; i < bvecpos; i++) {
2957 bp = bvec[i];
2958 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2959 if (retv) {
2960 /*
2961 * Error, leave B_DELWRI intact
2962 */
2963 vfs_unbusy_pages(bp);
2964 brelse(bp);
2965 } else {
2966 /*
2967 * Success, remove B_DELWRI ( bundirty() ).
2968 *
2969 * b_dirtyoff/b_dirtyend seem to be NFS
2970 * specific. We should probably move that
2971 * into bundirty(). XXX
2972 */
2973 s = splbio();
2974 vp->v_numoutput++;
2975 bp->b_flags |= B_ASYNC;
2976 bundirty(bp);
2977 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2978 bp->b_dirtyoff = bp->b_dirtyend = 0;
2979 splx(s);
2980 biodone(bp);
2981 }
2982 }
2983 }
2984
2985 /*
2986 * Start/do any write(s) that are required.
2987 */
2988loop:
2989 s = splbio();
2990 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2991 nbp = TAILQ_NEXT(bp, b_vnbufs);
2992 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
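			/*
			 * The buffer is busy.  Unless this is a waiting sync
			 * on the final pass, skip it; otherwise sleep for the
			 * lock (LK_SLEEPFAIL makes the lock attempt itself
			 * fail after the sleep) and rescan the list.
			 */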
2993 if (waitfor != MNT_WAIT || passone)
2994 continue;
2995 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2996 "nfsfsync", slpflag, slptimeo);
2997 splx(s);
2998 if (error == 0)
2999 panic("nfs_fsync: inconsistent lock");
3000 if (error == ENOLCK)
3001 goto loop;
3002 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
3003 error = EINTR;
3004 goto done;
3005 }
3006 if (slpflag == PCATCH) {
3007 slpflag = 0;
3008 slptimeo = 2 * hz;
3009 }
3010 goto loop;
3011 }
3012 if ((bp->b_flags & B_DELWRI) == 0)
3013 panic("nfs_fsync: not dirty");
3014 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
3015 BUF_UNLOCK(bp);
3016 continue;
3017 }
3018 bremfree(bp);
3019 if (passone || !commit)
3020 bp->b_flags |= B_ASYNC;
3021 else
3022 bp->b_flags |= B_ASYNC | B_WRITEINPROG;
3023 splx(s);
3024 VOP_BWRITE(bp->b_vp, bp);
3025 goto loop;
3026 }
3027 splx(s);
3028 if (passone) {
3029 passone = 0;
3030 goto again;
3031 }
3032 if (waitfor == MNT_WAIT) {
3033 while (vp->v_numoutput) {
3034 vp->v_flag |= VBWAIT;
3035 error = tsleep((caddr_t)&vp->v_numoutput,
3036 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
3037 if (error) {
3038 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
3039 error = EINTR;
3040 goto done;
3041 }
3042 if (slpflag == PCATCH) {
3043 slpflag = 0;
3044 slptimeo = 2 * hz;
3045 }
3046 }
3047 }
3048 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
3049 goto loop;
3050 }
3051 }
3052 if (np->n_flag & NWRITEERR) {
3053 error = np->n_error;
3054 np->n_flag &= ~NWRITEERR;
3055 }
3056done:
3057 if (bvec != NULL && bvec != bvec_on_stack)
3058 free(bvec, M_TEMP);
3059 return (error);
3060}
3061
3062/*
3063 * NFS advisory byte-level locks.
3064 * Currently unsupported.
3065 */
3066static int
3067nfs_advlock(ap)
3068 struct vop_advlock_args /* {
3069 struct vnode *a_vp;
3070 caddr_t a_id;
3071 int a_op;
3072 struct flock *a_fl;
3073 int a_flags;
3074 } */ *ap;
3075{
3076 register struct nfsnode *np = VTONFS(ap->a_vp);
3077
3078 /*
3079 * The following kludge is to allow diskless support to work
3080 * until a real NFS lockd is implemented. Basically, just pretend
3081 * that this is a local lock.
3082 */
3083 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
3084}
3085
3086/*
3087 * Print out the contents of an nfsnode.
3088 */
3089static int
3090nfs_print(ap)
3091 struct vop_print_args /* {
3092 struct vnode *a_vp;
3093 } */ *ap;
3094{
3095 register struct vnode *vp = ap->a_vp;
3096 register struct nfsnode *np = VTONFS(vp);
3097
3098 printf("tag VT_NFS, fileid %ld fsid 0x%x",
3099 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3100 if (vp->v_type == VFIFO)
3101 fifo_printinfo(vp);
3102 printf("\n");
3103 return (0);
3104}
3105
3106/*
3107 * Just call nfs_writebp() with the force argument set to 1.
3108 *
3109 * NOTE: B_DONE may or may not be set in a_bp on call.
3110 */
3111static int
3112nfs_bwrite(ap)
3113 struct vop_bwrite_args /* {
3114		struct buf *a_bp;
3115 } */ *ap;
3116{
3117 return (nfs_writebp(ap->a_bp, 1, curproc));
3118}
3119
3120/*
3121 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
3122 * the force flag is one and it also handles the B_NEEDCOMMIT flag. We set
3123 * B_CACHE if this is a VMIO buffer.
3124 */
3125int
3126nfs_writebp(bp, force, procp)
3127 register struct buf *bp;
3128 int force;
3129 struct proc *procp;
3130{
3131 int s;
3132 int oldflags = bp->b_flags;
3133#if 0
3134 int retv = 1;
3135 off_t off;
3136#endif
3137
3138 if (BUF_REFCNT(bp) == 0)
3139 panic("bwrite: buffer is not locked???");
3140
3141 if (bp->b_flags & B_INVAL) {
3142 brelse(bp);
3143 return(0);
3144 }
3145
3146 bp->b_flags |= B_CACHE;
3147
3148 /*
3149 * Undirty the bp. We will redirty it later if the I/O fails.
3150 */
3151
3152 s = splbio();
3153 bundirty(bp);
3154 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3155
3156 bp->b_vp->v_numoutput++;
3157 curproc->p_stats->p_ru.ru_oublock++;
3158 splx(s);
3159
3160 /*
3161 * Note: to avoid loopback deadlocks, we do not
3162 * assign b_runningbufspace.
3163 */
3164 vfs_busy_pages(bp, 1);
3165
3166 if (force)
3167 bp->b_flags |= B_WRITEINPROG;
3168 BUF_KERNPROC(bp);
3169 VOP_STRATEGY(bp->b_vp, bp);
3170
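	/*
	 * For a synchronous write, wait for the I/O to finish, reassign the
	 * buffer if it was a delayed write, and release it, returning the
	 * I/O status.  Asynchronous writes return immediately.
	 */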
3171	if ((oldflags & B_ASYNC) == 0) {
3172 int rtval = biowait(bp);
3173
3174 if (oldflags & B_DELWRI) {
3175 s = splbio();
3176 reassignbuf(bp, bp->b_vp);
3177 splx(s);
3178 }
3179
3180 brelse(bp);
3181 return (rtval);
3182 }
3183
3184 return (0);
3185}
3186
3187/*
3188 * nfs special file access vnode op.
3189 * Essentially just get vattr and then imitate iaccess() since the device is
3190 * local to the client.
3191 */
3192static int
3193nfsspec_access(ap)
3194 struct vop_access_args /* {
3195 struct vnode *a_vp;
3196 int a_mode;
3197 struct ucred *a_cred;
3198 struct proc *a_p;
3199 } */ *ap;
3200{
3201 register struct vattr *vap;
3202 register gid_t *gp;
3203 register struct ucred *cred = ap->a_cred;
3204 struct vnode *vp = ap->a_vp;
3205 mode_t mode = ap->a_mode;
3206 struct vattr vattr;
3207 register int i;
3208 int error;
3209
3210 /*
3211 * Disallow write attempts on filesystems mounted read-only;
3212 * unless the file is a socket, fifo, or a block or character
3213 * device resident on the filesystem.
3214 */
3215 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3216 switch (vp->v_type) {
3217 case VREG:
3218 case VDIR:
3219 case VLNK:
3220 return (EROFS);
3221 default:
3222 break;
3223 }
3224 }
3225 /*
3226 * If you're the super-user,
3227 * you always get access.
3228 */
3229 if (cred->cr_uid == 0)
3230 return (0);
3231 vap = &vattr;
3232 error = VOP_GETATTR(vp, vap, cred, ap->a_p);
3233 if (error)
3234 return (error);
3235 /*
3236 * Access check is based on only one of owner, group, public.
3237 * If not owner, then check group. If not a member of the
3238 * group, then check public access.
3239 */
3240 if (cred->cr_uid != vap->va_uid) {
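		/*
		 * Shift the requested rwx bits from the owner position to
		 * the group position, and again to the "other" position if
		 * the group does not match either.
		 */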
3241 mode >>= 3;
3242 gp = cred->cr_groups;
3243 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3244 if (vap->va_gid == *gp)
3245 goto found;
3246 mode >>= 3;
3247found:
3248 ;
3249 }
3250 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3251 return (error);
3252}
3253
3254/*
3255 * Read wrapper for special devices.
3256 */
3257static int
3258nfsspec_read(ap)
3259 struct vop_read_args /* {
3260 struct vnode *a_vp;
3261 struct uio *a_uio;
3262 int a_ioflag;
3263 struct ucred *a_cred;
3264 } */ *ap;
3265{
3266 register struct nfsnode *np = VTONFS(ap->a_vp);
3267
3268 /*
3269 * Set access flag.
3270 */
3271 np->n_flag |= NACC;
3272 getnanotime(&np->n_atim);
3273 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3274}
3275
3276/*
3277 * Write wrapper for special devices.
3278 */
3279static int
3280nfsspec_write(ap)
3281 struct vop_write_args /* {
3282 struct vnode *a_vp;
3283 struct uio *a_uio;
3284 int a_ioflag;
3285 struct ucred *a_cred;
3286 } */ *ap;
3287{
3288 register struct nfsnode *np = VTONFS(ap->a_vp);
3289
3290 /*
3291 * Set update flag.
3292 */
3293 np->n_flag |= NUPD;
3294 getnanotime(&np->n_mtim);
3295 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3296}
3297
3298/*
3299 * Close wrapper for special devices.
3300 *
3301 * Update the times on the nfsnode then do device close.
3302 */
3303static int
3304nfsspec_close(ap)
3305 struct vop_close_args /* {
3306 struct vnode *a_vp;
3307 int a_fflag;
3308 struct ucred *a_cred;
3309 struct proc *a_p;
3310 } */ *ap;
3311{
3312 register struct vnode *vp = ap->a_vp;
3313 register struct nfsnode *np = VTONFS(vp);
3314 struct vattr vattr;
3315
3316 if (np->n_flag & (NACC | NUPD)) {
3317 np->n_flag |= NCHG;
3318 if (vp->v_usecount == 1 &&
3319 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3320 VATTR_NULL(&vattr);
3321 if (np->n_flag & NACC)
3322 vattr.va_atime = np->n_atim;
3323 if (np->n_flag & NUPD)
3324 vattr.va_mtime = np->n_mtim;
3325 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3326 }
3327 }
3328 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3329}
3330
3331/*
3332 * Read wrapper for fifos.
3333 */
3334static int
3335nfsfifo_read(ap)
3336 struct vop_read_args /* {
3337 struct vnode *a_vp;
3338 struct uio *a_uio;
3339 int a_ioflag;
3340 struct ucred *a_cred;
3341 } */ *ap;
3342{
3343 register struct nfsnode *np = VTONFS(ap->a_vp);
3344
3345 /*
3346 * Set access flag.
3347 */
3348 np->n_flag |= NACC;
3349 getnanotime(&np->n_atim);
3350 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3351}
3352
3353/*
3354 * Write wrapper for fifos.
3355 */
3356static int
3357nfsfifo_write(ap)
3358 struct vop_write_args /* {
3359 struct vnode *a_vp;
3360 struct uio *a_uio;
3361 int a_ioflag;
3362 struct ucred *a_cred;
3363 } */ *ap;
3364{
3365 register struct nfsnode *np = VTONFS(ap->a_vp);
3366
3367 /*
3368 * Set update flag.
3369 */
3370 np->n_flag |= NUPD;
3371 getnanotime(&np->n_mtim);
3372 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3373}
3374
3375/*
3376 * Close wrapper for fifos.
3377 *
3378 * Update the times on the nfsnode then do fifo close.
3379 */
3380static int
3381nfsfifo_close(ap)
3382 struct vop_close_args /* {
3383 struct vnode *a_vp;
3384 int a_fflag;
3385 struct ucred *a_cred;
3386 struct proc *a_p;
3387 } */ *ap;
3388{
3389 register struct vnode *vp = ap->a_vp;
3390 register struct nfsnode *np = VTONFS(vp);
3391 struct vattr vattr;
3392 struct timespec ts;
3393
3394 if (np->n_flag & (NACC | NUPD)) {
3395 getnanotime(&ts);
3396 if (np->n_flag & NACC)
3397 np->n_atim = ts;
3398 if (np->n_flag & NUPD)
3399 np->n_mtim = ts;
3400 np->n_flag |= NCHG;
3401 if (vp->v_usecount == 1 &&
3402 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3403 VATTR_NULL(&vattr);
3404 if (np->n_flag & NACC)
3405 vattr.va_atime = np->n_atim;
3406 if (np->n_flag & NUPD)
3407 vattr.va_mtime = np->n_mtim;
3408 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3409 }
3410 }
3411 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3412}
3413