[dragonfly.git] / sys / vfs / nfs / nfs_vnops.c
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_vnops.c	8.16 (Berkeley) 5/27/95
 * $FreeBSD: src/sys/nfs/nfs_vnops.c,v 1.150.2.5 2001/12/20 19:56:28 dillon Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_vnops.c,v 1.14 2003/10/10 22:01:13 dillon Exp $
 */

/*
 * vnode op calls for Sun NFS version 2 and 3
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/namei.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>

#include <vfs/fifofs/fifo.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nqnfs.h"

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

/* Defs */
#define	TRUE	1
#define	FALSE	0

/*
 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
 * calls are not in getblk() and brelse() so that they would not be necessary
 * here.
 */
#ifndef B_VMIO
#define vfs_busy_pages(bp, f)
#endif

static int nfsspec_read (struct vop_read_args *);
static int nfsspec_write (struct vop_write_args *);
static int nfsfifo_read (struct vop_read_args *);
static int nfsfifo_write (struct vop_write_args *);
static int nfsspec_close (struct vop_close_args *);
static int nfsfifo_close (struct vop_close_args *);
#define nfs_poll vop_nopoll
static int nfs_flush (struct vnode *,int,struct thread *,int);
static int nfs_setattrrpc (struct vnode *,struct vattr *,struct ucred *,struct thread *);
static int nfs_lookup (struct vop_lookup_args *);
static int nfs_create (struct vop_create_args *);
static int nfs_mknod (struct vop_mknod_args *);
static int nfs_open (struct vop_open_args *);
static int nfs_close (struct vop_close_args *);
static int nfs_access (struct vop_access_args *);
static int nfs_getattr (struct vop_getattr_args *);
static int nfs_setattr (struct vop_setattr_args *);
static int nfs_read (struct vop_read_args *);
static int nfs_mmap (struct vop_mmap_args *);
static int nfs_fsync (struct vop_fsync_args *);
static int nfs_remove (struct vop_remove_args *);
static int nfs_link (struct vop_link_args *);
static int nfs_rename (struct vop_rename_args *);
static int nfs_mkdir (struct vop_mkdir_args *);
static int nfs_rmdir (struct vop_rmdir_args *);
static int nfs_symlink (struct vop_symlink_args *);
static int nfs_readdir (struct vop_readdir_args *);
static int nfs_bmap (struct vop_bmap_args *);
static int nfs_strategy (struct vop_strategy_args *);
static int nfs_lookitup (struct vnode *, const char *, int,
		struct ucred *, struct thread *, struct nfsnode **);
static int nfs_sillyrename (struct vnode *,struct vnode *,struct componentname *);
static int nfsspec_access (struct vop_access_args *);
static int nfs_readlink (struct vop_readlink_args *);
static int nfs_print (struct vop_print_args *);
static int nfs_advlock (struct vop_advlock_args *);
static int nfs_bwrite (struct vop_bwrite_args *);
/*
 * Global vfs data structures for nfs
 */
vop_t **nfsv2_vnodeop_p;
static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
	{ &vop_default_desc, (vop_t *) vop_defaultop },
	{ &vop_access_desc, (vop_t *) nfs_access },
	{ &vop_advlock_desc, (vop_t *) nfs_advlock },
	{ &vop_bmap_desc, (vop_t *) nfs_bmap },
	{ &vop_bwrite_desc, (vop_t *) nfs_bwrite },
	{ &vop_close_desc, (vop_t *) nfs_close },
	{ &vop_create_desc, (vop_t *) nfs_create },
	{ &vop_fsync_desc, (vop_t *) nfs_fsync },
	{ &vop_getattr_desc, (vop_t *) nfs_getattr },
	{ &vop_getpages_desc, (vop_t *) nfs_getpages },
	{ &vop_putpages_desc, (vop_t *) nfs_putpages },
	{ &vop_inactive_desc, (vop_t *) nfs_inactive },
	{ &vop_islocked_desc, (vop_t *) vop_stdislocked },
	{ &vop_lease_desc, (vop_t *) vop_null },
	{ &vop_link_desc, (vop_t *) nfs_link },
	{ &vop_lock_desc, (vop_t *) vop_sharedlock },
	{ &vop_lookup_desc, (vop_t *) nfs_lookup },
	{ &vop_mkdir_desc, (vop_t *) nfs_mkdir },
	{ &vop_mknod_desc, (vop_t *) nfs_mknod },
	{ &vop_mmap_desc, (vop_t *) nfs_mmap },
	{ &vop_open_desc, (vop_t *) nfs_open },
	{ &vop_poll_desc, (vop_t *) nfs_poll },
	{ &vop_print_desc, (vop_t *) nfs_print },
	{ &vop_read_desc, (vop_t *) nfs_read },
	{ &vop_readdir_desc, (vop_t *) nfs_readdir },
	{ &vop_readlink_desc, (vop_t *) nfs_readlink },
	{ &vop_reclaim_desc, (vop_t *) nfs_reclaim },
	{ &vop_remove_desc, (vop_t *) nfs_remove },
	{ &vop_rename_desc, (vop_t *) nfs_rename },
	{ &vop_rmdir_desc, (vop_t *) nfs_rmdir },
	{ &vop_setattr_desc, (vop_t *) nfs_setattr },
	{ &vop_strategy_desc, (vop_t *) nfs_strategy },
	{ &vop_symlink_desc, (vop_t *) nfs_symlink },
	{ &vop_unlock_desc, (vop_t *) vop_stdunlock },
	{ &vop_write_desc, (vop_t *) nfs_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
VNODEOP_SET(nfsv2_vnodeop_opv_desc);

/*
 * Special device vnode ops
 */
vop_t **spec_nfsv2nodeop_p;
static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
	{ &vop_default_desc, (vop_t *) spec_vnoperate },
	{ &vop_access_desc, (vop_t *) nfsspec_access },
	{ &vop_close_desc, (vop_t *) nfsspec_close },
	{ &vop_fsync_desc, (vop_t *) nfs_fsync },
	{ &vop_getattr_desc, (vop_t *) nfs_getattr },
	{ &vop_inactive_desc, (vop_t *) nfs_inactive },
	{ &vop_islocked_desc, (vop_t *) vop_stdislocked },
	{ &vop_lock_desc, (vop_t *) vop_sharedlock },
	{ &vop_print_desc, (vop_t *) nfs_print },
	{ &vop_read_desc, (vop_t *) nfsspec_read },
	{ &vop_reclaim_desc, (vop_t *) nfs_reclaim },
	{ &vop_setattr_desc, (vop_t *) nfs_setattr },
	{ &vop_unlock_desc, (vop_t *) vop_stdunlock },
	{ &vop_write_desc, (vop_t *) nfsspec_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
	{ &spec_nfsv2nodeop_p, nfsv2_specop_entries };
VNODEOP_SET(spec_nfsv2nodeop_opv_desc);

vop_t **fifo_nfsv2nodeop_p;
static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
	{ &vop_default_desc, (vop_t *) fifo_vnoperate },
	{ &vop_access_desc, (vop_t *) nfsspec_access },
	{ &vop_close_desc, (vop_t *) nfsfifo_close },
	{ &vop_fsync_desc, (vop_t *) nfs_fsync },
	{ &vop_getattr_desc, (vop_t *) nfs_getattr },
	{ &vop_inactive_desc, (vop_t *) nfs_inactive },
	{ &vop_islocked_desc, (vop_t *) vop_stdislocked },
	{ &vop_lock_desc, (vop_t *) vop_sharedlock },
	{ &vop_print_desc, (vop_t *) nfs_print },
	{ &vop_read_desc, (vop_t *) nfsfifo_read },
	{ &vop_reclaim_desc, (vop_t *) nfs_reclaim },
	{ &vop_setattr_desc, (vop_t *) nfs_setattr },
	{ &vop_unlock_desc, (vop_t *) vop_stdunlock },
	{ &vop_write_desc, (vop_t *) nfsfifo_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
	{ &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);

static int nfs_mknodrpc (struct vnode *dvp, struct vnode **vpp,
		struct componentname *cnp,
		struct vattr *vap);
static int nfs_removerpc (struct vnode *dvp, const char *name,
		int namelen,
		struct ucred *cred, struct thread *td);
static int nfs_renamerpc (struct vnode *fdvp, const char *fnameptr,
		int fnamelen, struct vnode *tdvp,
		const char *tnameptr, int tnamelen,
		struct ucred *cred, struct thread *td);
static int nfs_renameit (struct vnode *sdvp,
		struct componentname *scnp,
		struct sillyrename *sp);

/*
 * Global variables
 */
extern u_int32_t nfs_true, nfs_false;
extern u_int32_t nfs_xdrneg1;
extern struct nfsstats nfsstats;
extern nfstype nfsv3_type[9];
struct thread *nfs_iodwant[NFS_MAXASYNCDAEMON];
struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
int nfs_numasync = 0;
#define	DIRHDSIZ	(sizeof (struct dirent) - (MAXNAMLEN + 1))

SYSCTL_DECL(_vfs_nfs);

static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
	&nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");

static int nfsv3_commit_on_close = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
	&nfsv3_commit_on_close, 0, "write+commit on close, else only write");
#if 0
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
	&nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");

SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
	&nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
#endif

#define	NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY		\
			| NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE	\
			| NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
static int
nfs3_access_otw(struct vnode *vp, int wmode,
		struct thread *td, struct ucred *cred)
{
	const int v3 = 1;
	u_int32_t *tl;
	int error = 0, attrflag;

	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	caddr_t bpos, dpos, cp2;
	int32_t t1, t2;
	caddr_t cp;
	u_int32_t rmode;
	struct nfsnode *np = VTONFS(vp);

	nfsstats.rpccnt[NFSPROC_ACCESS]++;
	nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
	nfsm_fhtom(vp, v3);
	nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
	*tl = txdr_unsigned(wmode);
	nfsm_request(vp, NFSPROC_ACCESS, td, cred);
	nfsm_postop_attr(vp, attrflag);
	if (!error) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		rmode = fxdr_unsigned(u_int32_t, *tl);
		np->n_mode = rmode;
		np->n_modeuid = cred->cr_uid;
		np->n_modestamp = time_second;
	}
	nfsm_reqdone;
	return error;
}

/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs version 3, use the access rpc to check accessibility. If file modes
 * are changed on the server, accesses might still fail later.
 */
static int
nfs_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int error = 0;
	u_int32_t mode, wmode;
	int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);

	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	/*
	 * For nfs v3, check to see if we have done this recently, and if
	 * so return our cached result instead of making an ACCESS call.
	 * If not, do an access rpc, otherwise you are stuck emulating
	 * ufs_access() locally using the vattr. This may not be correct,
	 * since the server may apply other access criteria such as
	 * client uid-->server uid mapping that we do not know about.
	 */
	if (v3) {
		if (ap->a_mode & VREAD)
			mode = NFSV3ACCESS_READ;
		else
			mode = 0;
		if (vp->v_type != VDIR) {
			if (ap->a_mode & VWRITE)
				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
			if (ap->a_mode & VEXEC)
				mode |= NFSV3ACCESS_EXECUTE;
		} else {
			if (ap->a_mode & VWRITE)
				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
					NFSV3ACCESS_DELETE);
			if (ap->a_mode & VEXEC)
				mode |= NFSV3ACCESS_LOOKUP;
		}
		/* XXX safety belt, only make blanket request if caching */
		if (nfsaccess_cache_timeout > 0) {
			wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
				NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
				NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
		} else {
			wmode = mode;
		}

		/*
		 * Does our cached result allow us to give a definite yes to
		 * this request?
		 */
		if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
		    (ap->a_cred->cr_uid == np->n_modeuid) &&
		    ((np->n_mode & mode) == mode)) {
			nfsstats.accesscache_hits++;
		} else {
			/*
			 * Either a no, or a don't know.  Go to the wire.
			 */
			nfsstats.accesscache_misses++;
			error = nfs3_access_otw(vp, wmode, ap->a_td, ap->a_cred);
			if (!error) {
				if ((np->n_mode & mode) != mode) {
					error = EACCES;
				}
			}
		}
	} else {
		if ((error = nfsspec_access(ap)) != 0)
			return (error);

		/*
		 * Attempt to prevent a mapped root from accessing a file
		 * which it shouldn't.  We try to read a byte from the file
		 * if the user is root and the file is not zero length.
		 * After calling nfsspec_access, we should have the correct
		 * file size cached.
		 */
		if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
		    && VTONFS(vp)->n_size > 0) {
			struct iovec aiov;
			struct uio auio;
			char buf[1];

			aiov.iov_base = buf;
			aiov.iov_len = 1;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = 0;
			auio.uio_resid = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = ap->a_td;

			if (vp->v_type == VREG) {
				error = nfs_readrpc(vp, &auio);
			} else if (vp->v_type == VDIR) {
				char* bp;
				bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
				aiov.iov_base = bp;
				aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
				error = nfs_readdirrpc(vp, &auio);
				free(bp, M_TEMP);
			} else if (vp->v_type == VLNK) {
				error = nfs_readlinkrpc(vp, &auio);
			} else {
				error = EACCES;
			}
		}
	}
	/*
	 * [re]record creds for reading and/or writing if access
	 * was granted.
	 */
	if (error == 0) {
		if ((ap->a_mode & VREAD) && ap->a_cred != np->n_rucred) {
			crhold(ap->a_cred);
			if (np->n_rucred)
				crfree(np->n_rucred);
			np->n_rucred = ap->a_cred;
		}
		if ((ap->a_mode & VWRITE) && ap->a_cred != np->n_wucred) {
			crhold(ap->a_cred);
			if (np->n_wucred)
				crfree(np->n_wucred);
			np->n_wucred = ap->a_cred;
		}
	}
	return (error);
}

/*
 * nfs open vnode op
 * Check to see if the type is ok
 * and that deletion is not in progress.
 * For paged in text files, you will need to flush the page cache
 * if consistency is lost.
 */
/* ARGSUSED */
static int
nfs_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct vattr vattr;
	int error;

	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
#ifdef DIAGNOSTIC
		printf("open eacces vtyp=%d\n", vp->v_type);
#endif
		return (EACCES);
	}
	/*
	 * Get a valid lease. If cached data is stale, flush it.
	 */
	if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
			do {
				error = nqnfs_getlease(vp, ND_READ, ap->a_td);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1))
				    == EINTR) {
					return (error);
				}
				np->n_brev = np->n_lrev;
			}
		}
	} else {
		if (np->n_flag & NMODIFIED) {
			if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1))
			    == EINTR) {
				return (error);
			}
			np->n_attrstamp = 0;
			if (vp->v_type == VDIR)
				np->n_direofoffset = 0;
			error = VOP_GETATTR(vp, &vattr, ap->a_td);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, ap->a_td);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR)
					np->n_direofoffset = 0;
				if ((error = nfs_vinvalbuf(vp, V_SAVE,
				    ap->a_td, 1)) == EINTR) {
					return (error);
				}
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
		np->n_attrstamp = 0; /* For Open/Close consistency */
	return (0);
}

/*
 * nfs close vnode op
 * What an NFS client should do upon close after writing is a debatable issue.
 * Most NFS clients push delayed writes to the server upon close, basically for
 * two reasons:
 * 1 - So that any write errors may be reported back to the client process
 *     doing the close system call. By far the two most likely errors are
 *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
 * 2 - To put a worst case upper bound on cache inconsistency between
 *     multiple clients for the file.
 * There is also a consistency problem for Version 2 of the protocol w.r.t.
 * not being able to tell if other clients are writing a file concurrently,
 * since there is no way of knowing if the changed modify time in the reply
 * is only due to the write for this client.
 * (NFS Version 3 provides weak cache consistency data in the reply that
 * should be sufficient to detect and handle this case.)
 *
 * The current code does the following:
 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
 *                     or commit them (this satisfies 1 and 2 except for the
 *                     case where the server crashes after this close but
 *                     before the commit RPC, which is felt to be "good
 *                     enough"). Changing the last argument to nfs_flush() to
 *                     a 1 would force a commit operation, if it is felt a
 *                     commit is necessary now.
 * for NQNFS         - do nothing now, since 2 is dealt with via leases and
 *                     1 should be dealt with via an fsync() system call for
 *                     cases where write errors are important.
 */
/* ARGSUSED */
static int
nfs_close(ap)
	struct vop_close_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;

	if (vp->v_type == VREG) {
		if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
		    (np->n_flag & NMODIFIED)) {
			if (NFS_ISV3(vp)) {
				/*
				 * Under NFSv3 we have dirty buffers to dispose of.  We
				 * must flush them to the NFS server.  We have the option
				 * of waiting all the way through the commit rpc or just
				 * waiting for the initial write.  The default is to only
				 * wait through the initial write so the data is in the
				 * server's cache, which is roughly similar to the state
				 * a standard disk subsystem leaves the file in on close().
				 *
				 * We cannot clear the NMODIFIED bit in np->n_flag due to
				 * potential races with other processes, and certainly
				 * cannot clear it if we don't commit.
				 */
				int cm = nfsv3_commit_on_close ? 1 : 0;
				error = nfs_flush(vp, MNT_WAIT, ap->a_td, cm);
				/* np->n_flag &= ~NMODIFIED; */
			} else {
				error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			}
			np->n_attrstamp = 0;
		}
		if (np->n_flag & NWRITEERR) {
			np->n_flag &= ~NWRITEERR;
			error = np->n_error;
		}
	}
	return (error);
}

/*
 * nfs getattr call from vfs.
 */
static int
nfs_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	caddr_t cp;
	u_int32_t *tl;
	int32_t t1, t2;
	caddr_t bpos, dpos;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(vp);

	/*
	 * Update local times for special files.
	 */
	if (np->n_flag & (NACC | NUPD))
		np->n_flag |= NCHG;
	/*
	 * First look in the cache.
	 */
	if (nfs_getattrcache(vp, ap->a_vap) == 0)
		return (0);

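	/*
	 * For NFSv3 with access caching enabled, piggyback the attribute
	 * fetch on an ACCESS RPC: nfs3_access_otw() loads the post-op
	 * attributes carried in the reply, so the second nfs_getattrcache()
	 * call below can often succeed without a separate GETATTR RPC.
	 */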
	if (v3 && nfsaccess_cache_timeout > 0) {
		nfsstats.accesscache_misses++;
		nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_td, nfs_vpcred(vp, ND_CHECK));
		if (nfs_getattrcache(vp, ap->a_vap) == 0)
			return (0);
	}

	nfsstats.rpccnt[NFSPROC_GETATTR]++;
	nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
	nfsm_fhtom(vp, v3);
	nfsm_request(vp, NFSPROC_GETATTR, ap->a_td, nfs_vpcred(vp, ND_CHECK));
	if (!error) {
		nfsm_loadattr(vp, ap->a_vap);
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs setattr call.
 */
static int
nfs_setattr(ap)
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;
	u_quad_t tsize;

#ifndef nolint
	tsize = (u_quad_t)0;
#endif

	/*
	 * Setting of flags is not supported.
	 */
	if (vap->va_flags != VNOVAL)
		return (EOPNOTSUPP);

	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_mtime.tv_sec == VNOVAL &&
			    vap->va_atime.tv_sec == VNOVAL &&
			    vap->va_mode == (mode_t)VNOVAL &&
			    vap->va_uid == (uid_t)VNOVAL &&
			    vap->va_gid == (gid_t)VNOVAL)
				return (0);
			vap->va_size = VNOVAL;
			break;
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);

			/*
			 * We run vnode_pager_setsize() early (why?),
			 * we must set np->n_size now to avoid vinvalbuf
			 * V_SAVE races that might setsize a lower
			 * value.
			 */

			tsize = np->n_size;
			error = nfs_meta_setsize(vp, ap->a_td, vap->va_size);

			if (np->n_flag & NMODIFIED) {
				if (vap->va_size == 0)
					error = nfs_vinvalbuf(vp, 0, ap->a_td, 1);
				else
					error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
				if (error) {
					np->n_size = tsize;
					vnode_pager_setsize(vp, np->n_size);
					return (error);
				}
			}
			/* np->n_size has already been set to vap->va_size
			 * in nfs_meta_setsize(). We must set it again since
			 * nfs_loadattrcache() could be called through
			 * nfs_meta_setsize() and could modify np->n_size.
			 */
			np->n_vattr.va_size = np->n_size = vap->va_size;
		};
	} else if ((vap->va_mtime.tv_sec != VNOVAL ||
	    vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
	    vp->v_type == VREG &&
	    (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1)) == EINTR)
		return (error);
	error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_td);
	if (error && vap->va_size != VNOVAL) {
		np->n_size = np->n_vattr.va_size = tsize;
		vnode_pager_setsize(vp, np->n_size);
	}
	return (error);
}

/*
 * Do an nfs setattr rpc.
 */
static int
nfs_setattrrpc(struct vnode *vp, struct vattr *vap,
	struct ucred *cred, struct thread *td)
{
	struct nfsv2_sattr *sp;
	caddr_t cp;
	int32_t t1, t2;
	caddr_t bpos, dpos, cp2;
	u_int32_t *tl;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(vp);

	nfsstats.rpccnt[NFSPROC_SETATTR]++;
	nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
	nfsm_fhtom(vp, v3);
	if (v3) {
		nfsm_v3attrbuild(vap, TRUE);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
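		/*
		 * The NFSv3 SETATTR "guard" is sent as false: we do not ask
		 * the server to compare the object's ctime before applying
		 * the new attributes.
		 */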
		*tl = nfs_false;
	} else {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		if (vap->va_mode == (mode_t)VNOVAL)
			sp->sa_mode = nfs_xdrneg1;
		else
			sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
		if (vap->va_uid == (uid_t)VNOVAL)
			sp->sa_uid = nfs_xdrneg1;
		else
			sp->sa_uid = txdr_unsigned(vap->va_uid);
		if (vap->va_gid == (gid_t)VNOVAL)
			sp->sa_gid = nfs_xdrneg1;
		else
			sp->sa_gid = txdr_unsigned(vap->va_gid);
		sp->sa_size = txdr_unsigned(vap->va_size);
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(vp, NFSPROC_SETATTR, td, cred);
	if (v3) {
		nfsm_wcc_data(vp, wccflag);
	} else
		nfsm_loadattr(vp, (struct vattr *)0);
	nfsm_reqdone;
	return (error);
}

/*
 * nfs lookup call, one step at a time...
 * First look in cache
 * If not found, unlock the directory nfsnode and do the rpc
 */
static int
nfs_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	int flags = cnp->cn_flags;
	struct vnode *newvp;
	u_int32_t *tl;
	caddr_t cp;
	int32_t t1, t2;
	struct nfsmount *nmp;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	long len;
	nfsfh_t *fhp;
	struct nfsnode *np;
	int lockparent, wantparent, error = 0, attrflag, fhsize;
	int v3 = NFS_ISV3(dvp);
	struct thread *td = cnp->cn_td;

	*vpp = NULLVP;
	if ((flags & CNP_ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == NAMEI_DELETE || cnp->cn_nameiop == NAMEI_RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);
	lockparent = flags & CNP_LOCKPARENT;
	wantparent = flags & (CNP_LOCKPARENT|CNP_WANTPARENT);
	nmp = VFSTONFS(dvp->v_mount);
	np = VTONFS(dvp);
	if ((error = cache_lookup(dvp, NCPNULL, vpp, NCPPNULL, cnp)) && error != ENOENT) {
		struct vattr vattr;
		int vpid;

		if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) {
			*vpp = NULLVP;
			return (error);
		}

		newvp = *vpp;
		vpid = newvp->v_id;
		/*
		 * See the comment starting `Step through' in ufs/ufs_lookup.c
		 * for an explanation of the locking protocol
		 */
		if (dvp == newvp) {
			VREF(newvp);
			error = 0;
		} else if (flags & CNP_ISDOTDOT) {
			VOP_UNLOCK(dvp, 0, td);
			error = vget(newvp, LK_EXCLUSIVE, td);
			if (!error && lockparent && (flags & CNP_ISLASTCN))
				error = vn_lock(dvp, LK_EXCLUSIVE, td);
		} else {
			error = vget(newvp, LK_EXCLUSIVE, td);
			if (!lockparent || error || !(flags & CNP_ISLASTCN))
				VOP_UNLOCK(dvp, 0, td);
		}
		if (!error) {
			if (vpid == newvp->v_id) {
				if (!VOP_GETATTR(newvp, &vattr, td)
				    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
					nfsstats.lookupcache_hits++;
					if (cnp->cn_nameiop != NAMEI_LOOKUP &&
					    (flags & CNP_ISLASTCN))
						cnp->cn_flags |= CNP_SAVENAME;
					return (0);
				}
				cache_purge(newvp);
			}
			vput(newvp);
			if (lockparent && dvp != newvp && (flags & CNP_ISLASTCN))
				VOP_UNLOCK(dvp, 0, td);
		}
		error = vn_lock(dvp, LK_EXCLUSIVE, td);
		*vpp = NULLVP;
		if (error)
			return (error);
	}
	error = 0;
	newvp = NULLVP;
	nfsstats.lookupcache_misses++;
	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
	len = cnp->cn_namelen;
	nfsm_reqhead(dvp, NFSPROC_LOOKUP,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_td, cnp->cn_cred);
	if (error) {
		nfsm_postop_attr(dvp, attrflag);
		m_freem(mrep);
		goto nfsmout;
	}
	nfsm_getfh(fhp, fhsize, v3);

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == NAMEI_RENAME && wantparent && (flags & CNP_ISLASTCN)) {
		if (NFS_CMPFH(np, fhp, fhsize)) {
			m_freem(mrep);
			return (EISDIR);
		}
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return (error);
		}
		newvp = NFSTOV(np);
		if (v3) {
			nfsm_postop_attr(newvp, attrflag);
			nfsm_postop_attr(dvp, attrflag);
		} else
			nfsm_loadattr(newvp, (struct vattr *)0);
		*vpp = newvp;
		m_freem(mrep);
		cnp->cn_flags |= CNP_SAVENAME;
		if (!lockparent)
			VOP_UNLOCK(dvp, 0, td);
		return (0);
	}

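	/*
	 * For ".." the lock on the parent (dvp) is dropped before the ".."
	 * vnode is obtained; acquiring an ancestor while still holding the
	 * child would run against the usual top-down lock order and could
	 * deadlock with a concurrent forward lookup.  dvp is re-locked
	 * afterwards when the caller asked for a locked parent.
	 */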
	if (flags & CNP_ISDOTDOT) {
		VOP_UNLOCK(dvp, 0, td);
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
			return (error);
		}
		newvp = NFSTOV(np);
		if (lockparent && (flags & CNP_ISLASTCN) &&
		    (error = vn_lock(dvp, LK_EXCLUSIVE, td))) {
			vput(newvp);
			return (error);
		}
	} else if (NFS_CMPFH(np, fhp, fhsize)) {
		VREF(dvp);
		newvp = dvp;
	} else {
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return (error);
		}
		if (!lockparent || !(flags & CNP_ISLASTCN))
			VOP_UNLOCK(dvp, 0, td);
		newvp = NFSTOV(np);
	}
	if (v3) {
		nfsm_postop_attr(newvp, attrflag);
		nfsm_postop_attr(dvp, attrflag);
	} else
		nfsm_loadattr(newvp, (struct vattr *)0);
	if (cnp->cn_nameiop != NAMEI_LOOKUP && (flags & CNP_ISLASTCN))
		cnp->cn_flags |= CNP_SAVENAME;
	if ((cnp->cn_flags & CNP_MAKEENTRY) &&
	    (cnp->cn_nameiop != NAMEI_DELETE || !(flags & CNP_ISLASTCN))) {
		np->n_ctime = np->n_vattr.va_ctime.tv_sec;
		cache_enter(dvp, NCPNULL, newvp, cnp);
	}
	*vpp = newvp;
	nfsm_reqdone;
	if (error) {
		if (newvp != NULLVP) {
			vrele(newvp);
			*vpp = NULLVP;
		}
		if ((cnp->cn_nameiop == NAMEI_CREATE || cnp->cn_nameiop == NAMEI_RENAME) &&
		    (flags & CNP_ISLASTCN) && error == ENOENT) {
			if (!lockparent)
				VOP_UNLOCK(dvp, 0, td);
			if (dvp->v_mount->mnt_flag & MNT_RDONLY)
				error = EROFS;
			else
				error = EJUSTRETURN;
		}
		if (cnp->cn_nameiop != NAMEI_LOOKUP && (flags & CNP_ISLASTCN))
			cnp->cn_flags |= CNP_SAVENAME;
	}
	return (error);
}

/*
 * nfs read call.
 * Just call nfs_bioread() to do the work.
 */
static int
nfs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VREG)
		return (EPERM);
	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag));
}

/*
 * nfs readlink call
 */
static int
nfs_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VLNK)
		return (EINVAL);
	return (nfs_bioread(vp, ap->a_uio, 0));
}

/*
 * Do a readlink rpc.
 * Called by nfs_doio() from below the buffer cache.
 */
int
nfs_readlinkrpc(struct vnode *vp, struct uio *uiop)
{
	u_int32_t *tl;
	caddr_t cp;
	int32_t t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, len, attrflag;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(vp);

	nfsstats.rpccnt[NFSPROC_READLINK]++;
	nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
	nfsm_fhtom(vp, v3);
	nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, nfs_vpcred(vp, ND_CHECK));
	if (v3)
		nfsm_postop_attr(vp, attrflag);
	if (!error) {
		nfsm_strsiz(len, NFS_MAXPATHLEN);
		if (len == NFS_MAXPATHLEN) {
			struct nfsnode *np = VTONFS(vp);
			if (np->n_size && np->n_size < NFS_MAXPATHLEN)
				len = np->n_size;
		}
		nfsm_mtouio(uiop, len);
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs read rpc call
 * Ditto above
 */
int
nfs_readrpc(struct vnode *vp, struct uio *uiop)
{
	u_int32_t *tl;
	caddr_t cp;
	int32_t t1, t2;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct nfsmount *nmp;
	int error = 0, len, retlen, tsiz, eof, attrflag;
	int v3 = NFS_ISV3(vp);

#ifndef nolint
	eof = 0;
#endif
	nmp = VFSTONFS(vp->v_mount);
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return (EFBIG);
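	/*
	 * Break the request into READ RPCs of at most nm_rsize bytes each.
	 * The loop ends when the residual has been transferred, or early
	 * when the server signals EOF (v3) or returns a short read (v2).
	 */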
	while (tsiz > 0) {
		nfsstats.rpccnt[NFSPROC_READ]++;
		len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
		nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
		nfsm_fhtom(vp, v3);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
		if (v3) {
			txdr_hyper(uiop->uio_offset, tl);
			*(tl + 2) = txdr_unsigned(len);
		} else {
			*tl++ = txdr_unsigned(uiop->uio_offset);
			*tl++ = txdr_unsigned(len);
			*tl = 0;
		}
		nfsm_request(vp, NFSPROC_READ, uiop->uio_td, nfs_vpcred(vp, ND_READ));
		if (v3) {
			nfsm_postop_attr(vp, attrflag);
			if (error) {
				m_freem(mrep);
				goto nfsmout;
			}
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			eof = fxdr_unsigned(int, *(tl + 1));
		} else
			nfsm_loadattr(vp, (struct vattr *)0);
		nfsm_strsiz(retlen, nmp->nm_rsize);
		nfsm_mtouio(uiop, retlen);
		m_freem(mrep);
		tsiz -= retlen;
		if (v3) {
			if (eof || retlen == 0) {
				tsiz = 0;
			}
		} else if (retlen < len) {
			tsiz = 0;
		}
	}
nfsmout:
	return (error);
}

/*
 * nfs write call
 */
int
nfs_writerpc(vp, uiop, iomode, must_commit)
	struct vnode *vp;
	struct uio *uiop;
	int *iomode, *must_commit;
{
	u_int32_t *tl;
	caddr_t cp;
	int32_t t1, t2, backup;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
	int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;

#ifndef DIAGNOSTIC
	if (uiop->uio_iovcnt != 1)
		panic("nfs: writerpc iovcnt > 1");
#endif
	*must_commit = 0;
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return (EFBIG);
	while (tsiz > 0) {
		nfsstats.rpccnt[NFSPROC_WRITE]++;
		len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
		nfsm_reqhead(vp, NFSPROC_WRITE,
			NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
		nfsm_fhtom(vp, v3);
		if (v3) {
			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
			txdr_hyper(uiop->uio_offset, tl);
			tl += 2;
			*tl++ = txdr_unsigned(len);
			*tl++ = txdr_unsigned(*iomode);
			*tl = txdr_unsigned(len);
		} else {
			u_int32_t x;

			nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
			/* Set both "begin" and "current" to non-garbage. */
			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
			*tl++ = x;	/* "begin offset" */
			*tl++ = x;	/* "current offset" */
			x = txdr_unsigned(len);
			*tl++ = x;	/* total to this offset */
			*tl = x;	/* size of this write */
		}
		nfsm_uiotom(uiop, len);
		nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, nfs_vpcred(vp, ND_WRITE));
		if (v3) {
			wccflag = NFSV3_WCCCHK;
			nfsm_wcc_data(vp, wccflag);
			if (!error) {
				nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
					+ NFSX_V3WRITEVERF);
				rlen = fxdr_unsigned(int, *tl++);
				if (rlen == 0) {
					error = NFSERR_IO;
					m_freem(mrep);
					break;
				} else if (rlen < len) {
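					/*
					 * The server accepted only rlen of
					 * the len bytes we sent.  Back the
					 * uio up over the unwritten tail so
					 * the next loop iteration resends it
					 * at the correct offset.
					 */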
					backup = len - rlen;
					uiop->uio_iov->iov_base -= backup;
					uiop->uio_iov->iov_len += backup;
					uiop->uio_offset -= backup;
					uiop->uio_resid += backup;
					len = rlen;
				}
				commit = fxdr_unsigned(int, *tl++);

				/*
				 * Return the lowest commitment level
				 * obtained by any of the RPCs.
				 */
				if (committed == NFSV3WRITE_FILESYNC)
					committed = commit;
				else if (committed == NFSV3WRITE_DATASYNC &&
					commit == NFSV3WRITE_UNSTABLE)
					committed = commit;
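				/*
				 * Record the server's write verifier, and
				 * compare it against the cached copy on
				 * later writes.  A changed verifier means
				 * the server rebooted and may have lost
				 * unstable data, so *must_commit is set so
				 * the caller can rewrite anything that was
				 * only written UNSTABLE.
				 */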
				if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
					bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
						NFSX_V3WRITEVERF);
					nmp->nm_state |= NFSSTA_HASWRITEVERF;
				} else if (bcmp((caddr_t)tl,
				    (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
					*must_commit = 1;
					bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
						NFSX_V3WRITEVERF);
				}
			}
		} else
			nfsm_loadattr(vp, (struct vattr *)0);
		if (wccflag)
			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
		m_freem(mrep);
		if (error)
			break;
		tsiz -= len;
	}
nfsmout:
	if (vp->v_mount->mnt_flag & MNT_ASYNC)
		committed = NFSV3WRITE_FILESYNC;
	*iomode = committed;
	if (error)
		uiop->uio_resid = tsiz;
	return (error);
}

/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
 */
static int
nfs_mknodrpc(dvp, vpp, cnp, vap)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
	struct vattr *vap;
{
	struct nfsv2_sattr *sp;
	u_int32_t *tl;
	caddr_t cp;
	int32_t t1, t2;
	struct vnode *newvp = (struct vnode *)0;
	struct nfsnode *np = (struct nfsnode *)0;
	struct vattr vattr;
	char *cp2;
	caddr_t bpos, dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	u_int32_t rdev;
	int v3 = NFS_ISV3(dvp);

	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = txdr_unsigned(vap->va_rdev);
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = nfs_xdrneg1;
	else {
		return (EOPNOTSUPP);
	}
	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
		return (error);
	}
	nfsstats.rpccnt[NFSPROC_MKNOD]++;
	nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
		+ nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	if (v3) {
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		*tl++ = vtonfsv3_type(vap->va_type);
		nfsm_v3attrbuild(vap, FALSE);
		if (vap->va_type == VCHR || vap->va_type == VBLK) {
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(umajor(vap->va_rdev));
			*tl = txdr_unsigned(uminor(vap->va_rdev));
		}
	} else {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = rdev;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_td, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			if (newvp) {
				vput(newvp);
				newvp = (struct vnode *)0;
			}
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_td, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	if (error) {
		if (newvp)
			vput(newvp);
	} else {
		if (cnp->cn_flags & CNP_MAKEENTRY)
			cache_enter(dvp, NCPNULL, newvp, cnp);
		*vpp = newvp;
	}
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
static int
nfs_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
}

static u_long create_verf;
/*
 * nfs file create call
 */
static int
nfs_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsv2_sattr *sp;
	u_int32_t *tl;
	caddr_t cp;
	int32_t t1, t2;
	struct nfsnode *np = (struct nfsnode *)0;
	struct vnode *newvp = (struct vnode *)0;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct vattr vattr;
	int v3 = NFS_ISV3(dvp);

	/*
	 * Oops, not for me..
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
		return (error);
	}
	if (vap->va_vaflags & VA_EXCLUSIVE)
		fmode |= O_EXCL;
again:
	nfsstats.rpccnt[NFSPROC_CREATE]++;
	nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
		nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	if (v3) {
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		if (fmode & O_EXCL) {
			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
			nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
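			/*
			 * Build the 8-byte exclusive-create verifier from an
			 * IPv4 address of the host (when one is configured)
			 * plus an incrementing counter, so a retransmitted
			 * CREATE is recognized by the server as the same
			 * request rather than failing with EEXIST.
			 */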
#ifdef INET
			if (!TAILQ_EMPTY(&in_ifaddrhead))
				*tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
			else
#endif
				*tl++ = create_verf;
			*tl = ++create_verf;
		} else {
			*tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
			nfsm_v3attrbuild(vap, FALSE);
		}
	} else {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = 0;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_td, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			if (newvp) {
				vput(newvp);
				newvp = (struct vnode *)0;
			}
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_td, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	if (error) {
		if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
			fmode &= ~O_EXCL;
			goto again;
		}
		if (newvp)
			vput(newvp);
	} else if (v3 && (fmode & O_EXCL)) {
		/*
		 * We are normally called with only a partially initialized
		 * VAP.  Since the NFSv3 spec says that server may use the
		 * file attributes to store the verifier, the spec requires
		 * us to do a SETATTR RPC. FreeBSD servers store the verifier
		 * in atime, but we can't really assume that all servers will
		 * so we ensure that our SETATTR sets both atime and mtime.
		 */
		if (vap->va_mtime.tv_sec == VNOVAL)
			vfs_timestamp(&vap->va_mtime);
		if (vap->va_atime.tv_sec == VNOVAL)
			vap->va_atime = vap->va_mtime;
		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_td);
	}
	if (!error) {
		if (cnp->cn_flags & CNP_MAKEENTRY)
			cache_enter(dvp, NCPNULL, newvp, cnp);
		/*
		 * The new np may have enough info for access
		 * checks, make sure rucred and wucred are
		 * initialized for read and write rpc's.
		 */
		np = VTONFS(newvp);
		if (np->n_rucred == NULL)
			np->n_rucred = crhold(cnp->cn_cred);
		if (np->n_wucred == NULL)
			np->n_wucred = crhold(cnp->cn_cred);
		*ap->a_vpp = newvp;
	}
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If v_usecount > 1
 *	if a rename is not already in the works
 *	    call nfs_sillyrename() to set it up
 *   else
 *	do the remove rpc
 */
static int
nfs_remove(ap)
	struct vop_remove_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode * a_vp;
		struct componentname * a_cnp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;

#ifndef DIAGNOSTIC
	if ((cnp->cn_flags & CNP_HASBUF) == 0)
		panic("nfs_remove: no name");
	if (vp->v_usecount < 1)
		panic("nfs_remove: bad v_usecount");
#endif
	if (vp->v_type == VDIR)
		error = EPERM;
	else if (vp->v_usecount == 1 || (np->n_sillyrename &&
	    VOP_GETATTR(vp, &vattr, cnp->cn_td) == 0 &&
	    vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = nfs_vinvalbuf(vp, 0, cnp->cn_td, 1);
		/* Do the rpc */
		if (error != EINTR)
			error = nfs_removerpc(dvp, cnp->cn_nameptr,
				cnp->cn_namelen, cnp->cn_cred, cnp->cn_td);
		/*
		 * Kludge City: if the first reply to the remove rpc is lost,
		 * the reply to the retransmitted request will be ENOENT
		 * since the file was in fact removed.
		 * Therefore, we cheat and return success.
		 */
		if (error == ENOENT)
			error = 0;
	} else if (!np->n_sillyrename)
		error = nfs_sillyrename(dvp, vp, cnp);
	np->n_attrstamp = 0;
	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 */
int
nfs_removeit(struct sillyrename *sp)
{

	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen,
		sp->s_cred, NULL));
}

/*
 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 */
static int
nfs_removerpc(dvp, name, namelen, cred, td)
	struct vnode *dvp;
	const char *name;
	int namelen;
	struct ucred *cred;
	struct thread *td;
{
	u_int32_t *tl;
	caddr_t cp;
	int32_t t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_REMOVE]++;
	nfsm_reqhead(dvp, NFSPROC_REMOVE,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	return (error);
}

/*
 * nfs file rename call
 */
static int
nfs_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	int error;

#ifndef DIAGNOSTIC
	if ((tcnp->cn_flags & CNP_HASBUF) == 0 ||
	    (fcnp->cn_flags & CNP_HASBUF) == 0)
		panic("nfs_rename: no name");
#endif
	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	/*
	 * We have to flush B_DELWRI data prior to renaming
	 * the file.  If we don't, the delayed-write buffers
	 * can be flushed out later after the file has gone stale
	 * under NFSV3.  NFSV2 does not have this problem because
	 * ( as far as I can tell ) it flushes dirty buffers more
	 * often.
	 */

	VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_td);
	if (tvp)
		VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_td);

	/*
	 * If the tvp exists and is in use, sillyrename it before doing the
	 * rename of the new file over it.
	 * XXX Can't sillyrename a directory.
	 */
	if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
		vput(tvp);
		tvp = NULL;
	}

	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
		tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
		tcnp->cn_td);

	if (fvp->v_type == VDIR) {
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tdvp);
		cache_purge(fdvp);
	}

out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs file rename rpc called from nfs_remove() above
 */
static int
nfs_renameit(sdvp, scnp, sp)
	struct vnode *sdvp;
	struct componentname *scnp;
	struct sillyrename *sp;
{
	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
		sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_td));
}

/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 */
static int
nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, td)
	struct vnode *fdvp;
	const char *fnameptr;
	int fnamelen;
	struct vnode *tdvp;
	const char *tnameptr;
	int tnamelen;
	struct ucred *cred;
	struct thread *td;
{
	u_int32_t *tl;
	caddr_t cp;
	int32_t t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(fdvp);

	nfsstats.rpccnt[NFSPROC_RENAME]++;
	nfsm_reqhead(fdvp, NFSPROC_RENAME,
		(NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
		nfsm_rndup(tnamelen));
	nfsm_fhtom(fdvp, v3);
	nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
	nfsm_fhtom(tdvp, v3);
	nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
	nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
	if (v3) {
		nfsm_wcc_data(fdvp, fwccflag);
		nfsm_wcc_data(tdvp, twccflag);
	}
	nfsm_reqdone;
	VTONFS(fdvp)->n_flag |= NMODIFIED;
	VTONFS(tdvp)->n_flag |= NMODIFIED;
	if (!fwccflag)
		VTONFS(fdvp)->n_attrstamp = 0;
	if (!twccflag)
		VTONFS(tdvp)->n_attrstamp = 0;
	return (error);
}
1762
1763/*
1764 * nfs hard link create call
1765 */
1766static int
1767nfs_link(ap)
1768 struct vop_link_args /* {
1769 struct vnode *a_tdvp;
1770 struct vnode *a_vp;
1771 struct componentname *a_cnp;
1772 } */ *ap;
1773{
1774 struct vnode *vp = ap->a_vp;
1775 struct vnode *tdvp = ap->a_tdvp;
1776 struct componentname *cnp = ap->a_cnp;
1777 u_int32_t *tl;
1778 caddr_t cp;
1779 int32_t t1, t2;
1780 caddr_t bpos, dpos, cp2;
1781 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1782 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1783 int v3;
1784
1785 if (vp->v_mount != tdvp->v_mount) {
1786 return (EXDEV);
1787 }
1788
1789 /*
1790 * Push all writes to the server, so that the attribute cache
1791 * doesn't get "out of sync" with the server.
1792 * XXX There should be a better way!
1793 */
3b568787 1794 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_td);
1795
1796 v3 = NFS_ISV3(vp);
1797 nfsstats.rpccnt[NFSPROC_LINK]++;
1798 nfsm_reqhead(vp, NFSPROC_LINK,
1799 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1800 nfsm_fhtom(vp, v3);
1801 nfsm_fhtom(tdvp, v3);
1802 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
dadab5e9 1803 nfsm_request(vp, NFSPROC_LINK, cnp->cn_td, cnp->cn_cred);
1804 if (v3) {
1805 nfsm_postop_attr(vp, attrflag);
1806 nfsm_wcc_data(tdvp, wccflag);
1807 }
1808 nfsm_reqdone;
1809 VTONFS(tdvp)->n_flag |= NMODIFIED;
1810 if (!attrflag)
1811 VTONFS(vp)->n_attrstamp = 0;
1812 if (!wccflag)
1813 VTONFS(tdvp)->n_attrstamp = 0;
1814 /*
1815 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1816 */
1817 if (error == EEXIST)
1818 error = 0;
1819 return (error);
1820}
1821
1822/*
1823 * nfs symbolic link create call
1824 */
1825static int
1826nfs_symlink(ap)
1827 struct vop_symlink_args /* {
1828 struct vnode *a_dvp;
1829 struct vnode **a_vpp;
1830 struct componentname *a_cnp;
1831 struct vattr *a_vap;
1832 char *a_target;
1833 } */ *ap;
1834{
1835 struct vnode *dvp = ap->a_dvp;
1836 struct vattr *vap = ap->a_vap;
1837 struct componentname *cnp = ap->a_cnp;
1838 struct nfsv2_sattr *sp;
1839 u_int32_t *tl;
1840 caddr_t cp;
1841 int32_t t1, t2;
1842 caddr_t bpos, dpos, cp2;
1843 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1844 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1845 struct vnode *newvp = (struct vnode *)0;
1846 int v3 = NFS_ISV3(dvp);
1847
1848 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1849 slen = strlen(ap->a_target);
1850 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1851 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1852 nfsm_fhtom(dvp, v3);
1853 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1854 if (v3) {
1855 nfsm_v3attrbuild(vap, FALSE);
1856 }
1857 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1858 if (!v3) {
1859 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1860 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1861 sp->sa_uid = nfs_xdrneg1;
1862 sp->sa_gid = nfs_xdrneg1;
1863 sp->sa_size = nfs_xdrneg1;
1864 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1865 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1866 }
1867
1868 /*
1869 * Issue the NFS request and get the rpc response.
1870 *
1871 * Only NFSv3 responses returning an error of 0 actually return
1872 * a file handle that can be converted into newvp without having
1873 * to do an extra lookup rpc.
1874 */
dadab5e9 1875 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_td, cnp->cn_cred);
1876 if (v3) {
1877 if (error == 0)
1878 nfsm_mtofh(dvp, newvp, v3, gotvp);
1879 nfsm_wcc_data(dvp, wccflag);
1880 }
1881
1882 /*
1883 * Error paths in the nfsm_* macros jump out to here; mrep is also freed.
1884 */
1885
1886 nfsm_reqdone;
1887
1888 /*
1889 * If we get an EEXIST error, silently convert it to no-error
1890 * in case of an NFS retry.
1891 */
1892 if (error == EEXIST)
1893 error = 0;
1894
1895 /*
1896 * If we do not have (or no longer have) an error, and we could
1897 * not extract the newvp from the response because the request was
1898 * NFSv2 or the error was EEXIST, we have to do a lookup in order
1899 * to obtain a newvp to return.
1900 */
1901 if (error == 0 && newvp == NULL) {
1902 struct nfsnode *np = NULL;
1903
1904 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
dadab5e9 1905 cnp->cn_cred, cnp->cn_td, &np);
1906 if (!error)
1907 newvp = NFSTOV(np);
1908 }
1909 if (error) {
1910 if (newvp)
1911 vput(newvp);
1912 } else {
1913 *ap->a_vpp = newvp;
1914 }
1915 VTONFS(dvp)->n_flag |= NMODIFIED;
1916 if (!wccflag)
1917 VTONFS(dvp)->n_attrstamp = 0;
1918 return (error);
1919}
1920
1921/*
1922 * nfs make dir call
1923 */
1924static int
1925nfs_mkdir(ap)
1926 struct vop_mkdir_args /* {
1927 struct vnode *a_dvp;
1928 struct vnode **a_vpp;
1929 struct componentname *a_cnp;
1930 struct vattr *a_vap;
1931 } */ *ap;
1932{
1933 struct vnode *dvp = ap->a_dvp;
1934 struct vattr *vap = ap->a_vap;
1935 struct componentname *cnp = ap->a_cnp;
1936 struct nfsv2_sattr *sp;
1937 u_int32_t *tl;
1938 caddr_t cp;
1939 int32_t t1, t2;
1940 int len;
1941 struct nfsnode *np = (struct nfsnode *)0;
1942 struct vnode *newvp = (struct vnode *)0;
1943 caddr_t bpos, dpos, cp2;
1944 int error = 0, wccflag = NFSV3_WCCRATTR;
1945 int gotvp = 0;
1946 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1947 struct vattr vattr;
1948 int v3 = NFS_ISV3(dvp);
1949
3b568787 1950 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
1951 return (error);
1952 }
1953 len = cnp->cn_namelen;
1954 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1955 nfsm_reqhead(dvp, NFSPROC_MKDIR,
1956 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1957 nfsm_fhtom(dvp, v3);
1958 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1959 if (v3) {
1960 nfsm_v3attrbuild(vap, FALSE);
1961 } else {
1962 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1963 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1964 sp->sa_uid = nfs_xdrneg1;
1965 sp->sa_gid = nfs_xdrneg1;
1966 sp->sa_size = nfs_xdrneg1;
1967 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1968 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1969 }
dadab5e9 1970 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_td, cnp->cn_cred);
1971 if (!error)
1972 nfsm_mtofh(dvp, newvp, v3, gotvp);
1973 if (v3)
1974 nfsm_wcc_data(dvp, wccflag);
1975 nfsm_reqdone;
1976 VTONFS(dvp)->n_flag |= NMODIFIED;
1977 if (!wccflag)
1978 VTONFS(dvp)->n_attrstamp = 0;
1979 /*
1980 * Kludge: Map EEXIST => 0, assuming that it is a reply to a retry,
1981 * if we can succeed in looking up the directory.
1982 */
1983 if (error == EEXIST || (!error && !gotvp)) {
1984 if (newvp) {
1985 vrele(newvp);
1986 newvp = (struct vnode *)0;
1987 }
1988 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
dadab5e9 1989 cnp->cn_td, &np);
1990 if (!error) {
1991 newvp = NFSTOV(np);
1992 if (newvp->v_type != VDIR)
1993 error = EEXIST;
1994 }
1995 }
1996 if (error) {
1997 if (newvp)
1998 vrele(newvp);
1999 } else
2000 *ap->a_vpp = newvp;
2001 return (error);
2002}
2003
2004/*
2005 * nfs remove directory call
2006 */
2007static int
2008nfs_rmdir(ap)
2009 struct vop_rmdir_args /* {
2010 struct vnode *a_dvp;
2011 struct vnode *a_vp;
2012 struct componentname *a_cnp;
2013 } */ *ap;
2014{
2015 struct vnode *vp = ap->a_vp;
2016 struct vnode *dvp = ap->a_dvp;
2017 struct componentname *cnp = ap->a_cnp;
2018 u_int32_t *tl;
2019 caddr_t cp;
2020 int32_t t1, t2;
2021 caddr_t bpos, dpos, cp2;
2022 int error = 0, wccflag = NFSV3_WCCRATTR;
2023 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2024 int v3 = NFS_ISV3(dvp);
2025
2026 if (dvp == vp)
2027 return (EINVAL);
2028 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2029 nfsm_reqhead(dvp, NFSPROC_RMDIR,
2030 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2031 nfsm_fhtom(dvp, v3);
2032 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
dadab5e9 2033 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_td, cnp->cn_cred);
2034 if (v3)
2035 nfsm_wcc_data(dvp, wccflag);
2036 nfsm_reqdone;
2037 VTONFS(dvp)->n_flag |= NMODIFIED;
2038 if (!wccflag)
2039 VTONFS(dvp)->n_attrstamp = 0;
2040 cache_purge(dvp);
2041 cache_purge(vp);
2042 /*
2043 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2044 */
2045 if (error == ENOENT)
2046 error = 0;
2047 return (error);
2048}
2049
2050/*
2051 * nfs readdir call
2052 */
2053static int
2054nfs_readdir(ap)
2055 struct vop_readdir_args /* {
2056 struct vnode *a_vp;
2057 struct uio *a_uio;
2058 struct ucred *a_cred;
2059 } */ *ap;
2060{
2061 struct vnode *vp = ap->a_vp;
2062 struct nfsnode *np = VTONFS(vp);
2063 struct uio *uio = ap->a_uio;
2064 int tresid, error;
2065 struct vattr vattr;
2066
2067 if (vp->v_type != VDIR)
2068 return (EPERM);
2069 /*
2070 * First, check for hit on the EOF offset cache
2071 */
2072 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
2073 (np->n_flag & NMODIFIED) == 0) {
2074 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
2075 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
2076 nfsstats.direofcache_hits++;
2077 return (0);
2078 }
3b568787 2079 } else if (VOP_GETATTR(vp, &vattr, uio->uio_td) == 0 &&
2080 np->n_mtime == vattr.va_mtime.tv_sec) {
2081 nfsstats.direofcache_hits++;
2082 return (0);
2083 }
2084 }
2085
2086 /*
2087 * Call nfs_bioread() to do the real work.
2088 */
2089 tresid = uio->uio_resid;
3b568787 2090 error = nfs_bioread(vp, uio, 0);
2091
2092 if (!error && uio->uio_resid == tresid)
2093 nfsstats.direofcache_misses++;
2094 return (error);
2095}
2096
2097/*
2098 * Readdir rpc call.
2099 * Called from below the buffer cache by nfs_doio().
2100 */
2101int
3b568787 2102nfs_readdirrpc(struct vnode *vp, struct uio *uiop)
984263bc 2103{
2104 int len, left;
2105 struct dirent *dp = NULL;
2106 u_int32_t *tl;
2107 caddr_t cp;
2108 int32_t t1, t2;
2109 nfsuint64 *cookiep;
2110 caddr_t bpos, dpos, cp2;
2111 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2112 nfsuint64 cookie;
2113 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2114 struct nfsnode *dnp = VTONFS(vp);
2115 u_quad_t fileno;
2116 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2117 int attrflag;
2118 int v3 = NFS_ISV3(vp);
2119
2120#ifndef DIAGNOSTIC
2121 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2122 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2123 panic("nfs readdirrpc bad uio");
2124#endif
2125
2126 /*
2127 * If there is no cookie, assume directory was stale.
2128 */
2129 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2130 if (cookiep)
2131 cookie = *cookiep;
2132 else
2133 return (NFSERR_BAD_COOKIE);
2134 /*
2135 * Loop around doing readdir rpc's of size nm_readdirsize
2136 * truncated to a multiple of DIRBLKSIZ.
2137 * The stopping criterion is EOF or a full buffer.
2138 */
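#if 0
	/*
	 * Illustrative sketch only (kept out of the build): how much of the
	 * caller's DIRBLKSIZ-aligned buffer one reply entry with a name of
	 * length "len" consumes.  It mirrors the arithmetic in the loop
	 * below; "space" is a hypothetical variable used only here.
	 */
	tlen = nfsm_rndup(len);		/* XDR rounds the name up to 4 bytes */
	if (tlen == len)
		tlen += 4;		/* leave room for the terminating NUL */
	space = tlen + DIRHDSIZ;	/* plus the fixed dirent header */
#endif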
2139 while (more_dirs && bigenough) {
2140 nfsstats.rpccnt[NFSPROC_READDIR]++;
2141 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2142 NFSX_READDIR(v3));
2143 nfsm_fhtom(vp, v3);
2144 if (v3) {
2145 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2146 *tl++ = cookie.nfsuquad[0];
2147 *tl++ = cookie.nfsuquad[1];
2148 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2149 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2150 } else {
2151 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2152 *tl++ = cookie.nfsuquad[0];
2153 }
2154 *tl = txdr_unsigned(nmp->nm_readdirsize);
c1cf1e59 2155 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, nfs_vpcred(vp, ND_READ));
2156 if (v3) {
2157 nfsm_postop_attr(vp, attrflag);
2158 if (!error) {
2159 nfsm_dissect(tl, u_int32_t *,
2160 2 * NFSX_UNSIGNED);
2161 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2162 dnp->n_cookieverf.nfsuquad[1] = *tl;
2163 } else {
2164 m_freem(mrep);
2165 goto nfsmout;
2166 }
2167 }
2168 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2169 more_dirs = fxdr_unsigned(int, *tl);
2170
2171 /* loop thru the dir entries, doctoring them to 4bsd form */
2172 while (more_dirs && bigenough) {
2173 if (v3) {
2174 nfsm_dissect(tl, u_int32_t *,
2175 3 * NFSX_UNSIGNED);
2176 fileno = fxdr_hyper(tl);
2177 len = fxdr_unsigned(int, *(tl + 2));
2178 } else {
2179 nfsm_dissect(tl, u_int32_t *,
2180 2 * NFSX_UNSIGNED);
2181 fileno = fxdr_unsigned(u_quad_t, *tl++);
2182 len = fxdr_unsigned(int, *tl);
2183 }
2184 if (len <= 0 || len > NFS_MAXNAMLEN) {
2185 error = EBADRPC;
2186 m_freem(mrep);
2187 goto nfsmout;
2188 }
2189 tlen = nfsm_rndup(len);
2190 if (tlen == len)
2191 tlen += 4; /* To ensure null termination */
2192 left = DIRBLKSIZ - blksiz;
2193 if ((tlen + DIRHDSIZ) > left) {
2194 dp->d_reclen += left;
2195 uiop->uio_iov->iov_base += left;
2196 uiop->uio_iov->iov_len -= left;
2197 uiop->uio_offset += left;
2198 uiop->uio_resid -= left;
2199 blksiz = 0;
2200 }
2201 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2202 bigenough = 0;
2203 if (bigenough) {
2204 dp = (struct dirent *)uiop->uio_iov->iov_base;
2205 dp->d_fileno = (int)fileno;
2206 dp->d_namlen = len;
2207 dp->d_reclen = tlen + DIRHDSIZ;
2208 dp->d_type = DT_UNKNOWN;
2209 blksiz += dp->d_reclen;
2210 if (blksiz == DIRBLKSIZ)
2211 blksiz = 0;
2212 uiop->uio_offset += DIRHDSIZ;
2213 uiop->uio_resid -= DIRHDSIZ;
2214 uiop->uio_iov->iov_base += DIRHDSIZ;
2215 uiop->uio_iov->iov_len -= DIRHDSIZ;
2216 nfsm_mtouio(uiop, len);
2217 cp = uiop->uio_iov->iov_base;
2218 tlen -= len;
2219 *cp = '\0'; /* null terminate */
2220 uiop->uio_iov->iov_base += tlen;
2221 uiop->uio_iov->iov_len -= tlen;
2222 uiop->uio_offset += tlen;
2223 uiop->uio_resid -= tlen;
2224 } else
2225 nfsm_adv(nfsm_rndup(len));
2226 if (v3) {
2227 nfsm_dissect(tl, u_int32_t *,
2228 3 * NFSX_UNSIGNED);
2229 } else {
2230 nfsm_dissect(tl, u_int32_t *,
2231 2 * NFSX_UNSIGNED);
2232 }
2233 if (bigenough) {
2234 cookie.nfsuquad[0] = *tl++;
2235 if (v3)
2236 cookie.nfsuquad[1] = *tl++;
2237 } else if (v3)
2238 tl += 2;
2239 else
2240 tl++;
2241 more_dirs = fxdr_unsigned(int, *tl);
2242 }
2243 /*
2244 * If at end of rpc data, get the eof boolean
2245 */
2246 if (!more_dirs) {
2247 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2248 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2249 }
2250 m_freem(mrep);
2251 }
2252 /*
2253 * Fill the last record, if any, out to a multiple of DIRBLKSIZ
2254 * by increasing d_reclen for the last record.
2255 */
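	/*
	 * For example (illustration only): if the final entry leaves the
	 * current DIRBLKSIZ block 12 bytes short, its d_reclen is grown by
	 * those 12 bytes and the uio is advanced by the same amount, so the
	 * caller always sees an integral number of DIRBLKSIZ blocks.
	 */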
2256 if (blksiz > 0) {
2257 left = DIRBLKSIZ - blksiz;
2258 dp->d_reclen += left;
2259 uiop->uio_iov->iov_base += left;
2260 uiop->uio_iov->iov_len -= left;
2261 uiop->uio_offset += left;
2262 uiop->uio_resid -= left;
2263 }
2264
2265 /*
2266 * We are now either at the end of the directory or have filled the
2267 * block.
2268 */
2269 if (bigenough)
2270 dnp->n_direofoffset = uiop->uio_offset;
2271 else {
2272 if (uiop->uio_resid > 0)
2273 printf("EEK! readdirrpc resid > 0\n");
2274 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2275 *cookiep = cookie;
2276 }
2277nfsmout:
2278 return (error);
2279}
2280
2281/*
2282 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2283 */
2284int
3b568787 2285nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop)
984263bc 2286{
2287 int len, left;
2288 struct dirent *dp;
2289 u_int32_t *tl;
2290 caddr_t cp;
2291 int32_t t1, t2;
2292 struct vnode *newvp;
2293 nfsuint64 *cookiep;
2294 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2295 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2296 struct nameidata nami, *ndp = &nami;
2297 struct componentname *cnp = &ndp->ni_cnd;
2298 nfsuint64 cookie;
2299 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2300 struct nfsnode *dnp = VTONFS(vp), *np;
2301 nfsfh_t *fhp;
2302 u_quad_t fileno;
2303 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2304 int attrflag, fhsize;
2305
2306#ifndef nolint
2307 dp = (struct dirent *)0;
2308#endif
2309#ifndef DIAGNOSTIC
2310 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2311 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2312 panic("nfs readdirplusrpc bad uio");
2313#endif
2314 ndp->ni_dvp = vp;
2315 newvp = NULLVP;
2316
2317 /*
2318 * If there is no cookie, assume directory was stale.
2319 */
2320 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2321 if (cookiep)
2322 cookie = *cookiep;
2323 else
2324 return (NFSERR_BAD_COOKIE);
2325 /*
2326 * Loop around doing readdir rpc's of size nm_readdirsize
2327 * truncated to a multiple of DIRBLKSIZ.
2328 * The stopping criterion is EOF or a full buffer.
2329 */
2330 while (more_dirs && bigenough) {
2331 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2332 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2333 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2334 nfsm_fhtom(vp, 1);
2335 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2336 *tl++ = cookie.nfsuquad[0];
2337 *tl++ = cookie.nfsuquad[1];
2338 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2339 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2340 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2341 *tl = txdr_unsigned(nmp->nm_rsize);
c1cf1e59 2342 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, nfs_vpcred(vp, ND_READ));
2343 nfsm_postop_attr(vp, attrflag);
2344 if (error) {
2345 m_freem(mrep);
2346 goto nfsmout;
2347 }
2348 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2349 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2350 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2351 more_dirs = fxdr_unsigned(int, *tl);
2352
2353 /* loop thru the dir entries, doctoring them to 4bsd form */
2354 while (more_dirs && bigenough) {
2355 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2356 fileno = fxdr_hyper(tl);
2357 len = fxdr_unsigned(int, *(tl + 2));
2358 if (len <= 0 || len > NFS_MAXNAMLEN) {
2359 error = EBADRPC;
2360 m_freem(mrep);
2361 goto nfsmout;
2362 }
2363 tlen = nfsm_rndup(len);
2364 if (tlen == len)
2365 tlen += 4; /* To ensure null termination*/
2366 left = DIRBLKSIZ - blksiz;
2367 if ((tlen + DIRHDSIZ) > left) {
2368 dp->d_reclen += left;
2369 uiop->uio_iov->iov_base += left;
2370 uiop->uio_iov->iov_len -= left;
2371 uiop->uio_offset += left;
2372 uiop->uio_resid -= left;
2373 blksiz = 0;
2374 }
2375 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2376 bigenough = 0;
2377 if (bigenough) {
2378 dp = (struct dirent *)uiop->uio_iov->iov_base;
2379 dp->d_fileno = (int)fileno;
2380 dp->d_namlen = len;
2381 dp->d_reclen = tlen + DIRHDSIZ;
2382 dp->d_type = DT_UNKNOWN;
2383 blksiz += dp->d_reclen;
2384 if (blksiz == DIRBLKSIZ)
2385 blksiz = 0;
2386 uiop->uio_offset += DIRHDSIZ;
2387 uiop->uio_resid -= DIRHDSIZ;
2388 uiop->uio_iov->iov_base += DIRHDSIZ;
2389 uiop->uio_iov->iov_len -= DIRHDSIZ;
2390 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2391 cnp->cn_namelen = len;
2392 nfsm_mtouio(uiop, len);
2393 cp = uiop->uio_iov->iov_base;
2394 tlen -= len;
2395 *cp = '\0';
2396 uiop->uio_iov->iov_base += tlen;
2397 uiop->uio_iov->iov_len -= tlen;
2398 uiop->uio_offset += tlen;
2399 uiop->uio_resid -= tlen;
2400 } else
2401 nfsm_adv(nfsm_rndup(len));
2402 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2403 if (bigenough) {
2404 cookie.nfsuquad[0] = *tl++;
2405 cookie.nfsuquad[1] = *tl++;
2406 } else
2407 tl += 2;
2408
2409 /*
2410 * Since the attributes are before the file handle
2411 * (sigh), we must skip over the attributes and then
2412 * come back and get them.
2413 */
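			/*
			 * Concretely: (mdsav1, dpossav1) below remember where
			 * the attributes start in the reply; after the file
			 * handle has been parsed, the stream position is
			 * rewound to that point so nfsm_loadattr() can decode
			 * the attributes, then restored to (mdsav2, dpossav2)
			 * just past the handle.
			 */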
2414 attrflag = fxdr_unsigned(int, *tl);
2415 if (attrflag) {
2416 dpossav1 = dpos;
2417 mdsav1 = md;
2418 nfsm_adv(NFSX_V3FATTR);
2419 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2420 doit = fxdr_unsigned(int, *tl);
2421 if (doit) {
2422 nfsm_getfh(fhp, fhsize, 1);
2423 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2424 VREF(vp);
2425 newvp = vp;
2426 np = dnp;
2427 } else {
2428 error = nfs_nget(vp->v_mount, fhp,
2429 fhsize, &np);
2430 if (error)
2431 doit = 0;
2432 else
2433 newvp = NFSTOV(np);
2434 }
2435 }
2436 if (doit && bigenough) {
2437 dpossav2 = dpos;
2438 dpos = dpossav1;
2439 mdsav2 = md;
2440 md = mdsav1;
2441 nfsm_loadattr(newvp, (struct vattr *)0);
2442 dpos = dpossav2;
2443 md = mdsav2;
2444 dp->d_type =
2445 IFTODT(VTTOIF(np->n_vattr.va_type));
2446 ndp->ni_vp = newvp;
bc0c094e 2447 cache_enter(ndp->ni_dvp, NCPNULL, ndp->ni_vp, cnp);
2448 }
2449 } else {
2450 /* Just skip over the file handle */
2451 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2452 i = fxdr_unsigned(int, *tl);
2453 nfsm_adv(nfsm_rndup(i));
2454 }
2455 if (newvp != NULLVP) {
2456 if (newvp == vp)
2457 vrele(newvp);
2458 else
2459 vput(newvp);
2460 newvp = NULLVP;
2461 }
2462 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2463 more_dirs = fxdr_unsigned(int, *tl);
2464 }
2465 /*
2466 * If at end of rpc data, get the eof boolean
2467 */
2468 if (!more_dirs) {
2469 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2470 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2471 }
2472 m_freem(mrep);
2473 }
2474 /*
2475 * Fill the last record, if any, out to a multiple of DIRBLKSIZ
2476 * by increasing d_reclen for the last record.
2477 */
2478 if (blksiz > 0) {
2479 left = DIRBLKSIZ - blksiz;
2480 dp->d_reclen += left;
2481 uiop->uio_iov->iov_base += left;
2482 uiop->uio_iov->iov_len -= left;
2483 uiop->uio_offset += left;
2484 uiop->uio_resid -= left;
2485 }
2486
2487 /*
2488 * We are now either at the end of the directory or have filled the
2489 * block.
2490 */
2491 if (bigenough)
2492 dnp->n_direofoffset = uiop->uio_offset;
2493 else {
2494 if (uiop->uio_resid > 0)
2495 printf("EEK! readdirplusrpc resid > 0\n");
2496 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2497 *cookiep = cookie;
2498 }
2499nfsmout:
2500 if (newvp != NULLVP) {
2501 if (newvp == vp)
2502 vrele(newvp);
2503 else
2504 vput(newvp);
2505 newvp = NULLVP;
2506 }
2507 return (error);
2508}
2509
2510/*
2511 * Silly rename. To make the stateless NFS filesystem look a little more
2512 * like "ufs", a remove of an active vnode is translated into a rename to
2513 * a funny-looking filename that is removed by nfs_inactive on the
2514 * nfsnode. There is the potential for another process on a different
2515 * client to create the same funny name between the time nfs_lookitup()
2516 * fails and the nfs_rename() completes, but...
2517 */
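/*
 * Illustrative note: the funny name built below has the form
 * ".nfsAxxxxxxxx4.4", where the hex digits come from the current thread
 * pointer.  On a collision the character at s_name[4] (initially 'A') is
 * incremented until an unused name is found; if it passes 'z' the
 * operation fails with EINVAL.
 */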
2518static int
2519nfs_sillyrename(dvp, vp, cnp)
2520 struct vnode *dvp, *vp;
2521 struct componentname *cnp;
2522{
40393ded 2523 struct sillyrename *sp;
2524 struct nfsnode *np;
2525 int error;
2526
2527 cache_purge(dvp);
2528 np = VTONFS(vp);
2529#ifndef DIAGNOSTIC
2530 if (vp->v_type == VDIR)
2531 panic("nfs: sillyrename dir");
2532#endif
2533 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2534 M_NFSREQ, M_WAITOK);
2535 sp->s_cred = crdup(cnp->cn_cred);
2536 sp->s_dvp = dvp;
2537 VREF(dvp);
2538
2539 /* Fudge together a funny name */
dadab5e9 2540 sp->s_namlen = sprintf(sp->s_name, ".nfsA%08x4.4", (int)cnp->cn_td);
2541
2542 /* Try lookitups until we get one that isn't there */
2543 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
dadab5e9 2544 cnp->cn_td, (struct nfsnode **)0) == 0) {
2545 sp->s_name[4]++;
2546 if (sp->s_name[4] > 'z') {
2547 error = EINVAL;
2548 goto bad;
2549 }
2550 }
2551 error = nfs_renameit(dvp, cnp, sp);
2552 if (error)
2553 goto bad;
2554 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
dadab5e9 2555 cnp->cn_td, &np);
2556 np->n_sillyrename = sp;
2557 return (0);
2558bad:
2559 vrele(sp->s_dvp);
2560 crfree(sp->s_cred);
2561 free((caddr_t)sp, M_NFSREQ);
2562 return (error);
2563}
2564
2565/*
2566 * Look up a file name and optionally either update the file handle or
2567 * allocate an nfsnode, depending on the value of npp.
2568 * npp == NULL --> just do the lookup
2569 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2570 * handled too
2571 * *npp != NULL --> update the file handle in the vnode
2572 */
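/*
 * For example, nfs_sillyrename() below probes with npp == NULL until it
 * finds an unused name and then passes &np to attach the new nfsnode,
 * while nfs_mkdir() above passes &np to recover a vnode after an EEXIST
 * reply to a retried mkdir.
 */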
2573static int
dadab5e9 2574nfs_lookitup(dvp, name, len, cred, td, npp)
40393ded 2575 struct vnode *dvp;
2576 const char *name;
2577 int len;
2578 struct ucred *cred;
dadab5e9 2579 struct thread *td;
2580 struct nfsnode **npp;
2581{
2582 u_int32_t *tl;
2583 caddr_t cp;
2584 int32_t t1, t2;
2585 struct vnode *newvp = (struct vnode *)0;
2586 struct nfsnode *np, *dnp = VTONFS(dvp);
2587 caddr_t bpos, dpos, cp2;
2588 int error = 0, fhlen, attrflag;
2589 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2590 nfsfh_t *nfhp;
2591 int v3 = NFS_ISV3(dvp);
2592
2593 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2594 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2595 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2596 nfsm_fhtom(dvp, v3);
2597 nfsm_strtom(name, len, NFS_MAXNAMLEN);
dadab5e9 2598 nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
2599 if (npp && !error) {
2600 nfsm_getfh(nfhp, fhlen, v3);
2601 if (*npp) {
2602 np = *npp;
2603 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2604 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2605 np->n_fhp = &np->n_fh;
2606 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2607 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2608 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2609 np->n_fhsize = fhlen;
2610 newvp = NFSTOV(np);
2611 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2612 VREF(dvp);
2613 newvp = dvp;
2614 } else {
2615 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2616 if (error) {
2617 m_freem(mrep);
2618 return (error);
2619 }
2620 newvp = NFSTOV(np);
2621 }
2622 if (v3) {
2623 nfsm_postop_attr(newvp, attrflag);
2624 if (!attrflag && *npp == NULL) {
2625 m_freem(mrep);
2626 if (newvp == dvp)
2627 vrele(newvp);
2628 else
2629 vput(newvp);
2630 return (ENOENT);
2631 }
2632 } else
2633 nfsm_loadattr(newvp, (struct vattr *)0);
2634 }
2635 nfsm_reqdone;
2636 if (npp && *npp == NULL) {
2637 if (error) {
2638 if (newvp) {
2639 if (newvp == dvp)
2640 vrele(newvp);
2641 else
2642 vput(newvp);
2643 }
2644 } else
2645 *npp = np;
2646 }
2647 return (error);
2648}
2649
2650/*
2651 * Nfs Version 3 commit rpc
2652 */
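/*
 * The reply carries the server's current write verifier.  If it differs
 * from the copy cached in nm_verf, the server has presumably rebooted
 * since the data was written, so the new verifier is recorded and
 * NFSERR_STALEWRITEVERF is returned; the caller (see nfs_flush() below)
 * then clears the commit state and rewrites the affected buffers.
 */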
2653int
3b568787 2654nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct thread *td)
984263bc 2655{
2656 caddr_t cp;
2657 u_int32_t *tl;
2658 int32_t t1, t2;
2659 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2660 caddr_t bpos, dpos, cp2;
2661 int error = 0, wccflag = NFSV3_WCCRATTR;
2662 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2663
2664 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2665 return (0);
2666 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2667 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2668 nfsm_fhtom(vp, 1);
2669 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2670 txdr_hyper(offset, tl);
2671 tl += 2;
2672 *tl = txdr_unsigned(cnt);
c1cf1e59 2673 nfsm_request(vp, NFSPROC_COMMIT, td, nfs_vpcred(vp, ND_WRITE));
2674 nfsm_wcc_data(vp, wccflag);
2675 if (!error) {
2676 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2677 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2678 NFSX_V3WRITEVERF)) {
2679 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2680 NFSX_V3WRITEVERF);
2681 error = NFSERR_STALEWRITEVERF;
2682 }
2683 }
2684 nfsm_reqdone;
2685 return (error);
2686}
2687
2688/*
2689 * Kludge City..
2690 * - make nfs_bmap() essentially a no-op that does no translation
2691 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2692 * (Maybe I could use the process's page mapping, but I was concerned that
2693 * Kernel Write might not be enabled; I also figured copyout() would do
2694 * a lot more work than bcopy(), and it currently happens in the
2695 * context of the swapper process (2).)
2696 */
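/*
 * For example (illustration only, assuming the usual 512-byte DEV_BSIZE):
 * with an 8192-byte f_iosize, nfs_bmap() below maps logical block 3 to
 * 3 * btodb(8192) = 48 DEV_BSIZE-sized blocks, still on the same vnode.
 */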
2697static int
2698nfs_bmap(ap)
2699 struct vop_bmap_args /* {
2700 struct vnode *a_vp;
2701 daddr_t a_bn;
2702 struct vnode **a_vpp;
2703 daddr_t *a_bnp;
2704 int *a_runp;
2705 int *a_runb;
2706 } */ *ap;
2707{
40393ded 2708 struct vnode *vp = ap->a_vp;
2709
2710 if (ap->a_vpp != NULL)
2711 *ap->a_vpp = vp;
2712 if (ap->a_bnp != NULL)
2713 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2714 if (ap->a_runp != NULL)
2715 *ap->a_runp = 0;
2716 if (ap->a_runb != NULL)
2717 *ap->a_runb = 0;
2718 return (0);
2719}
2720
2721/*
2722 * Strategy routine.
2723 * For async requests when nfsiod(s) are running, queue the request by
2724 * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
2725 * request.
2726 */
2727static int
2728nfs_strategy(ap)
2729 struct vop_strategy_args *ap;
2730{
40393ded 2731 struct buf *bp = ap->a_bp;
dadab5e9 2732 struct thread *td;
2733 int error = 0;
2734
2735 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2736 KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
2737
2738 if (bp->b_flags & B_PHYS)
2739 panic("nfs physio");
2740
2741 if (bp->b_flags & B_ASYNC)
dadab5e9 2742 td = NULL;
984263bc 2743 else
dadab5e9 2744 td = curthread; /* XXX */
984263bc 2745
2746 /*
2747 * If the op is asynchronous and an i/o daemon is waiting,
2748 * queue the request, wake it up and wait for completion;
2749 * otherwise just do it ourselves.
2750 */
2751 if ((bp->b_flags & B_ASYNC) == 0 ||
2752 nfs_asyncio(bp, td))
2753 error = nfs_doio(bp, td);
2754 return (error);
2755}
2756
2757/*
2758 * Mmap a file
2759 *
2760 * NB Currently unsupported.
2761 */
2762/* ARGSUSED */
2763static int
2764nfs_mmap(ap)
2765 struct vop_mmap_args /* {
2766 struct vnode *a_vp;
2767 int a_fflags;
2768 struct ucred *a_cred;
dadab5e9 2769 struct thread *a_td;
2770 } */ *ap;
2771{
2772
2773 return (EINVAL);
2774}
2775
2776/*
2777 * fsync vnode op. Just call nfs_flush() with commit == 1.
2778 */
2779/* ARGSUSED */
2780static int
2781nfs_fsync(ap)
2782 struct vop_fsync_args /* {
2783 struct vnodeop_desc *a_desc;
2784 struct vnode * a_vp;
2785 struct ucred * a_cred;
2786 int a_waitfor;
dadab5e9 2787 struct thread * a_td;
2788 } */ *ap;
2789{
2790
3b568787 2791 return (nfs_flush(ap->a_vp, ap->a_waitfor, ap->a_td, 1));
2792}
2793
2794/*
2795 * Flush all the blocks associated with a vnode.
2796 * Walk through the buffer pool and push any dirty pages
2797 * associated with the vnode.
2798 */
2799static int
3b568787 2800nfs_flush(vp, waitfor, td, commit)
40393ded 2801 struct vnode *vp;
984263bc 2802 int waitfor;
dadab5e9 2803 struct thread *td;
2804 int commit;
2805{
2806 struct nfsnode *np = VTONFS(vp);
2807 struct buf *bp;
2808 int i;
2809 struct buf *nbp;
2810 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2811 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2812 int passone = 1;
2813 u_quad_t off, endoff, toff;
2814 struct buf **bvec = NULL;
2815#ifndef NFS_COMMITBVECSIZ
2816#define NFS_COMMITBVECSIZ 20
2817#endif
2818 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2819 int bvecsize = 0, bveccount;
2820
2821 if (nmp->nm_flag & NFSMNT_INT)
2822 slpflag = PCATCH;
2823 if (!commit)
2824 passone = 0;
2825 /*
2826 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2827 * server, but has not been committed to stable storage on the server
2828 * yet. On the first pass, the byte range is worked out and the commit
2829 * rpc is done. On the second pass, nfs_writebp() is called to do the
2830 * job.
2831 */
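	/*
	 * Note that when commit == 0 the commit machinery is bypassed
	 * entirely: passone was cleared above and the NFSv3 commit gathering
	 * below is guarded by "commit", so the routine drops straight
	 * through to the write loop.
	 */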
2832again:
2833 off = (u_quad_t)-1;
2834 endoff = 0;
2835 bvecpos = 0;
2836 if (NFS_ISV3(vp) && commit) {
2837 s = splbio();
2838 /*
2839 * Count up how many buffers are waiting for a commit.
2840 */
2841 bveccount = 0;
2842 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2843 nbp = TAILQ_NEXT(bp, b_vnbufs);
2844 if (BUF_REFCNT(bp) == 0 &&
2845 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2846 == (B_DELWRI | B_NEEDCOMMIT))
2847 bveccount++;
2848 }
2849 /*
2850 * Allocate space to remember the list of bufs to commit. It is
2851 * important to use M_NOWAIT here to avoid a race with nfs_write.
2852 * If we can't get memory (for whatever reason), we will end up
2853 * committing the buffers one-by-one in the loop below.
2854 */
2855 if (bvec != NULL && bvec != bvec_on_stack)
2856 free(bvec, M_TEMP);
2857 if (bveccount > NFS_COMMITBVECSIZ) {
2858 bvec = (struct buf **)
2859 malloc(bveccount * sizeof(struct buf *),
2860 M_TEMP, M_NOWAIT);
2861 if (bvec == NULL) {
2862 bvec = bvec_on_stack;
2863 bvecsize = NFS_COMMITBVECSIZ;
2864 } else
2865 bvecsize = bveccount;
2866 } else {
2867 bvec = bvec_on_stack;
2868 bvecsize = NFS_COMMITBVECSIZ;
2869 }
2870 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2871 nbp = TAILQ_NEXT(bp, b_vnbufs);
2872 if (bvecpos >= bvecsize)
2873 break;
2874 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2875 (B_DELWRI | B_NEEDCOMMIT) ||
2876 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2877 continue;
2878 bremfree(bp);
2879 /*
2880 * NOTE: we are not clearing B_DONE here, so we have
2881 * to do it later on in this routine if we intend to
2882 * initiate I/O on the bp.
2883 *
2884 * Note: to avoid loopback deadlocks, we do not
2885 * assign b_runningbufspace.
2886 */
2887 bp->b_flags |= B_WRITEINPROG;
2888 vfs_busy_pages(bp, 1);
2889
2890 /*
2891 * bp is protected by being locked, but nbp is not
2892 * and vfs_busy_pages() may sleep. We have to
2893 * recalculate nbp.
2894 */
2895 nbp = TAILQ_NEXT(bp, b_vnbufs);
2896
2897 /*
2898 * A list of these buffers is kept so that the
2899 * second loop knows which buffers have actually
2900 * been committed. This is necessary, since there
2901 * may be a race between the commit rpc and new
2902 * uncommitted writes on the file.
2903 */
2904 bvec[bvecpos++] = bp;
2905 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2906 bp->b_dirtyoff;
2907 if (toff < off)
2908 off = toff;
2909 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2910 if (toff > endoff)
2911 endoff = toff;
2912 }
2913 splx(s);
2914 }
2915 if (bvecpos > 0) {
2916 /*
2917 * Commit data on the server, as required. Note that
2918 * nfs_commit will use the vnode's cred for the commit.
984263bc 2919 */
3b568787 2920 retv = nfs_commit(vp, off, (int)(endoff - off), td);
2921
2922 if (retv == NFSERR_STALEWRITEVERF)
2923 nfs_clearcommit(vp->v_mount);
2924
2925 /*
2926 * Now, either mark the blocks I/O done or mark the
2927 * blocks dirty, depending on whether the commit
2928 * succeeded.
2929 */
2930 for (i = 0; i < bvecpos; i++) {
2931 bp = bvec[i];
2932 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2933 if (retv) {
2934 /*
2935 * Error, leave B_DELWRI intact
2936 */
2937 vfs_unbusy_pages(bp);
2938 brelse(bp);
2939 } else {
2940 /*
2941 * Success, remove B_DELWRI ( bundirty() ).
2942 *
2943 * b_dirtyoff/b_dirtyend seem to be NFS
2944 * specific. We should probably move that
2945 * into bundirty(). XXX
2946 */
2947 s = splbio();
2948 vp->v_numoutput++;
2949 bp->b_flags |= B_ASYNC;
2950 bundirty(bp);
2951 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2952 bp->b_dirtyoff = bp->b_dirtyend = 0;
2953 splx(s);
2954 biodone(bp);
2955 }
2956 }
2957 }
2958
2959 /*
2960 * Start/do any write(s) that are required.
2961 */
2962loop:
2963 s = splbio();
2964 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2965 nbp = TAILQ_NEXT(bp, b_vnbufs);
2966 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2967 if (waitfor != MNT_WAIT || passone)
2968 continue;
2969 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2970 "nfsfsync", slpflag, slptimeo);
2971 splx(s);
2972 if (error == 0)
2973 panic("nfs_fsync: inconsistent lock");
2974 if (error == ENOLCK)
2975 goto loop;
dadab5e9 2976 if (nfs_sigintr(nmp, (struct nfsreq *)0, td)) {
2977 error = EINTR;
2978 goto done;
2979 }
2980 if (slpflag == PCATCH) {
2981 slpflag = 0;
2982 slptimeo = 2 * hz;
2983 }
2984 goto loop;
2985 }
2986 if ((bp->b_flags & B_DELWRI) == 0)
2987 panic("nfs_fsync: not dirty");
2988 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
2989 BUF_UNLOCK(bp);
2990 continue;
2991 }
2992 bremfree(bp);
2993 if (passone || !commit)
2994 bp->b_flags |= B_ASYNC;
2995 else
2996 bp->b_flags |= B_ASYNC | B_WRITEINPROG;
2997 splx(s);
2998 VOP_BWRITE(bp->b_vp, bp);
2999 goto loop;
3000 }
3001 splx(s);
3002 if (passone) {
3003 passone = 0;
3004 goto again;
3005 }
3006 if (waitfor == MNT_WAIT) {
3007 while (vp->v_numoutput) {
3008 vp->v_flag |= VBWAIT;
3009 error = tsleep((caddr_t)&vp->v_numoutput,
377d4740 3010 slpflag, "nfsfsync", slptimeo);
984263bc 3011 if (error) {
dadab5e9 3012 if (nfs_sigintr(nmp, (struct nfsreq *)0, td)) {
3013 error = EINTR;
3014 goto done;
3015 }
3016 if (slpflag == PCATCH) {
3017 slpflag = 0;
3018 slptimeo = 2 * hz;
3019 }
3020 }
3021 }
3022 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
3023 goto loop;
3024 }
3025 }
3026 if (np->n_flag & NWRITEERR) {
3027 error = np->n_error;
3028 np->n_flag &= ~NWRITEERR;
3029 }
3030done:
3031 if (bvec != NULL && bvec != bvec_on_stack)
3032 free(bvec, M_TEMP);
3033 return (error);
3034}
3035
3036/*
3037 * NFS advisory byte-level locks.
3038 * Currently unsupported.
3039 */
3040static int
3041nfs_advlock(ap)
3042 struct vop_advlock_args /* {
3043 struct vnode *a_vp;
3044 caddr_t a_id;
3045 int a_op;
3046 struct flock *a_fl;
3047 int a_flags;
3048 } */ *ap;
3049{
40393ded 3050 struct nfsnode *np = VTONFS(ap->a_vp);
3051
3052 /*
3053 * The following kludge is to allow diskless support to work
3054 * until a real NFS lockd is implemented. Basically, just pretend
3055 * that this is a local lock.
3056 */
3057 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
3058}
3059
3060/*
3061 * Print out the contents of an nfsnode.
3062 */
3063static int
3064nfs_print(ap)
3065 struct vop_print_args /* {
3066 struct vnode *a_vp;
3067 } */ *ap;
3068{
3069 struct vnode *vp = ap->a_vp;
3070 struct nfsnode *np = VTONFS(vp);
3071
3072 printf("tag VT_NFS, fileid %ld fsid 0x%x",
3073 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3074 if (vp->v_type == VFIFO)
3075 fifo_printinfo(vp);
3076 printf("\n");
3077 return (0);
3078}
3079
3080/*
3081 * Just call nfs_writebp() with the force argument set to 1.
3082 *
3083 * NOTE: B_DONE may or may not be set in a_bp on call.
3084 */
3085static int
3086nfs_bwrite(ap)
3087 struct vop_bwrite_args /* {
3088 struct vnode *a_bp;
3089 } */ *ap;
3090{
dadab5e9 3091 return (nfs_writebp(ap->a_bp, 1, curthread));
3092}
3093
3094/*
3095 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
3096 * the force flag is set, and it also handles the B_NEEDCOMMIT flag. We set
3097 * B_CACHE if this is a VMIO buffer.
3098 */
3099int
dadab5e9 3100nfs_writebp(bp, force, td)
40393ded 3101 struct buf *bp;
984263bc 3102 int force;
dadab5e9 3103 struct thread *td;
3104{
3105 int s;
3106 int oldflags = bp->b_flags;
3107#if 0
3108 int retv = 1;
3109 off_t off;
3110#endif
3111
3112 if (BUF_REFCNT(bp) == 0)
3113 panic("bwrite: buffer is not locked???");
3114
3115 if (bp->b_flags & B_INVAL) {
3116 brelse(bp);
3117 return(0);
3118 }
3119
3120 bp->b_flags |= B_CACHE;
3121
3122 /*
3123 * Undirty the bp. We will redirty it later if the I/O fails.
3124 */
3125
3126 s = splbio();
3127 bundirty(bp);
3128 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3129
3130 bp->b_vp->v_numoutput++;
3131 splx(s);
3132
3133 /*
3134 * Note: to avoid loopback deadlocks, we do not
3135 * assign b_runningbufspace.
3136 */
3137 vfs_busy_pages(bp, 1);
3138
3139 if (force)
3140 bp->b_flags |= B_WRITEINPROG;
3141 BUF_KERNPROC(bp);
3142 VOP_STRATEGY(bp->b_vp, bp);
3143
3144 if( (oldflags & B_ASYNC) == 0) {
3145 int rtval = biowait(bp);
3146
3147 if (oldflags & B_DELWRI) {
3148 s = splbio();
3149 reassignbuf(bp, bp->b_vp);
3150 splx(s);
3151 }
3152
3153 brelse(bp);
3154 return (rtval);
3155 }
3156
3157 return (0);
3158}
3159
3160/*
3161 * nfs special file access vnode op.
3162 * Essentially just get vattr and then imitate iaccess() since the device is
3163 * local to the client.
3164 */
3165static int
3166nfsspec_access(ap)
3167 struct vop_access_args /* {
3168 struct vnode *a_vp;
3169 int a_mode;
3170 struct ucred *a_cred;
dadab5e9 3171 struct thread *a_td;
3172 } */ *ap;
3173{
3174 struct vattr *vap;
3175 gid_t *gp;
3176 struct ucred *cred = ap->a_cred;
3177 struct vnode *vp = ap->a_vp;
3178 mode_t mode = ap->a_mode;
3179 struct vattr vattr;
40393ded 3180 int i;
3181 int error;
3182
3183 /*
3184 * Disallow write attempts on filesystems mounted read-only,
3185 * unless the file is a socket, fifo, or a block or character
3186 * device resident on the filesystem.
3187 */
3188 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3189 switch (vp->v_type) {
3190 case VREG:
3191 case VDIR:
3192 case VLNK:
3193 return (EROFS);
3194 default:
3195 break;
3196 }
3197 }
3198 /*
3199 * If you're the super-user,
3200 * you always get access.
3201 */
3202 if (cred->cr_uid == 0)
3203 return (0);
3204 vap = &vattr;
3b568787 3205 error = VOP_GETATTR(vp, vap, ap->a_td);
3206 if (error)
3207 return (error);
3208 /*
3209 * Access check is based on only one of owner, group, public.
3210 * If not owner, then check group. If not a member of the
3211 * group, then check public access.
3212 */
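	/*
	 * For example (illustration only): a VWRITE (0200) request from a
	 * non-owner is checked as 0020 against the group bits, and as 0002
	 * against the "other" bits if the caller is not in the file's group.
	 */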
3213 if (cred->cr_uid != vap->va_uid) {
3214 mode >>= 3;
3215 gp = cred->cr_groups;
3216 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3217 if (vap->va_gid == *gp)
3218 goto found;
3219 mode >>= 3;
3220found:
3221 ;
3222 }
3223 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3224 return (error);
3225}
3226
3227/*
3228 * Read wrapper for special devices.
3229 */
3230static int
3231nfsspec_read(ap)
3232 struct vop_read_args /* {
3233 struct vnode *a_vp;
3234 struct uio *a_uio;
3235 int a_ioflag;
3236 struct ucred *a_cred;
3237 } */ *ap;
3238{
40393ded 3239 struct nfsnode *np = VTONFS(ap->a_vp);
3240
3241 /*
3242 * Set access flag.
3243 */
3244 np->n_flag |= NACC;
3245 getnanotime(&np->n_atim);
3246 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3247}
3248
3249/*
3250 * Write wrapper for special devices.
3251 */
3252static int
3253nfsspec_write(ap)
3254 struct vop_write_args /* {
3255 struct vnode *a_vp;
3256 struct uio *a_uio;
3257 int a_ioflag;
3258 struct ucred *a_cred;
3259 } */ *ap;
3260{
40393ded 3261 struct nfsnode *np = VTONFS(ap->a_vp);
3262
3263 /*
3264 * Set update flag.
3265 */
3266 np->n_flag |= NUPD;
3267 getnanotime(&np->n_mtim);
3268 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3269}
3270
3271/*
3272 * Close wrapper for special devices.
3273 *
3274 * Update the times on the nfsnode then do device close.
3275 */
3276static int
3277nfsspec_close(ap)
3278 struct vop_close_args /* {
3279 struct vnode *a_vp;
3280 int a_fflag;
3281 struct ucred *a_cred;
dadab5e9 3282 struct thread *a_td;
3283 } */ *ap;
3284{
3285 struct vnode *vp = ap->a_vp;
3286 struct nfsnode *np = VTONFS(vp);
3287 struct vattr vattr;
3288
3289 if (np->n_flag & (NACC | NUPD)) {
3290 np->n_flag |= NCHG;
3291 if (vp->v_usecount == 1 &&
3292 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3293 VATTR_NULL(&vattr);
3294 if (np->n_flag & NACC)
3295 vattr.va_atime = np->n_atim;
3296 if (np->n_flag & NUPD)
3297 vattr.va_mtime = np->n_mtim;
c1cf1e59 3298 (void)VOP_SETATTR(vp, &vattr, nfs_vpcred(vp, ND_WRITE), ap->a_td);
3299 }
3300 }
3301 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3302}
3303
3304/*
3305 * Read wrapper for fifos.
3306 */
3307static int
3308nfsfifo_read(ap)
3309 struct vop_read_args /* {
3310 struct vnode *a_vp;
3311 struct uio *a_uio;
3312 int a_ioflag;
3313 struct ucred *a_cred;
3314 } */ *ap;
3315{
40393ded 3316 struct nfsnode *np = VTONFS(ap->a_vp);
3317
3318 /*
3319 * Set access flag.
3320 */
3321 np->n_flag |= NACC;
3322 getnanotime(&np->n_atim);
3323 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3324}
3325
3326/*
3327 * Write wrapper for fifos.
3328 */
3329static int
3330nfsfifo_write(ap)
3331 struct vop_write_args /* {
3332 struct vnode *a_vp;
3333 struct uio *a_uio;
3334 int a_ioflag;
3335 struct ucred *a_cred;
3336 } */ *ap;
3337{
40393ded 3338 struct nfsnode *np = VTONFS(ap->a_vp);
3339
3340 /*
3341 * Set update flag.
3342 */
3343 np->n_flag |= NUPD;
3344 getnanotime(&np->n_mtim);
3345 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3346}
3347
3348/*
3349 * Close wrapper for fifos.
3350 *
3351 * Update the times on the nfsnode then do fifo close.
3352 */
3353static int
3354nfsfifo_close(ap)
3355 struct vop_close_args /* {
3356 struct vnode *a_vp;
3357 int a_fflag;
dadab5e9 3358 struct thread *a_td;
3359 } */ *ap;
3360{
3361 struct vnode *vp = ap->a_vp;
3362 struct nfsnode *np = VTONFS(vp);
3363 struct vattr vattr;
3364 struct timespec ts;
3365
3366 if (np->n_flag & (NACC | NUPD)) {
3367 getnanotime(&ts);
3368 if (np->n_flag & NACC)
3369 np->n_atim = ts;
3370 if (np->n_flag & NUPD)
3371 np->n_mtim = ts;
3372 np->n_flag |= NCHG;
3373 if (vp->v_usecount == 1 &&
3374 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3375 VATTR_NULL(&vattr);
3376 if (np->n_flag & NACC)
3377 vattr.va_atime = np->n_atim;
3378 if (np->n_flag & NUPD)
3379 vattr.va_mtime = np->n_mtim;
c1cf1e59 3380 (void)VOP_SETATTR(vp, &vattr, nfs_vpcred(vp, ND_WRITE), ap->a_td);
3381 }
3382 }
3383 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3384}
3385