Introduce cratom(), remove crcopy().
[dragonfly.git] / sys / vfs / nfs / nfs_vnops.c
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $FreeBSD: src/sys/nfs/nfs_vnops.c,v 1.150.2.5 2001/12/20 19:56:28 dillon Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_vnops.c,v 1.4 2003/06/25 03:56:07 dillon Exp $
39 */
40
41
42/*
43 * vnode op calls for Sun NFS version 2 and 3
44 */
45
46#include "opt_inet.h"
47
48#include <sys/param.h>
49#include <sys/kernel.h>
50#include <sys/systm.h>
51#include <sys/resourcevar.h>
52#include <sys/proc.h>
53#include <sys/mount.h>
54#include <sys/buf.h>
55#include <sys/malloc.h>
56#include <sys/mbuf.h>
57#include <sys/namei.h>
58#include <sys/socket.h>
59#include <sys/vnode.h>
60#include <sys/dirent.h>
61#include <sys/fcntl.h>
62#include <sys/lockf.h>
63#include <sys/stat.h>
64#include <sys/sysctl.h>
65#include <sys/conf.h>
66
67#include <vm/vm.h>
68#include <vm/vm_extern.h>
69#include <vm/vm_zone.h>
70
71#include <sys/buf2.h>
72
73#include <miscfs/fifofs/fifo.h>
74
75#include <nfs/rpcv2.h>
76#include <nfs/nfsproto.h>
77#include <nfs/nfs.h>
78#include <nfs/nfsnode.h>
79#include <nfs/nfsmount.h>
80#include <nfs/xdr_subs.h>
81#include <nfs/nfsm_subs.h>
82#include <nfs/nqnfs.h>
83
84#include <net/if.h>
85#include <netinet/in.h>
86#include <netinet/in_var.h>
87
88/* Defs */
89#define TRUE 1
90#define FALSE 0
91
92/*
93 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
94 * calls are not in getblk() and brelse() so that they would not be necessary
95 * here.
96 */
97#ifndef B_VMIO
98#define vfs_busy_pages(bp, f)
99#endif
100
101static int nfsspec_read __P((struct vop_read_args *));
102static int nfsspec_write __P((struct vop_write_args *));
103static int nfsfifo_read __P((struct vop_read_args *));
104static int nfsfifo_write __P((struct vop_write_args *));
105static int nfsspec_close __P((struct vop_close_args *));
106static int nfsfifo_close __P((struct vop_close_args *));
107#define nfs_poll vop_nopoll
108static int nfs_flush __P((struct vnode *,struct ucred *,int,struct thread *,int));
109static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct thread *));
110static int nfs_lookup __P((struct vop_lookup_args *));
111static int nfs_create __P((struct vop_create_args *));
112static int nfs_mknod __P((struct vop_mknod_args *));
113static int nfs_open __P((struct vop_open_args *));
114static int nfs_close __P((struct vop_close_args *));
115static int nfs_access __P((struct vop_access_args *));
116static int nfs_getattr __P((struct vop_getattr_args *));
117static int nfs_setattr __P((struct vop_setattr_args *));
118static int nfs_read __P((struct vop_read_args *));
119static int nfs_mmap __P((struct vop_mmap_args *));
120static int nfs_fsync __P((struct vop_fsync_args *));
121static int nfs_remove __P((struct vop_remove_args *));
122static int nfs_link __P((struct vop_link_args *));
123static int nfs_rename __P((struct vop_rename_args *));
124static int nfs_mkdir __P((struct vop_mkdir_args *));
125static int nfs_rmdir __P((struct vop_rmdir_args *));
126static int nfs_symlink __P((struct vop_symlink_args *));
127static int nfs_readdir __P((struct vop_readdir_args *));
128static int nfs_bmap __P((struct vop_bmap_args *));
129static int nfs_strategy __P((struct vop_strategy_args *));
130static int nfs_lookitup __P((struct vnode *, const char *, int,
131 struct ucred *, struct thread *, struct nfsnode **));
132static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *));
133static int nfsspec_access __P((struct vop_access_args *));
134static int nfs_readlink __P((struct vop_readlink_args *));
135static int nfs_print __P((struct vop_print_args *));
136static int nfs_advlock __P((struct vop_advlock_args *));
137static int nfs_bwrite __P((struct vop_bwrite_args *));
138/*
139 * Global vfs data structures for nfs
140 */
141vop_t **nfsv2_vnodeop_p;
142static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
143 { &vop_default_desc, (vop_t *) vop_defaultop },
144 { &vop_access_desc, (vop_t *) nfs_access },
145 { &vop_advlock_desc, (vop_t *) nfs_advlock },
146 { &vop_bmap_desc, (vop_t *) nfs_bmap },
147 { &vop_bwrite_desc, (vop_t *) nfs_bwrite },
148 { &vop_close_desc, (vop_t *) nfs_close },
149 { &vop_create_desc, (vop_t *) nfs_create },
150 { &vop_fsync_desc, (vop_t *) nfs_fsync },
151 { &vop_getattr_desc, (vop_t *) nfs_getattr },
152 { &vop_getpages_desc, (vop_t *) nfs_getpages },
153 { &vop_putpages_desc, (vop_t *) nfs_putpages },
154 { &vop_inactive_desc, (vop_t *) nfs_inactive },
155 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
156 { &vop_lease_desc, (vop_t *) vop_null },
157 { &vop_link_desc, (vop_t *) nfs_link },
158 { &vop_lock_desc, (vop_t *) vop_sharedlock },
159 { &vop_lookup_desc, (vop_t *) nfs_lookup },
160 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
161 { &vop_mknod_desc, (vop_t *) nfs_mknod },
162 { &vop_mmap_desc, (vop_t *) nfs_mmap },
163 { &vop_open_desc, (vop_t *) nfs_open },
164 { &vop_poll_desc, (vop_t *) nfs_poll },
165 { &vop_print_desc, (vop_t *) nfs_print },
166 { &vop_read_desc, (vop_t *) nfs_read },
167 { &vop_readdir_desc, (vop_t *) nfs_readdir },
168 { &vop_readlink_desc, (vop_t *) nfs_readlink },
169 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
170 { &vop_remove_desc, (vop_t *) nfs_remove },
171 { &vop_rename_desc, (vop_t *) nfs_rename },
172 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
173 { &vop_setattr_desc, (vop_t *) nfs_setattr },
174 { &vop_strategy_desc, (vop_t *) nfs_strategy },
175 { &vop_symlink_desc, (vop_t *) nfs_symlink },
176 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
177 { &vop_write_desc, (vop_t *) nfs_write },
178 { NULL, NULL }
179};
180static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
181 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
182VNODEOP_SET(nfsv2_vnodeop_opv_desc);
183
184/*
185 * Special device vnode ops
186 */
187vop_t **spec_nfsv2nodeop_p;
188static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
189 { &vop_default_desc, (vop_t *) spec_vnoperate },
190 { &vop_access_desc, (vop_t *) nfsspec_access },
191 { &vop_close_desc, (vop_t *) nfsspec_close },
192 { &vop_fsync_desc, (vop_t *) nfs_fsync },
193 { &vop_getattr_desc, (vop_t *) nfs_getattr },
194 { &vop_inactive_desc, (vop_t *) nfs_inactive },
195 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
196 { &vop_lock_desc, (vop_t *) vop_sharedlock },
197 { &vop_print_desc, (vop_t *) nfs_print },
198 { &vop_read_desc, (vop_t *) nfsspec_read },
199 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
200 { &vop_setattr_desc, (vop_t *) nfs_setattr },
201 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
202 { &vop_write_desc, (vop_t *) nfsspec_write },
203 { NULL, NULL }
204};
205static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
206 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
207VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
208
209vop_t **fifo_nfsv2nodeop_p;
210static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
211 { &vop_default_desc, (vop_t *) fifo_vnoperate },
212 { &vop_access_desc, (vop_t *) nfsspec_access },
213 { &vop_close_desc, (vop_t *) nfsfifo_close },
214 { &vop_fsync_desc, (vop_t *) nfs_fsync },
215 { &vop_getattr_desc, (vop_t *) nfs_getattr },
216 { &vop_inactive_desc, (vop_t *) nfs_inactive },
217 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
218 { &vop_lock_desc, (vop_t *) vop_sharedlock },
219 { &vop_print_desc, (vop_t *) nfs_print },
220 { &vop_read_desc, (vop_t *) nfsfifo_read },
221 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
222 { &vop_setattr_desc, (vop_t *) nfs_setattr },
223 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
224 { &vop_write_desc, (vop_t *) nfsfifo_write },
225 { NULL, NULL }
226};
227static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
228 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
229VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
230
231static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp,
232 struct componentname *cnp,
233 struct vattr *vap));
234static int nfs_removerpc __P((struct vnode *dvp, const char *name,
235 int namelen,
236 struct ucred *cred, struct thread *td));
237static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr,
238 int fnamelen, struct vnode *tdvp,
239 const char *tnameptr, int tnamelen,
240 struct ucred *cred, struct thread *td));
241static int nfs_renameit __P((struct vnode *sdvp,
242 struct componentname *scnp,
243 struct sillyrename *sp));
244
245/*
246 * Global variables
247 */
248extern u_int32_t nfs_true, nfs_false;
249extern u_int32_t nfs_xdrneg1;
250extern struct nfsstats nfsstats;
251extern nfstype nfsv3_type[9];
252struct thread *nfs_iodwant[NFS_MAXASYNCDAEMON];
253struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
254int nfs_numasync = 0;
255#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
256
257SYSCTL_DECL(_vfs_nfs);
258
259static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
260SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
261 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
262
263static int nfsv3_commit_on_close = 0;
264SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
265 &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
266#if 0
267SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
268 &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
269
270SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
271 &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
272#endif
273
274#define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
275 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
276 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
277static int
278nfs3_access_otw(struct vnode *vp, int wmode,
279 struct thread *td, struct ucred *cred)
280{
281 const int v3 = 1;
282 u_int32_t *tl;
283 int error = 0, attrflag;
284
285 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
286 caddr_t bpos, dpos, cp2;
287 register int32_t t1, t2;
288 register caddr_t cp;
289 u_int32_t rmode;
290 struct nfsnode *np = VTONFS(vp);
291
292 nfsstats.rpccnt[NFSPROC_ACCESS]++;
293 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
294 nfsm_fhtom(vp, v3);
295 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
296 *tl = txdr_unsigned(wmode);
297 nfsm_request(vp, NFSPROC_ACCESS, td, cred);
298 nfsm_postop_attr(vp, attrflag);
299 if (!error) {
300 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
301 rmode = fxdr_unsigned(u_int32_t, *tl);
302 np->n_mode = rmode;
303 np->n_modeuid = cred->cr_uid;
304 np->n_modestamp = time_second;
305 }
306 nfsm_reqdone;
307 return error;
308}
309
310/*
311 * nfs access vnode op.
312 * For nfs version 2, just return ok. File accesses may fail later.
313 * For nfs version 3, use the access rpc to check accessibility. If file modes
314 * are changed on the server, accesses might still fail later.
315 */
316static int
317nfs_access(ap)
318 struct vop_access_args /* {
319 struct vnode *a_vp;
320 int a_mode;
321 struct ucred *a_cred;
322 struct thread *a_td;
323 } */ *ap;
324{
325 register struct vnode *vp = ap->a_vp;
326 int error = 0;
327 u_int32_t mode, wmode;
328 int v3 = NFS_ISV3(vp);
329 struct nfsnode *np = VTONFS(vp);
330
331 /*
332 * Disallow write attempts on filesystems mounted read-only;
333 * unless the file is a socket, fifo, or a block or character
334 * device resident on the filesystem.
335 */
336 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
337 switch (vp->v_type) {
338 case VREG:
339 case VDIR:
340 case VLNK:
341 return (EROFS);
342 default:
343 break;
344 }
345 }
346 /*
347 * For nfs v3, check to see if we have done this recently, and if
348 * so return our cached result instead of making an ACCESS call.
349 * If not, do an access rpc, otherwise you are stuck emulating
350 * ufs_access() locally using the vattr. This may not be correct,
351 * since the server may apply other access criteria such as
352 * client uid-->server uid mapping that we do not know about.
353 */
354 if (v3) {
355 if (ap->a_mode & VREAD)
356 mode = NFSV3ACCESS_READ;
357 else
358 mode = 0;
359 if (vp->v_type != VDIR) {
360 if (ap->a_mode & VWRITE)
361 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
362 if (ap->a_mode & VEXEC)
363 mode |= NFSV3ACCESS_EXECUTE;
364 } else {
365 if (ap->a_mode & VWRITE)
366 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
367 NFSV3ACCESS_DELETE);
368 if (ap->a_mode & VEXEC)
369 mode |= NFSV3ACCESS_LOOKUP;
370 }
371 /* XXX safety belt, only make blanket request if caching */
372 if (nfsaccess_cache_timeout > 0) {
373 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
374 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
375 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
376 } else {
377 wmode = mode;
378 }
379
380 /*
381 * Does our cached result allow us to give a definite yes to
382 * this request?
383 */
384 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
385 (ap->a_cred->cr_uid == np->n_modeuid) &&
386 ((np->n_mode & mode) == mode)) {
387 nfsstats.accesscache_hits++;
388 } else {
389 /*
390 * Either a no, or a don't know. Go to the wire.
391 */
392 nfsstats.accesscache_misses++;
393 error = nfs3_access_otw(vp, wmode, ap->a_td, ap->a_cred);
394 if (!error) {
395 if ((np->n_mode & mode) != mode) {
396 error = EACCES;
397 }
398 }
399 }
400 return (error);
401 } else {
402 if ((error = nfsspec_access(ap)) != 0)
403 return (error);
404
405 /*
406 * Attempt to prevent a mapped root from accessing a file
407 * which it shouldn't. We try to read a byte from the file
408 * if the user is root and the file is not zero length.
409 * After calling nfsspec_access, we should have the correct
410 * file size cached.
411 */
412 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
413 && VTONFS(vp)->n_size > 0) {
414 struct iovec aiov;
415 struct uio auio;
416 char buf[1];
417
418 aiov.iov_base = buf;
419 aiov.iov_len = 1;
420 auio.uio_iov = &aiov;
421 auio.uio_iovcnt = 1;
422 auio.uio_offset = 0;
423 auio.uio_resid = 1;
424 auio.uio_segflg = UIO_SYSSPACE;
425 auio.uio_rw = UIO_READ;
426 auio.uio_td = ap->a_td;
427
428 if (vp->v_type == VREG)
429 error = nfs_readrpc(vp, &auio, ap->a_cred);
430 else if (vp->v_type == VDIR) {
431 char* bp;
432 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
433 aiov.iov_base = bp;
434 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
435 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
436 free(bp, M_TEMP);
437 } else if (vp->v_type == VLNK)
438 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
439 else
440 error = EACCES;
441 }
442 return (error);
443 }
444}
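
/*
 * Illustrative sketch (not part of the original file): the cached-ACCESS
 * test above boils down to three conditions -- the cache entry is still
 * fresh, it was filled for the same credential, and the cached rights
 * cover everything the caller asked for.  A minimal stand-alone model of
 * that predicate, using hypothetical names:
 */
#if 0
struct access_cache {
	time_t	stamp;		/* when the ACCESS reply was cached */
	uid_t	uid;		/* credential the reply applies to */
	u_int	mode;		/* rights the server granted */
};

static int
access_cache_answers(const struct access_cache *ac, time_t now, uid_t uid,
    u_int wanted, int timeout)
{
	return (now < ac->stamp + timeout && uid == ac->uid &&
	    (ac->mode & wanted) == wanted);
}
#endif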
445
446/*
447 * nfs open vnode op
448 * Check to see if the type is ok
449 * and that deletion is not in progress.
450 * For paged in text files, you will need to flush the page cache
451 * if consistency is lost.
452 */
453/* ARGSUSED */
454static int
455nfs_open(ap)
456 struct vop_open_args /* {
457 struct vnode *a_vp;
458 int a_mode;
459 struct ucred *a_cred;
460 struct thread *a_td;
461 } */ *ap;
462{
463 register struct vnode *vp = ap->a_vp;
464 struct nfsnode *np = VTONFS(vp);
465 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
466 struct vattr vattr;
467 int error;
468
469 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
470#ifdef DIAGNOSTIC
471 printf("open eacces vtyp=%d\n",vp->v_type);
472#endif
473 return (EACCES);
474 }
475 /*
476 * Get a valid lease. If cached data is stale, flush it.
477 */
478 if (nmp->nm_flag & NFSMNT_NQNFS) {
479 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
480 do {
481 error = nqnfs_getlease(vp, ND_READ, ap->a_cred,
482 ap->a_td);
483 } while (error == NQNFS_EXPIRED);
484 if (error)
485 return (error);
486 if (np->n_lrev != np->n_brev ||
487 (np->n_flag & NQNFSNONCACHE)) {
488 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
489 ap->a_td, 1)) == EINTR
490 return (error);
491 np->n_brev = np->n_lrev;
492 }
493 }
494 } else {
495 if (np->n_flag & NMODIFIED) {
496 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
497 ap->a_td, 1)) == EINTR
498 return (error);
499 np->n_attrstamp = 0;
500 if (vp->v_type == VDIR)
501 np->n_direofoffset = 0;
502 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td);
503 if (error)
504 return (error);
505 np->n_mtime = vattr.va_mtime.tv_sec;
506 } else {
507 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td);
508 if (error)
509 return (error);
510 if (np->n_mtime != vattr.va_mtime.tv_sec) {
511 if (vp->v_type == VDIR)
512 np->n_direofoffset = 0;
513 if ((error = nfs_vinvalbuf(vp, V_SAVE,
514 ap->a_cred, ap->a_td, 1)) == EINTR
515 return (error);
516 np->n_mtime = vattr.va_mtime.tv_sec;
517 }
518 }
519 }
520 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
521 np->n_attrstamp = 0; /* For Open/Close consistency */
522 return (0);
523}
524
525/*
526 * nfs close vnode op
527 * What an NFS client should do upon close after writing is a debatable issue.
528 * Most NFS clients push delayed writes to the server upon close, basically for
529 * two reasons:
530 * 1 - So that any write errors may be reported back to the client process
531 * doing the close system call. By far the two most likely errors are
532 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
533 * 2 - To put a worst case upper bound on cache inconsistency between
534 * multiple clients for the file.
535 * There is also a consistency problem for Version 2 of the protocol w.r.t.
536 * not being able to tell if other clients are writing a file concurrently,
537 * since there is no way of knowing if the changed modify time in the reply
538 * is only due to the write for this client.
539 * (NFS Version 3 provides weak cache consistency data in the reply that
540 * should be sufficient to detect and handle this case.)
541 *
542 * The current code does the following:
543 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
544 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
545 * or commit them (this satisfies 1 and 2 except for the
546 * case where the server crashes after this close but
547 * before the commit RPC, which is felt to be "good
548 * enough". Changing the last argument to nfs_flush() to
549 * a 1 would force a commit operation, if it is felt a
550 * commit is necessary now.
551 * for NQNFS - do nothing now, since 2 is dealt with via leases and
552 * 1 should be dealt with via an fsync() system call for
553 * cases where write errors are important.
554 */
555/* ARGSUSED */
556static int
557nfs_close(ap)
558 struct vop_close_args /* {
559 struct vnodeop_desc *a_desc;
560 struct vnode *a_vp;
561 int a_fflag;
562 struct ucred *a_cred;
563 struct thread *a_td;
564 } */ *ap;
565{
566 register struct vnode *vp = ap->a_vp;
567 register struct nfsnode *np = VTONFS(vp);
568 int error = 0;
569
570 if (vp->v_type == VREG) {
571 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
572 (np->n_flag & NMODIFIED)) {
573 if (NFS_ISV3(vp)) {
574 /*
575 * Under NFSv3 we have dirty buffers to dispose of. We
576 * must flush them to the NFS server. We have the option
577 * of waiting all the way through the commit rpc or just
578 * waiting for the initial write. The default is to only
579 * wait through the initial write so the data is in the
580 * server's cache, which is roughly similar to the state
581 * a standard disk subsystem leaves the file in on close().
582 *
583 * We cannot clear the NMODIFIED bit in np->n_flag due to
584 * potential races with other processes, and certainly
585 * cannot clear it if we don't commit.
586 */
587 int cm = nfsv3_commit_on_close ? 1 : 0;
588 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_td, cm);
589 /* np->n_flag &= ~NMODIFIED; */
590 } else {
591 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_td, 1);
592 }
593 np->n_attrstamp = 0;
594 }
595 if (np->n_flag & NWRITEERR) {
596 np->n_flag &= ~NWRITEERR;
597 error = np->n_error;
598 }
599 }
600 return (error);
601}
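
/*
 * Illustrative sketch (not part of the original file): the close-time
 * policy described above reduces to "NFSv3 flushes dirty buffers and
 * optionally commits, NFSv2 flushes and invalidates".  A hypothetical
 * helper showing just that decision:
 */
#if 0
enum close_action { FLUSH_ONLY, FLUSH_AND_COMMIT, FLUSH_AND_INVALIDATE };

static enum close_action
close_flush_policy(int is_v3, int commit_on_close)
{
	if (is_v3)
		return (commit_on_close ? FLUSH_AND_COMMIT : FLUSH_ONLY);
	return (FLUSH_AND_INVALIDATE);
}
#endif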
602
603/*
604 * nfs getattr call from vfs.
605 */
606static int
607nfs_getattr(ap)
608 struct vop_getattr_args /* {
609 struct vnode *a_vp;
610 struct vattr *a_vap;
611 struct ucred *a_cred;
612 struct thread *a_td;
613 } */ *ap;
614{
615 register struct vnode *vp = ap->a_vp;
616 register struct nfsnode *np = VTONFS(vp);
617 register caddr_t cp;
618 register u_int32_t *tl;
619 register int32_t t1, t2;
620 caddr_t bpos, dpos;
621 int error = 0;
622 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
623 int v3 = NFS_ISV3(vp);
624
625 /*
626 * Update local times for special files.
627 */
628 if (np->n_flag & (NACC | NUPD))
629 np->n_flag |= NCHG;
630 /*
631 * First look in the cache.
632 */
633 if (nfs_getattrcache(vp, ap->a_vap) == 0)
634 return (0);
635
636 if (v3 && nfsaccess_cache_timeout > 0) {
637 nfsstats.accesscache_misses++;
638 nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_td, ap->a_cred);
639 if (nfs_getattrcache(vp, ap->a_vap) == 0)
640 return (0);
641 }
642
643 nfsstats.rpccnt[NFSPROC_GETATTR]++;
644 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
645 nfsm_fhtom(vp, v3);
646 nfsm_request(vp, NFSPROC_GETATTR, ap->a_td, ap->a_cred);
647 if (!error) {
648 nfsm_loadattr(vp, ap->a_vap);
649 }
650 nfsm_reqdone;
651 return (error);
652}
653
654/*
655 * nfs setattr call.
656 */
657static int
658nfs_setattr(ap)
659 struct vop_setattr_args /* {
660 struct vnodeop_desc *a_desc;
661 struct vnode *a_vp;
662 struct vattr *a_vap;
663 struct ucred *a_cred;
664 struct thread *a_td;
665 } */ *ap;
666{
667 register struct vnode *vp = ap->a_vp;
668 register struct nfsnode *np = VTONFS(vp);
669 register struct vattr *vap = ap->a_vap;
670 int error = 0;
671 u_quad_t tsize;
672
673#ifndef nolint
674 tsize = (u_quad_t)0;
675#endif
676
677 /*
678 * Setting of flags is not supported.
679 */
680 if (vap->va_flags != VNOVAL)
681 return (EOPNOTSUPP);
682
683 /*
684 * Disallow write attempts if the filesystem is mounted read-only.
685 */
686 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
687 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
688 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
689 (vp->v_mount->mnt_flag & MNT_RDONLY))
690 return (EROFS);
691 if (vap->va_size != VNOVAL) {
692 switch (vp->v_type) {
693 case VDIR:
694 return (EISDIR);
695 case VCHR:
696 case VBLK:
697 case VSOCK:
698 case VFIFO:
699 if (vap->va_mtime.tv_sec == VNOVAL &&
700 vap->va_atime.tv_sec == VNOVAL &&
701 vap->va_mode == (mode_t)VNOVAL &&
702 vap->va_uid == (uid_t)VNOVAL &&
703 vap->va_gid == (gid_t)VNOVAL)
704 return (0);
705 vap->va_size = VNOVAL;
706 break;
707 default:
708 /*
709 * Disallow write attempts if the filesystem is
710 * mounted read-only.
711 */
712 if (vp->v_mount->mnt_flag & MNT_RDONLY)
713 return (EROFS);
714
715 /*
716 * We run vnode_pager_setsize() early (why?),
717 * we must set np->n_size now to avoid vinvalbuf
718 * V_SAVE races that might setsize a lower
719 * value.
720 */
721
722 tsize = np->n_size;
723 error = nfs_meta_setsize(vp, ap->a_cred,
724 ap->a_td, vap->va_size);
725
726 if (np->n_flag & NMODIFIED) {
727 if (vap->va_size == 0)
728 error = nfs_vinvalbuf(vp, 0,
729 ap->a_cred, ap->a_td, 1);
730 else
731 error = nfs_vinvalbuf(vp, V_SAVE,
732 ap->a_cred, ap->a_td, 1);
733 if (error) {
734 np->n_size = tsize;
735 vnode_pager_setsize(vp, np->n_size);
736 return (error);
737 }
738 }
739 np->n_vattr.va_size = vap->va_size;
740 };
741 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
742 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
743 vp->v_type == VREG &&
744 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
745 ap->a_td, 1)) == EINTR
746 return (error);
747 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_td);
748 if (error && vap->va_size != VNOVAL) {
749 np->n_size = np->n_vattr.va_size = tsize;
750 vnode_pager_setsize(vp, np->n_size);
751 }
752 return (error);
753}
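
/*
 * Illustrative sketch (not part of the original file): for truncates,
 * nfs_setattr() above saves the previous size so that a failed flush or
 * SETATTR RPC can restore both n_size and the VM pager's idea of the
 * file size.  The shape of that rollback, with a hypothetical
 * try_truncate() standing in for the setsize/flush/RPC sequence:
 */
#if 0
static int
truncate_with_rollback(struct vnode *vp, struct nfsnode *np,
    u_quad_t newsize, int (*try_truncate)(struct vnode *, u_quad_t))
{
	u_quad_t tsize = np->n_size;
	int error;

	if ((error = try_truncate(vp, newsize)) != 0) {
		np->n_size = np->n_vattr.va_size = tsize;
		vnode_pager_setsize(vp, np->n_size);	/* undo the resize */
	}
	return (error);
}
#endif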
754
755/*
756 * Do an nfs setattr rpc.
757 */
758static int
759nfs_setattrrpc(struct vnode *vp, struct vattr *vap,
760 struct ucred *cred, struct thread *td)
761{
762 register struct nfsv2_sattr *sp;
763 register caddr_t cp;
764 register int32_t t1, t2;
765 caddr_t bpos, dpos, cp2;
766 u_int32_t *tl;
767 int error = 0, wccflag = NFSV3_WCCRATTR;
768 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
769 int v3 = NFS_ISV3(vp);
770
771 nfsstats.rpccnt[NFSPROC_SETATTR]++;
772 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
773 nfsm_fhtom(vp, v3);
774 if (v3) {
775 nfsm_v3attrbuild(vap, TRUE);
776 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
777 *tl = nfs_false;
778 } else {
779 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
780 if (vap->va_mode == (mode_t)VNOVAL)
781 sp->sa_mode = nfs_xdrneg1;
782 else
783 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
784 if (vap->va_uid == (uid_t)VNOVAL)
785 sp->sa_uid = nfs_xdrneg1;
786 else
787 sp->sa_uid = txdr_unsigned(vap->va_uid);
788 if (vap->va_gid == (gid_t)VNOVAL)
789 sp->sa_gid = nfs_xdrneg1;
790 else
791 sp->sa_gid = txdr_unsigned(vap->va_gid);
792 sp->sa_size = txdr_unsigned(vap->va_size);
793 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
794 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
795 }
796 nfsm_request(vp, NFSPROC_SETATTR, td, cred);
797 if (v3) {
798 nfsm_wcc_data(vp, wccflag);
799 } else
800 nfsm_loadattr(vp, (struct vattr *)0);
801 nfsm_reqdone;
802 return (error);
803}
804
805/*
806 * nfs lookup call, one step at a time...
807 * First look in cache
808 * If not found, unlock the directory nfsnode and do the rpc
809 */
810static int
811nfs_lookup(ap)
812 struct vop_lookup_args /* {
813 struct vnodeop_desc *a_desc;
814 struct vnode *a_dvp;
815 struct vnode **a_vpp;
816 struct componentname *a_cnp;
817 } */ *ap;
818{
819 struct componentname *cnp = ap->a_cnp;
820 struct vnode *dvp = ap->a_dvp;
821 struct vnode **vpp = ap->a_vpp;
822 int flags = cnp->cn_flags;
823 struct vnode *newvp;
824 u_int32_t *tl;
825 caddr_t cp;
826 int32_t t1, t2;
827 struct nfsmount *nmp;
828 caddr_t bpos, dpos, cp2;
829 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
830 long len;
831 nfsfh_t *fhp;
832 struct nfsnode *np;
833 int lockparent, wantparent, error = 0, attrflag, fhsize;
834 int v3 = NFS_ISV3(dvp);
835 struct thread *td = cnp->cn_td;
836
837 *vpp = NULLVP;
838 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
839 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
840 return (EROFS);
841 if (dvp->v_type != VDIR)
842 return (ENOTDIR);
843 lockparent = flags & LOCKPARENT;
844 wantparent = flags & (LOCKPARENT|WANTPARENT);
845 nmp = VFSTONFS(dvp->v_mount);
846 np = VTONFS(dvp);
847 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
848 struct vattr vattr;
849 int vpid;
850
851 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) {
852 *vpp = NULLVP;
853 return (error);
854 }
855
856 newvp = *vpp;
857 vpid = newvp->v_id;
858 /*
859 * See the comment starting `Step through' in ufs/ufs_lookup.c
860 * for an explanation of the locking protocol
861 */
862 if (dvp == newvp) {
863 VREF(newvp);
864 error = 0;
865 } else if (flags & ISDOTDOT) {
866 VOP_UNLOCK(dvp, 0, td);
867 error = vget(newvp, LK_EXCLUSIVE, td);
868 if (!error && lockparent && (flags & ISLASTCN))
869 error = vn_lock(dvp, LK_EXCLUSIVE, td);
870 } else {
871 error = vget(newvp, LK_EXCLUSIVE, td);
872 if (!lockparent || error || !(flags & ISLASTCN))
873 VOP_UNLOCK(dvp, 0, td);
874 }
875 if (!error) {
876 if (vpid == newvp->v_id) {
877 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, td)
878 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
879 nfsstats.lookupcache_hits++;
880 if (cnp->cn_nameiop != LOOKUP &&
881 (flags & ISLASTCN))
882 cnp->cn_flags |= SAVENAME;
883 return (0);
884 }
885 cache_purge(newvp);
886 }
887 vput(newvp);
888 if (lockparent && dvp != newvp && (flags & ISLASTCN))
889 VOP_UNLOCK(dvp, 0, td);
890 }
891 error = vn_lock(dvp, LK_EXCLUSIVE, td);
892 *vpp = NULLVP;
893 if (error)
894 return (error);
895 }
896 error = 0;
897 newvp = NULLVP;
898 nfsstats.lookupcache_misses++;
899 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
900 len = cnp->cn_namelen;
901 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
902 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
903 nfsm_fhtom(dvp, v3);
904 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
905 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_td, cnp->cn_cred);
906 if (error) {
907 nfsm_postop_attr(dvp, attrflag);
908 m_freem(mrep);
909 goto nfsmout;
910 }
911 nfsm_getfh(fhp, fhsize, v3);
912
913 /*
914 * Handle RENAME case...
915 */
916 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
917 if (NFS_CMPFH(np, fhp, fhsize)) {
918 m_freem(mrep);
919 return (EISDIR);
920 }
921 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
922 if (error) {
923 m_freem(mrep);
924 return (error);
925 }
926 newvp = NFSTOV(np);
927 if (v3) {
928 nfsm_postop_attr(newvp, attrflag);
929 nfsm_postop_attr(dvp, attrflag);
930 } else
931 nfsm_loadattr(newvp, (struct vattr *)0);
932 *vpp = newvp;
933 m_freem(mrep);
934 cnp->cn_flags |= SAVENAME;
935 if (!lockparent)
936 VOP_UNLOCK(dvp, 0, td);
937 return (0);
938 }
939
940 if (flags & ISDOTDOT) {
941 VOP_UNLOCK(dvp, 0, td);
942 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
943 if (error) {
944 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
945 return (error);
946 }
947 newvp = NFSTOV(np);
948 if (lockparent && (flags & ISLASTCN) &&
949 (error = vn_lock(dvp, LK_EXCLUSIVE, td))) {
950 vput(newvp);
951 return (error);
952 }
953 } else if (NFS_CMPFH(np, fhp, fhsize)) {
954 VREF(dvp);
955 newvp = dvp;
956 } else {
957 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
958 if (error) {
959 m_freem(mrep);
960 return (error);
961 }
962 if (!lockparent || !(flags & ISLASTCN))
963 VOP_UNLOCK(dvp, 0, td);
964 newvp = NFSTOV(np);
965 }
966 if (v3) {
967 nfsm_postop_attr(newvp, attrflag);
968 nfsm_postop_attr(dvp, attrflag);
969 } else
970 nfsm_loadattr(newvp, (struct vattr *)0);
971 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
972 cnp->cn_flags |= SAVENAME;
973 if ((cnp->cn_flags & MAKEENTRY) &&
974 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
975 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
976 cache_enter(dvp, newvp, cnp);
977 }
978 *vpp = newvp;
979 nfsm_reqdone;
980 if (error) {
981 if (newvp != NULLVP) {
982 vrele(newvp);
983 *vpp = NULLVP;
984 }
985 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
986 (flags & ISLASTCN) && error == ENOENT) {
987 if (!lockparent)
988 VOP_UNLOCK(dvp, 0, td);
989 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
990 error = EROFS;
991 else
992 error = EJUSTRETURN;
993 }
994 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
995 cnp->cn_flags |= SAVENAME;
996 }
997 return (error);
998}
999
1000/*
1001 * nfs read call.
1002 * Just call nfs_bioread() to do the work.
1003 */
1004static int
1005nfs_read(ap)
1006 struct vop_read_args /* {
1007 struct vnode *a_vp;
1008 struct uio *a_uio;
1009 int a_ioflag;
1010 struct ucred *a_cred;
1011 } */ *ap;
1012{
1013 register struct vnode *vp = ap->a_vp;
1014
1015 if (vp->v_type != VREG)
1016 return (EPERM);
1017 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1018}
1019
1020/*
1021 * nfs readlink call
1022 */
1023static int
1024nfs_readlink(ap)
1025 struct vop_readlink_args /* {
1026 struct vnode *a_vp;
1027 struct uio *a_uio;
1028 struct ucred *a_cred;
1029 } */ *ap;
1030{
1031 register struct vnode *vp = ap->a_vp;
1032
1033 if (vp->v_type != VLNK)
1034 return (EINVAL);
1035 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
1036}
1037
1038/*
1039 * Do a readlink rpc.
1040 * Called by nfs_doio() from below the buffer cache.
1041 */
1042int
1043nfs_readlinkrpc(vp, uiop, cred)
1044 register struct vnode *vp;
1045 struct uio *uiop;
1046 struct ucred *cred;
1047{
1048 register u_int32_t *tl;
1049 register caddr_t cp;
1050 register int32_t t1, t2;
1051 caddr_t bpos, dpos, cp2;
1052 int error = 0, len, attrflag;
1053 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1054 int v3 = NFS_ISV3(vp);
1055
1056 nfsstats.rpccnt[NFSPROC_READLINK]++;
1057 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1058 nfsm_fhtom(vp, v3);
1059 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, cred);
1060 if (v3)
1061 nfsm_postop_attr(vp, attrflag);
1062 if (!error) {
1063 nfsm_strsiz(len, NFS_MAXPATHLEN);
1064 if (len == NFS_MAXPATHLEN) {
1065 struct nfsnode *np = VTONFS(vp);
1066 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
1067 len = np->n_size;
1068 }
1069 nfsm_mtouio(uiop, len);
1070 }
1071 nfsm_reqdone;
1072 return (error);
1073}
1074
1075/*
1076 * nfs read rpc call
1077 * Ditto above
1078 */
1079int
1080nfs_readrpc(vp, uiop, cred)
1081 register struct vnode *vp;
1082 struct uio *uiop;
1083 struct ucred *cred;
1084{
1085 register u_int32_t *tl;
1086 register caddr_t cp;
1087 register int32_t t1, t2;
1088 caddr_t bpos, dpos, cp2;
1089 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1090 struct nfsmount *nmp;
1091 int error = 0, len, retlen, tsiz, eof, attrflag;
1092 int v3 = NFS_ISV3(vp);
1093
1094#ifndef nolint
1095 eof = 0;
1096#endif
1097 nmp = VFSTONFS(vp->v_mount);
1098 tsiz = uiop->uio_resid;
1099 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1100 return (EFBIG);
1101 while (tsiz > 0) {
1102 nfsstats.rpccnt[NFSPROC_READ]++;
1103 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1104 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1105 nfsm_fhtom(vp, v3);
1106 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1107 if (v3) {
1108 txdr_hyper(uiop->uio_offset, tl);
1109 *(tl + 2) = txdr_unsigned(len);
1110 } else {
1111 *tl++ = txdr_unsigned(uiop->uio_offset);
1112 *tl++ = txdr_unsigned(len);
1113 *tl = 0;
1114 }
1115 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, cred);
1116 if (v3) {
1117 nfsm_postop_attr(vp, attrflag);
1118 if (error) {
1119 m_freem(mrep);
1120 goto nfsmout;
1121 }
1122 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1123 eof = fxdr_unsigned(int, *(tl + 1));
1124 } else
1125 nfsm_loadattr(vp, (struct vattr *)0);
1126 nfsm_strsiz(retlen, nmp->nm_rsize);
1127 nfsm_mtouio(uiop, retlen);
1128 m_freem(mrep);
1129 tsiz -= retlen;
1130 if (v3) {
1131 if (eof || retlen == 0) {
1132 tsiz = 0;
1133 }
1134 } else if (retlen < len) {
1135 tsiz = 0;
1136 }
1137 }
1138nfsmout:
1139 return (error);
1140}
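
/*
 * Illustrative sketch (not part of the original file): nfs_readrpc()
 * above splits the request into READ RPCs of at most nm_rsize bytes and
 * stops early on an EOF-flagged (v3) or short (v2) reply.  A user-space
 * model of that loop with a hypothetical read_chunk() callback:
 */
#if 0
static int
chunked_read(size_t resid, size_t rsize,
    int (*read_chunk)(size_t want, size_t *got, int *eof))
{
	size_t want, got;
	int eof, error;

	while (resid > 0) {
		want = resid > rsize ? rsize : resid;
		if ((error = read_chunk(want, &got, &eof)) != 0)
			return (error);
		resid -= got;
		if (eof || got < want)		/* server reached EOF */
			break;
	}
	return (0);
}
#endif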
1141
1142/*
1143 * nfs write call
1144 */
1145int
1146nfs_writerpc(vp, uiop, cred, iomode, must_commit)
1147 register struct vnode *vp;
1148 register struct uio *uiop;
1149 struct ucred *cred;
1150 int *iomode, *must_commit;
1151{
1152 register u_int32_t *tl;
1153 register caddr_t cp;
1154 register int32_t t1, t2, backup;
1155 caddr_t bpos, dpos, cp2;
1156 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1157 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1158 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1159 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1160
1161#ifndef DIAGNOSTIC
1162 if (uiop->uio_iovcnt != 1)
1163 panic("nfs: writerpc iovcnt > 1");
1164#endif
1165 *must_commit = 0;
1166 tsiz = uiop->uio_resid;
1167 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1168 return (EFBIG);
1169 while (tsiz > 0) {
1170 nfsstats.rpccnt[NFSPROC_WRITE]++;
1171 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1172 nfsm_reqhead(vp, NFSPROC_WRITE,
1173 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1174 nfsm_fhtom(vp, v3);
1175 if (v3) {
1176 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1177 txdr_hyper(uiop->uio_offset, tl);
1178 tl += 2;
1179 *tl++ = txdr_unsigned(len);
1180 *tl++ = txdr_unsigned(*iomode);
1181 *tl = txdr_unsigned(len);
1182 } else {
1183 register u_int32_t x;
1184
1185 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1186 /* Set both "begin" and "current" to non-garbage. */
1187 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1188 *tl++ = x; /* "begin offset" */
1189 *tl++ = x; /* "current offset" */
1190 x = txdr_unsigned(len);
1191 *tl++ = x; /* total to this offset */
1192 *tl = x; /* size of this write */
1193 }
1194 nfsm_uiotom(uiop, len);
1195 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, cred);
1196 if (v3) {
1197 wccflag = NFSV3_WCCCHK;
1198 nfsm_wcc_data(vp, wccflag);
1199 if (!error) {
1200 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1201 + NFSX_V3WRITEVERF);
1202 rlen = fxdr_unsigned(int, *tl++);
1203 if (rlen == 0) {
1204 error = NFSERR_IO;
1205 m_freem(mrep);
1206 break;
1207 } else if (rlen < len) {
1208 backup = len - rlen;
1209 uiop->uio_iov->iov_base -= backup;
1210 uiop->uio_iov->iov_len += backup;
1211 uiop->uio_offset -= backup;
1212 uiop->uio_resid += backup;
1213 len = rlen;
1214 }
1215 commit = fxdr_unsigned(int, *tl++);
1216
1217 /*
1218 * Return the lowest commitment level
1219 * obtained by any of the RPCs.
1220 */
1221 if (committed == NFSV3WRITE_FILESYNC)
1222 committed = commit;
1223 else if (committed == NFSV3WRITE_DATASYNC &&
1224 commit == NFSV3WRITE_UNSTABLE)
1225 committed = commit;
1226 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1227 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1228 NFSX_V3WRITEVERF);
1229 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1230 } else if (bcmp((caddr_t)tl,
1231 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1232 *must_commit = 1;
1233 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1234 NFSX_V3WRITEVERF);
1235 }
1236 }
1237 } else
1238 nfsm_loadattr(vp, (struct vattr *)0);
1239 if (wccflag)
1240 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1241 m_freem(mrep);
1242 if (error)
1243 break;
1244 tsiz -= len;
1245 }
1246nfsmout:
1247 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1248 committed = NFSV3WRITE_FILESYNC;
1249 *iomode = committed;
1250 if (error)
1251 uiop->uio_resid = tsiz;
1252 return (error);
1253}
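
/*
 * Illustrative sketch (not part of the original file): when a write is
 * split across several WRITE RPCs, nfs_writerpc() above reports the
 * weakest commitment level any reply claimed (FILESYNC is stronger than
 * DATASYNC, which is stronger than UNSTABLE).  The merge step, lifted
 * out as a stand-alone helper:
 */
#if 0
static int
merge_commit_level(int committed, int commit)
{
	if (committed == NFSV3WRITE_FILESYNC)
		committed = commit;
	else if (committed == NFSV3WRITE_DATASYNC &&
	    commit == NFSV3WRITE_UNSTABLE)
		committed = commit;
	return (committed);
}
#endif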
1254
1255/*
1256 * nfs mknod rpc
1257 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1258 * mode set to specify the file type and the size field for rdev.
1259 */
1260static int
1261nfs_mknodrpc(dvp, vpp, cnp, vap)
1262 register struct vnode *dvp;
1263 register struct vnode **vpp;
1264 register struct componentname *cnp;
1265 register struct vattr *vap;
1266{
1267 register struct nfsv2_sattr *sp;
1268 register u_int32_t *tl;
1269 register caddr_t cp;
1270 register int32_t t1, t2;
1271 struct vnode *newvp = (struct vnode *)0;
1272 struct nfsnode *np = (struct nfsnode *)0;
1273 struct vattr vattr;
1274 char *cp2;
1275 caddr_t bpos, dpos;
1276 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1277 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1278 u_int32_t rdev;
1279 int v3 = NFS_ISV3(dvp);
1280
1281 if (vap->va_type == VCHR || vap->va_type == VBLK)
1282 rdev = txdr_unsigned(vap->va_rdev);
1283 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1284 rdev = nfs_xdrneg1;
1285 else {
1286 return (EOPNOTSUPP);
1287 }
1288 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_td)) != 0) {
1289 return (error);
1290 }
1291 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1292 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1293 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1294 nfsm_fhtom(dvp, v3);
1295 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1296 if (v3) {
1297 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1298 *tl++ = vtonfsv3_type(vap->va_type);
1299 nfsm_v3attrbuild(vap, FALSE);
1300 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1301 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1302 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1303 *tl = txdr_unsigned(uminor(vap->va_rdev));
1304 }
1305 } else {
1306 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1307 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1308 sp->sa_uid = nfs_xdrneg1;
1309 sp->sa_gid = nfs_xdrneg1;
1310 sp->sa_size = rdev;
1311 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1312 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1313 }
1314 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_td, cnp->cn_cred);
1315 if (!error) {
1316 nfsm_mtofh(dvp, newvp, v3, gotvp);
1317 if (!gotvp) {
1318 if (newvp) {
1319 vput(newvp);
1320 newvp = (struct vnode *)0;
1321 }
1322 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1323 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td, &np);
1324 if (!error)
1325 newvp = NFSTOV(np);
1326 }
1327 }
1328 if (v3)
1329 nfsm_wcc_data(dvp, wccflag);
1330 nfsm_reqdone;
1331 if (error) {
1332 if (newvp)
1333 vput(newvp);
1334 } else {
1335 if (cnp->cn_flags & MAKEENTRY)
1336 cache_enter(dvp, newvp, cnp);
1337 *vpp = newvp;
1338 }
1339 VTONFS(dvp)->n_flag |= NMODIFIED;
1340 if (!wccflag)
1341 VTONFS(dvp)->n_attrstamp = 0;
1342 return (error);
1343}
1344
1345/*
1346 * nfs mknod vop
1347 * just call nfs_mknodrpc() to do the work.
1348 */
1349/* ARGSUSED */
1350static int
1351nfs_mknod(ap)
1352 struct vop_mknod_args /* {
1353 struct vnode *a_dvp;
1354 struct vnode **a_vpp;
1355 struct componentname *a_cnp;
1356 struct vattr *a_vap;
1357 } */ *ap;
1358{
1359 return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1360}
1361
1362static u_long create_verf;
1363/*
1364 * nfs file create call
1365 */
1366static int
1367nfs_create(ap)
1368 struct vop_create_args /* {
1369 struct vnode *a_dvp;
1370 struct vnode **a_vpp;
1371 struct componentname *a_cnp;
1372 struct vattr *a_vap;
1373 } */ *ap;
1374{
1375 register struct vnode *dvp = ap->a_dvp;
1376 register struct vattr *vap = ap->a_vap;
1377 register struct componentname *cnp = ap->a_cnp;
1378 register struct nfsv2_sattr *sp;
1379 register u_int32_t *tl;
1380 register caddr_t cp;
1381 register int32_t t1, t2;
1382 struct nfsnode *np = (struct nfsnode *)0;
1383 struct vnode *newvp = (struct vnode *)0;
1384 caddr_t bpos, dpos, cp2;
1385 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1386 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1387 struct vattr vattr;
1388 int v3 = NFS_ISV3(dvp);
1389
1390 /*
1391 * Oops, not for me..
1392 */
1393 if (vap->va_type == VSOCK)
1394 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1395
1396 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_td)) != 0) {
1397 return (error);
1398 }
1399 if (vap->va_vaflags & VA_EXCLUSIVE)
1400 fmode |= O_EXCL;
1401again:
1402 nfsstats.rpccnt[NFSPROC_CREATE]++;
1403 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1404 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1405 nfsm_fhtom(dvp, v3);
1406 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1407 if (v3) {
1408 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1409 if (fmode & O_EXCL) {
1410 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1411 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1412#ifdef INET
1413 if (!TAILQ_EMPTY(&in_ifaddrhead))
1414 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
1415 else
1416#endif
1417 *tl++ = create_verf;
1418 *tl = ++create_verf;
1419 } else {
1420 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1421 nfsm_v3attrbuild(vap, FALSE);
1422 }
1423 } else {
1424 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1425 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1426 sp->sa_uid = nfs_xdrneg1;
1427 sp->sa_gid = nfs_xdrneg1;
1428 sp->sa_size = 0;
1429 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1430 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1431 }
1432 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_td, cnp->cn_cred);
1433 if (!error) {
1434 nfsm_mtofh(dvp, newvp, v3, gotvp);
1435 if (!gotvp) {
1436 if (newvp) {
1437 vput(newvp);
1438 newvp = (struct vnode *)0;
1439 }
1440 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1441 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td, &np);
1442 if (!error)
1443 newvp = NFSTOV(np);
1444 }
1445 }
1446 if (v3)
1447 nfsm_wcc_data(dvp, wccflag);
1448 nfsm_reqdone;
1449 if (error) {
1450 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1451 fmode &= ~O_EXCL;
1452 goto again;
1453 }
1454 if (newvp)
1455 vput(newvp);
1456 } else if (v3 && (fmode & O_EXCL)) {
1457 /*
1458 * We are normally called with only a partially initialized
1459 * VAP. Since the NFSv3 spec says that the server may use the
1460 * file attributes to store the verifier, the spec requires
1461 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1462 * in atime, but we can't really assume that all servers will,
1463 * so we ensure that our SETATTR sets both atime and mtime.
1464 */
1465 if (vap->va_mtime.tv_sec == VNOVAL)
1466 vfs_timestamp(&vap->va_mtime);
1467 if (vap->va_atime.tv_sec == VNOVAL)
1468 vap->va_atime = vap->va_mtime;
1469 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_td);
1470 }
1471 if (!error) {
1472 if (cnp->cn_flags & MAKEENTRY)
1473 cache_enter(dvp, newvp, cnp);
1474 *ap->a_vpp = newvp;
1475 }
1476 VTONFS(dvp)->n_flag |= NMODIFIED;
1477 if (!wccflag)
1478 VTONFS(dvp)->n_attrstamp = 0;
1479 return (error);
1480}
1481
1482/*
1483 * nfs file remove call
1484 * To try and make nfs semantics closer to ufs semantics, a file that has
1485 * other processes using the vnode is renamed instead of removed and then
1486 * removed later on the last close.
1487 * - If v_usecount > 1
1488 * If a rename is not already in the works
1489 * call nfs_sillyrename() to set it up
1490 * else
1491 * do the remove rpc
1492 */
1493static int
1494nfs_remove(ap)
1495 struct vop_remove_args /* {
1496 struct vnodeop_desc *a_desc;
1497 struct vnode * a_dvp;
1498 struct vnode * a_vp;
1499 struct componentname * a_cnp;
1500 } */ *ap;
1501{
1502 register struct vnode *vp = ap->a_vp;
1503 register struct vnode *dvp = ap->a_dvp;
1504 register struct componentname *cnp = ap->a_cnp;
1505 register struct nfsnode *np = VTONFS(vp);
1506 int error = 0;
1507 struct vattr vattr;
1508
1509#ifndef DIAGNOSTIC
1510 if ((cnp->cn_flags & HASBUF) == 0)
1511 panic("nfs_remove: no name");
1512 if (vp->v_usecount < 1)
1513 panic("nfs_remove: bad v_usecount");
1514#endif
1515 if (vp->v_type == VDIR)
1516 error = EPERM;
1517 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1518 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_td) == 0 &&
1519 vattr.va_nlink > 1)) {
1520 /*
1521 * Purge the name cache so that the chance of a lookup for
1522 * the name succeeding while the remove is in progress is
1523 * minimized. Without node locking it can still happen, such
1524 * that an I/O op returns ESTALE, but since you get this if
1525 * another host removes the file..
1526 */
1527 cache_purge(vp);
1528 /*
1529 * throw away biocache buffers, mainly to avoid
1530 * unnecessary delayed writes later.
1531 */
1532 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_td, 1);
1533 /* Do the rpc */
1534 if (error != EINTR)
1535 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1536 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td);
1537 /*
1538 * Kludge City: If the first reply to the remove rpc is lost..
1539 * the reply to the retransmitted request will be ENOENT
1540 * since the file was in fact removed
1541 * Therefore, we cheat and return success.
1542 */
1543 if (error == ENOENT)
1544 error = 0;
1545 } else if (!np->n_sillyrename)
1546 error = nfs_sillyrename(dvp, vp, cnp);
1547 np->n_attrstamp = 0;
1548 return (error);
1549}
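
/*
 * Illustrative sketch (not part of the original file): the remove path
 * above makes one central decision -- do the REMOVE RPC now, or
 * "sillyrename" the file to a hidden name because another process still
 * has the vnode open.  A condensed model of that choice (hypothetical
 * enum and parameter names):
 */
#if 0
enum remove_action { REMOVE_NOW, SILLY_RENAME };

static enum remove_action
remove_policy(int usecount, int already_sillyrenamed, int nlink)
{
	if (usecount == 1 || (already_sillyrenamed && nlink > 1))
		return (REMOVE_NOW);
	return (SILLY_RENAME);
}
#endif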
1550
1551/*
1552 * nfs file remove rpc called from nfs_inactive
1553 */
1554int
1555nfs_removeit(struct sillyrename *sp)
1556{
1557
1558 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen,
1559 sp->s_cred, NULL));
1560}
1561
1562/*
1563 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1564 */
1565static int
1566nfs_removerpc(dvp, name, namelen, cred, td)
1567 struct vnode *dvp;
1568 const char *name;
1569 int namelen;
1570 struct ucred *cred;
1571 struct thread *td;
1572{
1573 register u_int32_t *tl;
1574 register caddr_t cp;
1575 register int32_t t1, t2;
1576 caddr_t bpos, dpos, cp2;
1577 int error = 0, wccflag = NFSV3_WCCRATTR;
1578 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1579 int v3 = NFS_ISV3(dvp);
1580
1581 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1582 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1583 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1584 nfsm_fhtom(dvp, v3);
1585 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1586 nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
1587 if (v3)
1588 nfsm_wcc_data(dvp, wccflag);
1589 nfsm_reqdone;
1590 VTONFS(dvp)->n_flag |= NMODIFIED;
1591 if (!wccflag)
1592 VTONFS(dvp)->n_attrstamp = 0;
1593 return (error);
1594}
1595
1596/*
1597 * nfs file rename call
1598 */
1599static int
1600nfs_rename(ap)
1601 struct vop_rename_args /* {
1602 struct vnode *a_fdvp;
1603 struct vnode *a_fvp;
1604 struct componentname *a_fcnp;
1605 struct vnode *a_tdvp;
1606 struct vnode *a_tvp;
1607 struct componentname *a_tcnp;
1608 } */ *ap;
1609{
1610 register struct vnode *fvp = ap->a_fvp;
1611 register struct vnode *tvp = ap->a_tvp;
1612 register struct vnode *fdvp = ap->a_fdvp;
1613 register struct vnode *tdvp = ap->a_tdvp;
1614 register struct componentname *tcnp = ap->a_tcnp;
1615 register struct componentname *fcnp = ap->a_fcnp;
1616 int error;
1617
1618#ifndef DIAGNOSTIC
1619 if ((tcnp->cn_flags & HASBUF) == 0 ||
1620 (fcnp->cn_flags & HASBUF) == 0)
1621 panic("nfs_rename: no name");
1622#endif
1623 /* Check for cross-device rename */
1624 if ((fvp->v_mount != tdvp->v_mount) ||
1625 (tvp && (fvp->v_mount != tvp->v_mount))) {
1626 error = EXDEV;
1627 goto out;
1628 }
1629
1630 /*
1631 * We have to flush B_DELWRI data prior to renaming
1632 * the file. If we don't, the delayed-write buffers
1633 * can be flushed out later after the file has gone stale
1634 * under NFSV3. NFSV2 does not have this problem because
1635 * ( as far as I can tell ) it flushes dirty buffers more
1636 * often.
1637 */
1638
1639 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_td);
1640 if (tvp)
1641 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_td);
1642
1643 /*
1644 * If the tvp exists and is in use, sillyrename it before doing the
1645 * rename of the new file over it.
1646 * XXX Can't sillyrename a directory.
1647 */
1648 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1649 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1650 vput(tvp);
1651 tvp = NULL;
1652 }
1653
1654 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1655 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1656 tcnp->cn_td);
1657
1658 if (fvp->v_type == VDIR) {
1659 if (tvp != NULL && tvp->v_type == VDIR)
1660 cache_purge(tdvp);
1661 cache_purge(fdvp);
1662 }
1663
1664out:
1665 if (tdvp == tvp)
1666 vrele(tdvp);
1667 else
1668 vput(tdvp);
1669 if (tvp)
1670 vput(tvp);
1671 vrele(fdvp);
1672 vrele(fvp);
1673 /*
1674 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1675 */
1676 if (error == ENOENT)
1677 error = 0;
1678 return (error);
1679}
1680
1681/*
1682 * nfs file rename rpc called from nfs_remove() above
1683 */
1684static int
1685nfs_renameit(sdvp, scnp, sp)
1686 struct vnode *sdvp;
1687 struct componentname *scnp;
1688 register struct sillyrename *sp;
1689{
1690 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1691 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_td));
1692}
1693
1694/*
1695 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1696 */
1697static int
1698nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, td)
1699 struct vnode *fdvp;
1700 const char *fnameptr;
1701 int fnamelen;
1702 register struct vnode *tdvp;
1703 const char *tnameptr;
1704 int tnamelen;
1705 struct ucred *cred;
1706 struct thread *td;
1707{
1708 register u_int32_t *tl;
1709 register caddr_t cp;
1710 register int32_t t1, t2;
1711 caddr_t bpos, dpos, cp2;
1712 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1713 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1714 int v3 = NFS_ISV3(fdvp);
1715
1716 nfsstats.rpccnt[NFSPROC_RENAME]++;
1717 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1718 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1719 nfsm_rndup(tnamelen));
1720 nfsm_fhtom(fdvp, v3);
1721 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1722 nfsm_fhtom(tdvp, v3);
1723 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1724 nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
1725 if (v3) {
1726 nfsm_wcc_data(fdvp, fwccflag);
1727 nfsm_wcc_data(tdvp, twccflag);
1728 }
1729 nfsm_reqdone;
1730 VTONFS(fdvp)->n_flag |= NMODIFIED;
1731 VTONFS(tdvp)->n_flag |= NMODIFIED;
1732 if (!fwccflag)
1733 VTONFS(fdvp)->n_attrstamp = 0;
1734 if (!twccflag)
1735 VTONFS(tdvp)->n_attrstamp = 0;
1736 return (error);
1737}
1738
1739/*
1740 * nfs hard link create call
1741 */
1742static int
1743nfs_link(ap)
1744 struct vop_link_args /* {
1745 struct vnode *a_tdvp;
1746 struct vnode *a_vp;
1747 struct componentname *a_cnp;
1748 } */ *ap;
1749{
1750 register struct vnode *vp = ap->a_vp;
1751 register struct vnode *tdvp = ap->a_tdvp;
1752 register struct componentname *cnp = ap->a_cnp;
1753 register u_int32_t *tl;
1754 register caddr_t cp;
1755 register int32_t t1, t2;
1756 caddr_t bpos, dpos, cp2;
1757 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1758 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1759 int v3;
1760
1761 if (vp->v_mount != tdvp->v_mount) {
1762 return (EXDEV);
1763 }
1764
1765 /*
1766 * Push all writes to the server, so that the attribute cache
1767 * doesn't get "out of sync" with the server.
1768 * XXX There should be a better way!
1769 */
1770 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_td);
1771
1772 v3 = NFS_ISV3(vp);
1773 nfsstats.rpccnt[NFSPROC_LINK]++;
1774 nfsm_reqhead(vp, NFSPROC_LINK,
1775 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1776 nfsm_fhtom(vp, v3);
1777 nfsm_fhtom(tdvp, v3);
1778 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1779 nfsm_request(vp, NFSPROC_LINK, cnp->cn_td, cnp->cn_cred);
1780 if (v3) {
1781 nfsm_postop_attr(vp, attrflag);
1782 nfsm_wcc_data(tdvp, wccflag);
1783 }
1784 nfsm_reqdone;
1785 VTONFS(tdvp)->n_flag |= NMODIFIED;
1786 if (!attrflag)
1787 VTONFS(vp)->n_attrstamp = 0;
1788 if (!wccflag)
1789 VTONFS(tdvp)->n_attrstamp = 0;
1790 /*
1791 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1792 */
1793 if (error == EEXIST)
1794 error = 0;
1795 return (error);
1796}
1797
1798/*
1799 * nfs symbolic link create call
1800 */
1801static int
1802nfs_symlink(ap)
1803 struct vop_symlink_args /* {
1804 struct vnode *a_dvp;
1805 struct vnode **a_vpp;
1806 struct componentname *a_cnp;
1807 struct vattr *a_vap;
1808 char *a_target;
1809 } */ *ap;
1810{
1811 register struct vnode *dvp = ap->a_dvp;
1812 register struct vattr *vap = ap->a_vap;
1813 register struct componentname *cnp = ap->a_cnp;
1814 register struct nfsv2_sattr *sp;
1815 register u_int32_t *tl;
1816 register caddr_t cp;
1817 register int32_t t1, t2;
1818 caddr_t bpos, dpos, cp2;
1819 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1820 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1821 struct vnode *newvp = (struct vnode *)0;
1822 int v3 = NFS_ISV3(dvp);
1823
1824 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1825 slen = strlen(ap->a_target);
1826 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1827 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1828 nfsm_fhtom(dvp, v3);
1829 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1830 if (v3) {
1831 nfsm_v3attrbuild(vap, FALSE);
1832 }
1833 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
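 /*
 * For v2, append a sattr below; uid, gid and size are set to nfs_xdrneg1
 * (all ones), which the server interprets as "don't set".
 */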
1834 if (!v3) {
1835 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1836 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1837 sp->sa_uid = nfs_xdrneg1;
1838 sp->sa_gid = nfs_xdrneg1;
1839 sp->sa_size = nfs_xdrneg1;
1840 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1841 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1842 }
1843
1844 /*
1845 * Issue the NFS request and get the rpc response.
1846 *
1847 * Only NFSv3 responses returning an error of 0 actually return
1848 * a file handle that can be converted into newvp without having
1849 * to do an extra lookup rpc.
1850 */
dadab5e9 1851 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_td, cnp->cn_cred);
1852 if (v3) {
1853 if (error == 0)
1854 nfsm_mtofh(dvp, newvp, v3, gotvp);
1855 nfsm_wcc_data(dvp, wccflag);
1856 }
1857
1858 /*
1859 * Error exits ("out" code) from the nfsm_* macros jump to here; mrep is also freed.
1860 */
1861
1862 nfsm_reqdone;
1863
1864 /*
1865 * If we get an EEXIST error, silently convert it to no-error
1866 * in case of an NFS retry.
1867 */
1868 if (error == EEXIST)
1869 error = 0;
1870
1871 /*
1872 * If we do not have (or no longer have) an error, and we could
1873 * not extract the newvp from the response (because the request was
1874 * NFSv2 or the error was EEXIST), we have to do a lookup in order
1875 * to obtain a newvp to return.
1876 */
1877 if (error == 0 && newvp == NULL) {
1878 struct nfsnode *np = NULL;
1879
1880 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
dadab5e9 1881 cnp->cn_cred, cnp->cn_td, &np);
1882 if (!error)
1883 newvp = NFSTOV(np);
1884 }
1885 if (error) {
1886 if (newvp)
1887 vput(newvp);
1888 } else {
1889 *ap->a_vpp = newvp;
1890 }
1891 VTONFS(dvp)->n_flag |= NMODIFIED;
1892 if (!wccflag)
1893 VTONFS(dvp)->n_attrstamp = 0;
1894 return (error);
1895}
1896
1897/*
1898 * nfs make dir call
1899 */
1900static int
1901nfs_mkdir(ap)
1902 struct vop_mkdir_args /* {
1903 struct vnode *a_dvp;
1904 struct vnode **a_vpp;
1905 struct componentname *a_cnp;
1906 struct vattr *a_vap;
1907 } */ *ap;
1908{
1909 register struct vnode *dvp = ap->a_dvp;
1910 register struct vattr *vap = ap->a_vap;
1911 register struct componentname *cnp = ap->a_cnp;
1912 register struct nfsv2_sattr *sp;
1913 register u_int32_t *tl;
1914 register caddr_t cp;
1915 register int32_t t1, t2;
1916 register int len;
1917 struct nfsnode *np = (struct nfsnode *)0;
1918 struct vnode *newvp = (struct vnode *)0;
1919 caddr_t bpos, dpos, cp2;
1920 int error = 0, wccflag = NFSV3_WCCRATTR;
1921 int gotvp = 0;
1922 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1923 struct vattr vattr;
1924 int v3 = NFS_ISV3(dvp);
1925
dadab5e9 1926 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_td)) != 0) {
1927 return (error);
1928 }
1929 len = cnp->cn_namelen;
1930 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1931 nfsm_reqhead(dvp, NFSPROC_MKDIR,
1932 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1933 nfsm_fhtom(dvp, v3);
1934 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1935 if (v3) {
1936 nfsm_v3attrbuild(vap, FALSE);
1937 } else {
1938 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1939 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1940 sp->sa_uid = nfs_xdrneg1;
1941 sp->sa_gid = nfs_xdrneg1;
1942 sp->sa_size = nfs_xdrneg1;
1943 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1944 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1945 }
dadab5e9 1946 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_td, cnp->cn_cred);
1947 if (!error)
1948 nfsm_mtofh(dvp, newvp, v3, gotvp);
1949 if (v3)
1950 nfsm_wcc_data(dvp, wccflag);
1951 nfsm_reqdone;
1952 VTONFS(dvp)->n_flag |= NMODIFIED;
1953 if (!wccflag)
1954 VTONFS(dvp)->n_attrstamp = 0;
1955 /*
1956 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
1957 * if we can succeed in looking up the directory.
1958 */
1959 if (error == EEXIST || (!error && !gotvp)) {
1960 if (newvp) {
1961 vrele(newvp);
1962 newvp = (struct vnode *)0;
1963 }
1964 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
dadab5e9 1965 cnp->cn_td, &np);
1966 if (!error) {
1967 newvp = NFSTOV(np);
1968 if (newvp->v_type != VDIR)
1969 error = EEXIST;
1970 }
1971 }
1972 if (error) {
1973 if (newvp)
1974 vrele(newvp);
1975 } else
1976 *ap->a_vpp = newvp;
1977 return (error);
1978}
1979
1980/*
1981 * nfs remove directory call
1982 */
1983static int
1984nfs_rmdir(ap)
1985 struct vop_rmdir_args /* {
1986 struct vnode *a_dvp;
1987 struct vnode *a_vp;
1988 struct componentname *a_cnp;
1989 } */ *ap;
1990{
1991 register struct vnode *vp = ap->a_vp;
1992 register struct vnode *dvp = ap->a_dvp;
1993 register struct componentname *cnp = ap->a_cnp;
1994 register u_int32_t *tl;
1995 register caddr_t cp;
1996 register int32_t t1, t2;
1997 caddr_t bpos, dpos, cp2;
1998 int error = 0, wccflag = NFSV3_WCCRATTR;
1999 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2000 int v3 = NFS_ISV3(dvp);
2001
2002 if (dvp == vp)
2003 return (EINVAL);
2004 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2005 nfsm_reqhead(dvp, NFSPROC_RMDIR,
2006 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2007 nfsm_fhtom(dvp, v3);
2008 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
dadab5e9 2009 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_td, cnp->cn_cred);
2010 if (v3)
2011 nfsm_wcc_data(dvp, wccflag);
2012 nfsm_reqdone;
2013 VTONFS(dvp)->n_flag |= NMODIFIED;
2014 if (!wccflag)
2015 VTONFS(dvp)->n_attrstamp = 0;
2016 cache_purge(dvp);
2017 cache_purge(vp);
2018 /*
2019 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2020 */
2021 if (error == ENOENT)
2022 error = 0;
2023 return (error);
2024}
2025
2026/*
2027 * nfs readdir call
2028 */
2029static int
2030nfs_readdir(ap)
2031 struct vop_readdir_args /* {
2032 struct vnode *a_vp;
2033 struct uio *a_uio;
2034 struct ucred *a_cred;
2035 } */ *ap;
2036{
2037 register struct vnode *vp = ap->a_vp;
2038 register struct nfsnode *np = VTONFS(vp);
2039 register struct uio *uio = ap->a_uio;
2040 int tresid, error;
2041 struct vattr vattr;
2042
2043 if (vp->v_type != VDIR)
2044 return (EPERM);
2045 /*
2046 * First, check for hit on the EOF offset cache
2047 */
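 /*
 * n_direofoffset is set by the readdir rpc routines when the server
 * reports EOF.  If the requested offset is at or beyond it and the
 * directory has not been modified locally, the cached EOF can be
 * trusted (subject to the lease/mtime checks below) and no rpc is
 * needed.
 */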
2048 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
2049 (np->n_flag & NMODIFIED) == 0) {
2050 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
2051 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
2052 nfsstats.direofcache_hits++;
2053 return (0);
2054 }
dadab5e9 2055 } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_td) == 0 &&
2056 np->n_mtime == vattr.va_mtime.tv_sec) {
2057 nfsstats.direofcache_hits++;
2058 return (0);
2059 }
2060 }
2061
2062 /*
2063 * Call nfs_bioread() to do the real work.
2064 */
2065 tresid = uio->uio_resid;
2066 error = nfs_bioread(vp, uio, 0, ap->a_cred);
2067
2068 if (!error && uio->uio_resid == tresid)
2069 nfsstats.direofcache_misses++;
2070 return (error);
2071}
2072
2073/*
2074 * Readdir rpc call.
2075 * Called from below the buffer cache by nfs_doio().
2076 */
2077int
2078nfs_readdirrpc(vp, uiop, cred)
2079 struct vnode *vp;
2080 register struct uio *uiop;
2081 struct ucred *cred;
2082
2083{
2084 register int len, left;
2085 register struct dirent *dp = NULL;
2086 register u_int32_t *tl;
2087 register caddr_t cp;
2088 register int32_t t1, t2;
2089 register nfsuint64 *cookiep;
2090 caddr_t bpos, dpos, cp2;
2091 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2092 nfsuint64 cookie;
2093 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2094 struct nfsnode *dnp = VTONFS(vp);
2095 u_quad_t fileno;
2096 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2097 int attrflag;
2098 int v3 = NFS_ISV3(vp);
2099
2100#ifndef DIAGNOSTIC
2101 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2102 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2103 panic("nfs readdirrpc bad uio");
2104#endif
2105
2106 /*
2107 * If there is no cookie, assume directory was stale.
2108 */
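 /*
 * nfs_getcookie() returns the cookie that a previous readdir reply
 * recorded for this directory offset; with the third argument zero it
 * will not create one, so a NULL return means there is nothing valid
 * to hand the server and the caller must restart the read.
 */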
2109 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2110 if (cookiep)
2111 cookie = *cookiep;
2112 else
2113 return (NFSERR_BAD_COOKIE);
2114 /*
2115 * Loop around doing readdir rpc's of size nm_readdirsize
2116 * truncated to a multiple of DIRBLKSIZ.
2117 * The stopping criterion is EOF or a full buffer.
2118 */
2119 while (more_dirs && bigenough) {
2120 nfsstats.rpccnt[NFSPROC_READDIR]++;
2121 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2122 NFSX_READDIR(v3));
2123 nfsm_fhtom(vp, v3);
2124 if (v3) {
2125 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2126 *tl++ = cookie.nfsuquad[0];
2127 *tl++ = cookie.nfsuquad[1];
2128 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2129 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2130 } else {
2131 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2132 *tl++ = cookie.nfsuquad[0];
2133 }
2134 *tl = txdr_unsigned(nmp->nm_readdirsize);
dadab5e9 2135 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, cred);
2136 if (v3) {
2137 nfsm_postop_attr(vp, attrflag);
2138 if (!error) {
2139 nfsm_dissect(tl, u_int32_t *,
2140 2 * NFSX_UNSIGNED);
2141 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2142 dnp->n_cookieverf.nfsuquad[1] = *tl;
2143 } else {
2144 m_freem(mrep);
2145 goto nfsmout;
2146 }
2147 }
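 /*
 * The cookie verifier saved above must accompany the cookies in
 * subsequent NFSv3 readdir requests (see the request construction at
 * the top of this loop).
 */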
2148 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2149 more_dirs = fxdr_unsigned(int, *tl);
2150
2151 /* loop thru the dir entries, doctoring them to 4bsd form */
2152 while (more_dirs && bigenough) {
2153 if (v3) {
2154 nfsm_dissect(tl, u_int32_t *,
2155 3 * NFSX_UNSIGNED);
2156 fileno = fxdr_hyper(tl);
2157 len = fxdr_unsigned(int, *(tl + 2));
2158 } else {
2159 nfsm_dissect(tl, u_int32_t *,
2160 2 * NFSX_UNSIGNED);
2161 fileno = fxdr_unsigned(u_quad_t, *tl++);
2162 len = fxdr_unsigned(int, *tl);
2163 }
2164 if (len <= 0 || len > NFS_MAXNAMLEN) {
2165 error = EBADRPC;
2166 m_freem(mrep);
2167 goto nfsmout;
2168 }
2169 tlen = nfsm_rndup(len);
2170 if (tlen == len)
2171 tlen += 4; /* To ensure null termination */
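 /*
 * Entries never straddle a DIRBLKSIZ boundary; if this one will not
 * fit in what is left of the current block, pad the previous entry's
 * d_reclen out to the boundary and start a new block.
 */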
2172 left = DIRBLKSIZ - blksiz;
2173 if ((tlen + DIRHDSIZ) > left) {
2174 dp->d_reclen += left;
2175 uiop->uio_iov->iov_base += left;
2176 uiop->uio_iov->iov_len -= left;
2177 uiop->uio_offset += left;
2178 uiop->uio_resid -= left;
2179 blksiz = 0;
2180 }
2181 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2182 bigenough = 0;
2183 if (bigenough) {
2184 dp = (struct dirent *)uiop->uio_iov->iov_base;
2185 dp->d_fileno = (int)fileno;
2186 dp->d_namlen = len;
2187 dp->d_reclen = tlen + DIRHDSIZ;
2188 dp->d_type = DT_UNKNOWN;
2189 blksiz += dp->d_reclen;
2190 if (blksiz == DIRBLKSIZ)
2191 blksiz = 0;
2192 uiop->uio_offset += DIRHDSIZ;
2193 uiop->uio_resid -= DIRHDSIZ;
2194 uiop->uio_iov->iov_base += DIRHDSIZ;
2195 uiop->uio_iov->iov_len -= DIRHDSIZ;
2196 nfsm_mtouio(uiop, len);
2197 cp = uiop->uio_iov->iov_base;
2198 tlen -= len;
2199 *cp = '\0'; /* null terminate */
2200 uiop->uio_iov->iov_base += tlen;
2201 uiop->uio_iov->iov_len -= tlen;
2202 uiop->uio_offset += tlen;
2203 uiop->uio_resid -= tlen;
2204 } else
2205 nfsm_adv(nfsm_rndup(len));
2206 if (v3) {
2207 nfsm_dissect(tl, u_int32_t *,
2208 3 * NFSX_UNSIGNED);
2209 } else {
2210 nfsm_dissect(tl, u_int32_t *,
2211 2 * NFSX_UNSIGNED);
2212 }
2213 if (bigenough) {
2214 cookie.nfsuquad[0] = *tl++;
2215 if (v3)
2216 cookie.nfsuquad[1] = *tl++;
2217 } else if (v3)
2218 tl += 2;
2219 else
2220 tl++;
2221 more_dirs = fxdr_unsigned(int, *tl);
2222 }
2223 /*
2224 * If at end of rpc data, get the eof boolean
2225 */
2226 if (!more_dirs) {
2227 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2228 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2229 }
2230 m_freem(mrep);
2231 }
2232 /*
2233 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2234 * by increasing d_reclen for the last record.
2235 */
2236 if (blksiz > 0) {
2237 left = DIRBLKSIZ - blksiz;
2238 dp->d_reclen += left;
2239 uiop->uio_iov->iov_base += left;
2240 uiop->uio_iov->iov_len -= left;
2241 uiop->uio_offset += left;
2242 uiop->uio_resid -= left;
2243 }
2244
2245 /*
2246 * We are now either at the end of the directory or have filled the
2247 * block.
2248 */
2249 if (bigenough)
2250 dnp->n_direofoffset = uiop->uio_offset;
2251 else {
2252 if (uiop->uio_resid > 0)
2253 printf("EEK! readdirrpc resid > 0\n");
2254 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2255 *cookiep = cookie;
2256 }
2257nfsmout:
2258 return (error);
2259}
2260
2261/*
2262 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2263 */
2264int
2265nfs_readdirplusrpc(vp, uiop, cred)
2266 struct vnode *vp;
2267 register struct uio *uiop;
2268 struct ucred *cred;
2269{
2270 register int len, left;
2271 register struct dirent *dp;
2272 register u_int32_t *tl;
2273 register caddr_t cp;
2274 register int32_t t1, t2;
2275 register struct vnode *newvp;
2276 register nfsuint64 *cookiep;
2277 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2278 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2279 struct nameidata nami, *ndp = &nami;
2280 struct componentname *cnp = &ndp->ni_cnd;
2281 nfsuint64 cookie;
2282 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2283 struct nfsnode *dnp = VTONFS(vp), *np;
2284 nfsfh_t *fhp;
2285 u_quad_t fileno;
2286 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2287 int attrflag, fhsize;
2288
2289#ifndef nolint
2290 dp = (struct dirent *)0;
2291#endif
2292#ifndef DIAGNOSTIC
2293 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2294 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2295 panic("nfs readdirplusrpc bad uio");
2296#endif
2297 ndp->ni_dvp = vp;
2298 newvp = NULLVP;
2299
2300 /*
2301 * If there is no cookie, assume directory was stale.
2302 */
2303 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2304 if (cookiep)
2305 cookie = *cookiep;
2306 else
2307 return (NFSERR_BAD_COOKIE);
2308 /*
2309 * Loop around doing readdir rpc's of size nm_readdirsize
2310 * truncated to a multiple of DIRBLKSIZ.
2311 * The stopping criterion is EOF or a full buffer.
2312 */
2313 while (more_dirs && bigenough) {
2314 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2315 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2316 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2317 nfsm_fhtom(vp, 1);
2318 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2319 *tl++ = cookie.nfsuquad[0];
2320 *tl++ = cookie.nfsuquad[1];
2321 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2322 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2323 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2324 *tl = txdr_unsigned(nmp->nm_rsize);
dadab5e9 2325 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, cred);
2326 nfsm_postop_attr(vp, attrflag);
2327 if (error) {
2328 m_freem(mrep);
2329 goto nfsmout;
2330 }
2331 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2332 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2333 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2334 more_dirs = fxdr_unsigned(int, *tl);
2335
2336 /* loop thru the dir entries, doctoring them to 4bsd form */
2337 while (more_dirs && bigenough) {
2338 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2339 fileno = fxdr_hyper(tl);
2340 len = fxdr_unsigned(int, *(tl + 2));
2341 if (len <= 0 || len > NFS_MAXNAMLEN) {
2342 error = EBADRPC;
2343 m_freem(mrep);
2344 goto nfsmout;
2345 }
2346 tlen = nfsm_rndup(len);
2347 if (tlen == len)
2348 tlen += 4; /* To ensure null termination*/
2349 left = DIRBLKSIZ - blksiz;
2350 if ((tlen + DIRHDSIZ) > left) {
2351 dp->d_reclen += left;
2352 uiop->uio_iov->iov_base += left;
2353 uiop->uio_iov->iov_len -= left;
2354 uiop->uio_offset += left;
2355 uiop->uio_resid -= left;
2356 blksiz = 0;
2357 }
2358 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2359 bigenough = 0;
2360 if (bigenough) {
2361 dp = (struct dirent *)uiop->uio_iov->iov_base;
2362 dp->d_fileno = (int)fileno;
2363 dp->d_namlen = len;
2364 dp->d_reclen = tlen + DIRHDSIZ;
2365 dp->d_type = DT_UNKNOWN;
2366 blksiz += dp->d_reclen;
2367 if (blksiz == DIRBLKSIZ)
2368 blksiz = 0;
2369 uiop->uio_offset += DIRHDSIZ;
2370 uiop->uio_resid -= DIRHDSIZ;
2371 uiop->uio_iov->iov_base += DIRHDSIZ;
2372 uiop->uio_iov->iov_len -= DIRHDSIZ;
2373 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2374 cnp->cn_namelen = len;
2375 nfsm_mtouio(uiop, len);
2376 cp = uiop->uio_iov->iov_base;
2377 tlen -= len;
2378 *cp = '\0';
2379 uiop->uio_iov->iov_base += tlen;
2380 uiop->uio_iov->iov_len -= tlen;
2381 uiop->uio_offset += tlen;
2382 uiop->uio_resid -= tlen;
2383 } else
2384 nfsm_adv(nfsm_rndup(len));
2385 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2386 if (bigenough) {
2387 cookie.nfsuquad[0] = *tl++;
2388 cookie.nfsuquad[1] = *tl++;
2389 } else
2390 tl += 2;
2391
2392 /*
2393 * Since the attributes are before the file handle
2394 * (sigh), we must skip over the attributes and then
2395 * come back and get them.
2396 */
2397 attrflag = fxdr_unsigned(int, *tl);
2398 if (attrflag) {
2399 dpossav1 = dpos;
2400 mdsav1 = md;
2401 nfsm_adv(NFSX_V3FATTR);
2402 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2403 doit = fxdr_unsigned(int, *tl);
2404 if (doit) {
2405 nfsm_getfh(fhp, fhsize, 1);
2406 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2407 VREF(vp);
2408 newvp = vp;
2409 np = dnp;
2410 } else {
2411 error = nfs_nget(vp->v_mount, fhp,
2412 fhsize, &np);
2413 if (error)
2414 doit = 0;
2415 else
2416 newvp = NFSTOV(np);
2417 }
2418 }
2419 if (doit && bigenough) {
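 /*
 * Load the attributes returned with this entry into its nfsnode and
 * prime the name cache, so that a later lookup or getattr of the
 * entry can often be satisfied without another rpc.
 */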
2420 dpossav2 = dpos;
2421 dpos = dpossav1;
2422 mdsav2 = md;
2423 md = mdsav1;
2424 nfsm_loadattr(newvp, (struct vattr *)0);
2425 dpos = dpossav2;
2426 md = mdsav2;
2427 dp->d_type =
2428 IFTODT(VTTOIF(np->n_vattr.va_type));
2429 ndp->ni_vp = newvp;
2430 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2431 }
2432 } else {
2433 /* Just skip over the file handle */
2434 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2435 i = fxdr_unsigned(int, *tl);
2436 nfsm_adv(nfsm_rndup(i));
2437 }
2438 if (newvp != NULLVP) {
2439 if (newvp == vp)
2440 vrele(newvp);
2441 else
2442 vput(newvp);
2443 newvp = NULLVP;
2444 }
2445 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2446 more_dirs = fxdr_unsigned(int, *tl);
2447 }
2448 /*
2449 * If at end of rpc data, get the eof boolean
2450 */
2451 if (!more_dirs) {
2452 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2453 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2454 }
2455 m_freem(mrep);
2456 }
2457 /*
2458 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2459 * by increasing d_reclen for the last record.
2460 */
2461 if (blksiz > 0) {
2462 left = DIRBLKSIZ - blksiz;
2463 dp->d_reclen += left;
2464 uiop->uio_iov->iov_base += left;
2465 uiop->uio_iov->iov_len -= left;
2466 uiop->uio_offset += left;
2467 uiop->uio_resid -= left;
2468 }
2469
2470 /*
2471 * We are now either at the end of the directory or have filled the
2472 * block.
2473 */
2474 if (bigenough)
2475 dnp->n_direofoffset = uiop->uio_offset;
2476 else {
2477 if (uiop->uio_resid > 0)
2478 printf("EEK! readdirplusrpc resid > 0\n");
2479 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2480 *cookiep = cookie;
2481 }
2482nfsmout:
2483 if (newvp != NULLVP) {
2484 if (newvp == vp)
2485 vrele(newvp);
2486 else
2487 vput(newvp);
2488 newvp = NULLVP;
2489 }
2490 return (error);
2491}
2492
2493/*
2494 * Silly rename. To make the stateless NFS filesystem look a little
2495 * more like "ufs", a remove of an active vnode is translated to a rename
2496 * to a funny looking filename that is removed later by nfs_inactive() on
2497 * the nfsnode. There is the potential for another process on a different
2498 * client to create the same funny name between the time the nfs_lookitup()
2499 * fails and the nfs_rename() completes, but...
2500 */
2501static int
2502nfs_sillyrename(dvp, vp, cnp)
2503 struct vnode *dvp, *vp;
2504 struct componentname *cnp;
2505{
2506 register struct sillyrename *sp;
2507 struct nfsnode *np;
2508 int error;
2509
2510 cache_purge(dvp);
2511 np = VTONFS(vp);
2512#ifndef DIAGNOSTIC
2513 if (vp->v_type == VDIR)
2514 panic("nfs: sillyrename dir");
2515#endif
2516 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2517 M_NFSREQ, M_WAITOK);
2518 sp->s_cred = crdup(cnp->cn_cred);
2519 sp->s_dvp = dvp;
2520 VREF(dvp);
2521
2522 /* Fudge together a funny name */
dadab5e9 2523 sp->s_namlen = sprintf(sp->s_name, ".nfsA%08x4.4", (int)cnp->cn_td);
2524
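 /*
 * The result looks like ".nfsA1234abcd4.4" (the hex digits come from
 * the thread pointer).  If the name already exists on the server, the
 * loop below advances the fifth character ('A') through the alphabet
 * until an unused name is found.
 */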
2525 /* Try lookitups until we get one that isn't there */
2526 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
dadab5e9 2527 cnp->cn_td, (struct nfsnode **)0) == 0) {
2528 sp->s_name[4]++;
2529 if (sp->s_name[4] > 'z') {
2530 error = EINVAL;
2531 goto bad;
2532 }
2533 }
2534 error = nfs_renameit(dvp, cnp, sp);
2535 if (error)
2536 goto bad;
2537 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
dadab5e9 2538 cnp->cn_td, &np);
2539 np->n_sillyrename = sp;
2540 return (0);
2541bad:
2542 vrele(sp->s_dvp);
2543 crfree(sp->s_cred);
2544 free((caddr_t)sp, M_NFSREQ);
2545 return (error);
2546}
2547
2548/*
2549 * Look up a file name and optionally either update the file handle or
2550 * allocate an nfsnode, depending on the value of npp.
2551 * npp == NULL --> just do the lookup
2552 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2553 * handled too
2554 * *npp != NULL --> update the file handle in the vnode
2555 */
2556static int
dadab5e9 2557nfs_lookitup(dvp, name, len, cred, td, npp)
2558 register struct vnode *dvp;
2559 const char *name;
2560 int len;
2561 struct ucred *cred;
dadab5e9 2562 struct thread *td;
2563 struct nfsnode **npp;
2564{
2565 register u_int32_t *tl;
2566 register caddr_t cp;
2567 register int32_t t1, t2;
2568 struct vnode *newvp = (struct vnode *)0;
2569 struct nfsnode *np, *dnp = VTONFS(dvp);
2570 caddr_t bpos, dpos, cp2;
2571 int error = 0, fhlen, attrflag;
2572 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2573 nfsfh_t *nfhp;
2574 int v3 = NFS_ISV3(dvp);
2575
2576 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2577 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2578 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2579 nfsm_fhtom(dvp, v3);
2580 nfsm_strtom(name, len, NFS_MAXNAMLEN);
dadab5e9 2581 nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
2582 if (npp && !error) {
2583 nfsm_getfh(nfhp, fhlen, v3);
2584 if (*npp) {
2585 np = *npp;
2586 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2587 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2588 np->n_fhp = &np->n_fh;
2589 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2590 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2591 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2592 np->n_fhsize = fhlen;
2593 newvp = NFSTOV(np);
2594 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2595 VREF(dvp);
2596 newvp = dvp;
2597 } else {
2598 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2599 if (error) {
2600 m_freem(mrep);
2601 return (error);
2602 }
2603 newvp = NFSTOV(np);
2604 }
2605 if (v3) {
2606 nfsm_postop_attr(newvp, attrflag);
2607 if (!attrflag && *npp == NULL) {
2608 m_freem(mrep);
2609 if (newvp == dvp)
2610 vrele(newvp);
2611 else
2612 vput(newvp);
2613 return (ENOENT);
2614 }
2615 } else
2616 nfsm_loadattr(newvp, (struct vattr *)0);
2617 }
2618 nfsm_reqdone;
2619 if (npp && *npp == NULL) {
2620 if (error) {
2621 if (newvp) {
2622 if (newvp == dvp)
2623 vrele(newvp);
2624 else
2625 vput(newvp);
2626 }
2627 } else
2628 *npp = np;
2629 }
2630 return (error);
2631}
2632
2633/*
2634 * Nfs Version 3 commit rpc
2635 */
2636int
dadab5e9 2637nfs_commit(vp, offset, cnt, cred, td)
2638 struct vnode *vp;
2639 u_quad_t offset;
2640 int cnt;
2641 struct ucred *cred;
dadab5e9 2642 struct thread *td;
2643{
2644 register caddr_t cp;
2645 register u_int32_t *tl;
2646 register int32_t t1, t2;
2647 register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2648 caddr_t bpos, dpos, cp2;
2649 int error = 0, wccflag = NFSV3_WCCRATTR;
2650 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2651
2652 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2653 return (0);
2654 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2655 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2656 nfsm_fhtom(vp, 1);
2657 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2658 txdr_hyper(offset, tl);
2659 tl += 2;
2660 *tl = txdr_unsigned(cnt);
dadab5e9 2661 nfsm_request(vp, NFSPROC_COMMIT, td, cred);
2662 nfsm_wcc_data(vp, wccflag);
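 /*
 * Compare the write verifier returned by the commit with the one saved
 * from previous write replies.  If it changed, the server has likely
 * rebooted and lost uncommitted data, so return NFSERR_STALEWRITEVERF;
 * the caller (see nfs_flush()) then calls nfs_clearcommit() so the
 * dirty buffers get written again.
 */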
2663 if (!error) {
2664 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2665 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2666 NFSX_V3WRITEVERF)) {
2667 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2668 NFSX_V3WRITEVERF);
2669 error = NFSERR_STALEWRITEVERF;
2670 }
2671 }
2672 nfsm_reqdone;
2673 return (error);
2674}
2675
2676/*
2677 * Kludge City..
2678 * - make nfs_bmap() essentially a no-op that does no translation
2679 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2680 * (Maybe I could use the process's page mapping, but I was concerned that
2681 * Kernel Write might not be enabled, figured copyout() would do a lot more
2682 * work than bcopy(), and it currently happens in the context of the
2683 * swapper process (2).)
2684 */
2685static int
2686nfs_bmap(ap)
2687 struct vop_bmap_args /* {
2688 struct vnode *a_vp;
2689 daddr_t a_bn;
2690 struct vnode **a_vpp;
2691 daddr_t *a_bnp;
2692 int *a_runp;
2693 int *a_runb;
2694 } */ *ap;
2695{
2696 register struct vnode *vp = ap->a_vp;
2697
2698 if (ap->a_vpp != NULL)
2699 *ap->a_vpp = vp;
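 /*
 * There is no real device mapping; report the "physical" block as the
 * logical block number scaled into DEV_BSIZE units of the mount's I/O
 * size, and report no read-ahead/read-behind clustering.
 */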
2700 if (ap->a_bnp != NULL)
2701 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2702 if (ap->a_runp != NULL)
2703 *ap->a_runp = 0;
2704 if (ap->a_runb != NULL)
2705 *ap->a_runb = 0;
2706 return (0);
2707}
2708
2709/*
2710 * Strategy routine.
2711 * For async requests when nfsiod(s) are running, queue the request by
2712 * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
2713 * request.
2714 */
2715static int
2716nfs_strategy(ap)
2717 struct vop_strategy_args *ap;
2718{
2719 register struct buf *bp = ap->a_bp;
2720 struct ucred *cr;
dadab5e9 2721 struct thread *td;
2722 int error = 0;
2723
2724 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2725 KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
2726
2727 if (bp->b_flags & B_PHYS)
2728 panic("nfs physio");
2729
2730 if (bp->b_flags & B_ASYNC)
dadab5e9 2731 td = NULL;
984263bc 2732 else
dadab5e9 2733 td = curthread; /* XXX */
2734
2735 if (bp->b_flags & B_READ)
2736 cr = bp->b_rcred;
2737 else
2738 cr = bp->b_wcred;
2739
2740 /*
2741 * If the op is asynchronous and an i/o daemon is waiting,
2742 * queue the request, wake it up and wait for completion;
2743 * otherwise just do it ourselves.
2744 */
2745 if ((bp->b_flags & B_ASYNC) == 0 ||
2746 nfs_asyncio(bp, NOCRED, td))
2747 error = nfs_doio(bp, cr, td);
2748 return (error);
2749}
2750
2751/*
2752 * Mmap a file
2753 *
2754 * NB Currently unsupported.
2755 */
2756/* ARGSUSED */
2757static int
2758nfs_mmap(ap)
2759 struct vop_mmap_args /* {
2760 struct vnode *a_vp;
2761 int a_fflags;
2762 struct ucred *a_cred;
dadab5e9 2763 struct thread *a_td;
2764 } */ *ap;
2765{
2766
2767 return (EINVAL);
2768}
2769
2770/*
2771 * fsync vnode op. Just call nfs_flush() with commit == 1.
2772 */
2773/* ARGSUSED */
2774static int
2775nfs_fsync(ap)
2776 struct vop_fsync_args /* {
2777 struct vnodeop_desc *a_desc;
2778 struct vnode * a_vp;
2779 struct ucred * a_cred;
2780 int a_waitfor;
dadab5e9 2781 struct thread * a_td;
2782 } */ *ap;
2783{
2784
dadab5e9 2785 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_td, 1));
2786}
2787
2788/*
2789 * Flush all the blocks associated with a vnode.
2790 * Walk through the buffer pool and push any dirty pages
2791 * associated with the vnode.
2792 */
2793static int
dadab5e9 2794nfs_flush(vp, cred, waitfor, td, commit)
2795 register struct vnode *vp;
2796 struct ucred *cred;
2797 int waitfor;
dadab5e9 2798 struct thread *td;
2799 int commit;
2800{
2801 register struct nfsnode *np = VTONFS(vp);
2802 register struct buf *bp;
2803 register int i;
2804 struct buf *nbp;
2805 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2806 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2807 int passone = 1;
2808 u_quad_t off, endoff, toff;
2809 struct ucred* wcred = NULL;
2810 struct buf **bvec = NULL;
2811#ifndef NFS_COMMITBVECSIZ
2812#define NFS_COMMITBVECSIZ 20
2813#endif
2814 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2815 int bvecsize = 0, bveccount;
2816
2817 if (nmp->nm_flag & NFSMNT_INT)
2818 slpflag = PCATCH;
2819 if (!commit)
2820 passone = 0;
2821 /*
2822 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2823 * server, but has not been committed to stable storage on the server
2824 * yet. On the first pass, the byte range is worked out and the commit
2825 * rpc is done. On the second pass, nfs_writebp() is called to do the
2826 * job.
2827 */
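 /*
 * Note: in the write loop below, buffers still marked B_NEEDCOMMIT are
 * skipped on the first pass and whenever commit is zero; on a commit pass
 * they are either cleaned by the commit rpc above or rewritten on the
 * second pass.
 */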
2828again:
2829 off = (u_quad_t)-1;
2830 endoff = 0;
2831 bvecpos = 0;
2832 if (NFS_ISV3(vp) && commit) {
2833 s = splbio();
2834 /*
2835 * Count up how many buffers are waiting for a commit.
2836 */
2837 bveccount = 0;
2838 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2839 nbp = TAILQ_NEXT(bp, b_vnbufs);
2840 if (BUF_REFCNT(bp) == 0 &&
2841 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2842 == (B_DELWRI | B_NEEDCOMMIT))
2843 bveccount++;
2844 }
2845 /*
2846 * Allocate space to remember the list of bufs to commit. It is
2847 * important to use M_NOWAIT here to avoid a race with nfs_write.
2848 * If we can't get memory (for whatever reason), we will end up
2849 * committing the buffers one-by-one in the loop below.
2850 */
2851 if (bvec != NULL && bvec != bvec_on_stack)
2852 free(bvec, M_TEMP);
2853 if (bveccount > NFS_COMMITBVECSIZ) {
2854 bvec = (struct buf **)
2855 malloc(bveccount * sizeof(struct buf *),
2856 M_TEMP, M_NOWAIT);
2857 if (bvec == NULL) {
2858 bvec = bvec_on_stack;
2859 bvecsize = NFS_COMMITBVECSIZ;
2860 } else
2861 bvecsize = bveccount;
2862 } else {
2863 bvec = bvec_on_stack;
2864 bvecsize = NFS_COMMITBVECSIZ;
2865 }
2866 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2867 nbp = TAILQ_NEXT(bp, b_vnbufs);
2868 if (bvecpos >= bvecsize)
2869 break;
2870 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2871 (B_DELWRI | B_NEEDCOMMIT) ||
2872 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2873 continue;
2874 bremfree(bp);
2875 /*
2876 * Work out if all buffers are using the same cred
2877 * so we can deal with them all with one commit.
2878 *
2879 * NOTE: we are not clearing B_DONE here, so we have
2880 * to do it later on in this routine if we intend to
2881 * initiate I/O on the bp.
2882 *
2883 * Note: to avoid loopback deadlocks, we do not
2884 * assign b_runningbufspace.
2885 */
2886 if (wcred == NULL)
2887 wcred = bp->b_wcred;
2888 else if (wcred != bp->b_wcred)
2889 wcred = NOCRED;
2890 bp->b_flags |= B_WRITEINPROG;
2891 vfs_busy_pages(bp, 1);
2892
2893 /*
2894 * bp is protected by being locked, but nbp is not
2895 * and vfs_busy_pages() may sleep. We have to
2896 * recalculate nbp.
2897 */
2898 nbp = TAILQ_NEXT(bp, b_vnbufs);
2899
2900 /*
2901 * A list of these buffers is kept so that the
2902 * second loop knows which buffers have actually
2903 * been committed. This is necessary, since there
2904 * may be a race between the commit rpc and new
2905 * uncommitted writes on the file.
2906 */
2907 bvec[bvecpos++] = bp;
2908 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2909 bp->b_dirtyoff;
2910 if (toff < off)
2911 off = toff;
2912 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2913 if (toff > endoff)
2914 endoff = toff;
2915 }
2916 splx(s);
2917 }
2918 if (bvecpos > 0) {
2919 /*
2920 * Commit data on the server, as required.
2921 * If all bufs are using the same wcred, then use that with
2922 * one call for all of them, otherwise commit each one
2923 * separately.
2924 */
2925 if (wcred != NOCRED)
2926 retv = nfs_commit(vp, off, (int)(endoff - off),
dadab5e9 2927 wcred, td);
2928 else {
2929 retv = 0;
2930 for (i = 0; i < bvecpos; i++) {
2931 off_t off, size;
2932 bp = bvec[i];
2933 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2934 bp->b_dirtyoff;
2935 size = (u_quad_t)(bp->b_dirtyend
2936 - bp->b_dirtyoff);
2937 retv = nfs_commit(vp, off, (int)size,
dadab5e9 2938 bp->b_wcred, td);
2939 if (retv) break;
2940 }
2941 }
2942
2943 if (retv == NFSERR_STALEWRITEVERF)
2944 nfs_clearcommit(vp->v_mount);
2945
2946 /*
2947 * Now, either mark the blocks I/O done or mark the
2948 * blocks dirty, depending on whether the commit
2949 * succeeded.
2950 */
2951 for (i = 0; i < bvecpos; i++) {
2952 bp = bvec[i];
2953 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2954 if (retv) {
2955 /*
2956 * Error, leave B_DELWRI intact
2957 */
2958 vfs_unbusy_pages(bp);
2959 brelse(bp);
2960 } else {
2961 /*
2962 * Success, remove B_DELWRI ( bundirty() ).
2963 *
2964 * b_dirtyoff/b_dirtyend seem to be NFS
2965 * specific. We should probably move that
2966 * into bundirty(). XXX
2967 */
2968 s = splbio();
2969 vp->v_numoutput++;
2970 bp->b_flags |= B_ASYNC;
2971 bundirty(bp);
2972 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2973 bp->b_dirtyoff = bp->b_dirtyend = 0;
2974 splx(s);
2975 biodone(bp);
2976 }
2977 }
2978 }
2979
2980 /*
2981 * Start/do any write(s) that are required.
2982 */
2983loop:
2984 s = splbio();
2985 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2986 nbp = TAILQ_NEXT(bp, b_vnbufs);
2987 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2988 if (waitfor != MNT_WAIT || passone)
2989 continue;
2990 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2991 "nfsfsync", slpflag, slptimeo);
2992 splx(s);
2993 if (error == 0)
2994 panic("nfs_fsync: inconsistent lock");
2995 if (error == ENOLCK)
2996 goto loop;
dadab5e9 2997 if (nfs_sigintr(nmp, (struct nfsreq *)0, td)) {
2998 error = EINTR;
2999 goto done;
3000 }
3001 if (slpflag == PCATCH) {
3002 slpflag = 0;
3003 slptimeo = 2 * hz;
3004 }
3005 goto loop;
3006 }
3007 if ((bp->b_flags & B_DELWRI) == 0)
3008 panic("nfs_fsync: not dirty");
3009 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
3010 BUF_UNLOCK(bp);
3011 continue;
3012 }
3013 bremfree(bp);
3014 if (passone || !commit)
3015 bp->b_flags |= B_ASYNC;
3016 else
3017 bp->b_flags |= B_ASYNC | B_WRITEINPROG;
3018 splx(s);
3019 VOP_BWRITE(bp->b_vp, bp);
3020 goto loop;
3021 }
3022 splx(s);
3023 if (passone) {
3024 passone = 0;
3025 goto again;
3026 }
3027 if (waitfor == MNT_WAIT) {
3028 while (vp->v_numoutput) {
3029 vp->v_flag |= VBWAIT;
3030 error = tsleep((caddr_t)&vp->v_numoutput,
3031 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
3032 if (error) {
dadab5e9 3033 if (nfs_sigintr(nmp, (struct nfsreq *)0, td)) {
3034 error = EINTR;
3035 goto done;
3036 }
3037 if (slpflag == PCATCH) {
3038 slpflag = 0;
3039 slptimeo = 2 * hz;
3040 }
3041 }
3042 }
3043 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
3044 goto loop;
3045 }
3046 }
3047 if (np->n_flag & NWRITEERR) {
3048 error = np->n_error;
3049 np->n_flag &= ~NWRITEERR;
3050 }
3051done:
3052 if (bvec != NULL && bvec != bvec_on_stack)
3053 free(bvec, M_TEMP);
3054 return (error);
3055}
3056
3057/*
3058 * NFS advisory byte-level locks.
3059 * Currently unsupported.
3060 */
3061static int
3062nfs_advlock(ap)
3063 struct vop_advlock_args /* {
3064 struct vnode *a_vp;
3065 caddr_t a_id;
3066 int a_op;
3067 struct flock *a_fl;
3068 int a_flags;
3069 } */ *ap;
3070{
3071 register struct nfsnode *np = VTONFS(ap->a_vp);
3072
3073 /*
3074 * The following kludge is to allow diskless support to work
3075 * until a real NFS lockd is implemented. Basically, just pretend
3076 * that this is a local lock.
3077 */
3078 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
3079}
3080
3081/*
3082 * Print out the contents of an nfsnode.
3083 */
3084static int
3085nfs_print(ap)
3086 struct vop_print_args /* {
3087 struct vnode *a_vp;
3088 } */ *ap;
3089{
3090 register struct vnode *vp = ap->a_vp;
3091 register struct nfsnode *np = VTONFS(vp);
3092
3093 printf("tag VT_NFS, fileid %ld fsid 0x%x",
3094 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3095 if (vp->v_type == VFIFO)
3096 fifo_printinfo(vp);
3097 printf("\n");
3098 return (0);
3099}
3100
3101/*
3102 * Just call nfs_writebp() with the force argument set to 1.
3103 *
3104 * NOTE: B_DONE may or may not be set in a_bp on call.
3105 */
3106static int
3107nfs_bwrite(ap)
3108 struct vop_bwrite_args /* {
3109 struct vnode *a_bp;
3110 } */ *ap;
3111{
dadab5e9 3112 return (nfs_writebp(ap->a_bp, 1, curthread));
3113}
3114
3115/*
3116 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
3117 * the force flag is set, and it also handles the B_NEEDCOMMIT flag. We set
3118 * B_CACHE if this is a VMIO buffer.
3119 */
3120int
dadab5e9 3121nfs_writebp(bp, force, td)
3122 register struct buf *bp;
3123 int force;
dadab5e9 3124 struct thread *td;
3125{
3126 int s;
3127 int oldflags = bp->b_flags;
3128#if 0
3129 int retv = 1;
3130 off_t off;
3131#endif
3132
3133 if (BUF_REFCNT(bp) == 0)
3134 panic("bwrite: buffer is not locked???");
3135
3136 if (bp->b_flags & B_INVAL) {
3137 brelse(bp);
3138 return(0);
3139 }
3140
3141 bp->b_flags |= B_CACHE;
3142
3143 /*
3144 * Undirty the bp. We will redirty it later if the I/O fails.
3145 */
3146
3147 s = splbio();
3148 bundirty(bp);
3149 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3150
3151 bp->b_vp->v_numoutput++;
3152 curproc->p_stats->p_ru.ru_oublock++;
3153 splx(s);
3154
3155 /*
3156 * Note: to avoid loopback deadlocks, we do not
3157 * assign b_runningbufspace.
3158 */
3159 vfs_busy_pages(bp, 1);
3160
3161 if (force)
3162 bp->b_flags |= B_WRITEINPROG;
3163 BUF_KERNPROC(bp);
3164 VOP_STRATEGY(bp->b_vp, bp);
3165
3166 if( (oldflags & B_ASYNC) == 0) {
3167 int rtval = biowait(bp);
3168
3169 if (oldflags & B_DELWRI) {
3170 s = splbio();
3171 reassignbuf(bp, bp->b_vp);
3172 splx(s);
3173 }
3174
3175 brelse(bp);
3176 return (rtval);
3177 }
3178
3179 return (0);
3180}
3181
3182/*
3183 * nfs special file access vnode op.
3184 * Essentially just get vattr and then imitate iaccess() since the device is
3185 * local to the client.
3186 */
3187static int
3188nfsspec_access(ap)
3189 struct vop_access_args /* {
3190 struct vnode *a_vp;
3191 int a_mode;
3192 struct ucred *a_cred;
dadab5e9 3193 struct thread *a_td;
3194 } */ *ap;
3195{
3196 register struct vattr *vap;
3197 register gid_t *gp;
3198 register struct ucred *cred = ap->a_cred;
3199 struct vnode *vp = ap->a_vp;
3200 mode_t mode = ap->a_mode;
3201 struct vattr vattr;
3202 register int i;
3203 int error;
3204
3205 /*
3206 * Disallow write attempts on filesystems mounted read-only;
3207 * unless the file is a socket, fifo, or a block or character
3208 * device resident on the filesystem.
3209 */
3210 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3211 switch (vp->v_type) {
3212 case VREG:
3213 case VDIR:
3214 case VLNK:
3215 return (EROFS);
3216 default:
3217 break;
3218 }
3219 }
3220 /*
3221 * If you're the super-user,
3222 * you always get access.
3223 */
3224 if (cred->cr_uid == 0)
3225 return (0);
3226 vap = &vattr;
dadab5e9 3227 error = VOP_GETATTR(vp, vap, cred, ap->a_td);
3228 if (error)
3229 return (error);
3230 /*
3231 * Access check is based on only one of owner, group, public.
3232 * If not owner, then check group. If not a member of the
3233 * group, then check public access.
3234 */
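 /*
 * The VREAD/VWRITE/VEXEC bits in "mode" start out aligned with the
 * owner permission bits of va_mode; each shift right by 3 moves the
 * check to the group bits and then to the "other" bits.
 */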
3235 if (cred->cr_uid != vap->va_uid) {
3236 mode >>= 3;
3237 gp = cred->cr_groups;
3238 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3239 if (vap->va_gid == *gp)
3240 goto found;
3241 mode >>= 3;
3242found:
3243 ;
3244 }
3245 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3246 return (error);
3247}
3248
3249/*
3250 * Read wrapper for special devices.
3251 */
3252static int
3253nfsspec_read(ap)
3254 struct vop_read_args /* {
3255 struct vnode *a_vp;
3256 struct uio *a_uio;
3257 int a_ioflag;
3258 struct ucred *a_cred;
3259 } */ *ap;
3260{
3261 register struct nfsnode *np = VTONFS(ap->a_vp);
3262
3263 /*
3264 * Set access flag.
3265 */
3266 np->n_flag |= NACC;
3267 getnanotime(&np->n_atim);
3268 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3269}
3270
3271/*
3272 * Write wrapper for special devices.
3273 */
3274static int
3275nfsspec_write(ap)
3276 struct vop_write_args /* {
3277 struct vnode *a_vp;
3278 struct uio *a_uio;
3279 int a_ioflag;
3280 struct ucred *a_cred;
3281 } */ *ap;
3282{
3283 register struct nfsnode *np = VTONFS(ap->a_vp);
3284
3285 /*
3286 * Set update flag.
3287 */
3288 np->n_flag |= NUPD;
3289 getnanotime(&np->n_mtim);
3290 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3291}
3292
3293/*
3294 * Close wrapper for special devices.
3295 *
3296 * Update the times on the nfsnode then do device close.
3297 */
3298static int
3299nfsspec_close(ap)
3300 struct vop_close_args /* {
3301 struct vnode *a_vp;
3302 int a_fflag;
3303 struct ucred *a_cred;
dadab5e9 3304 struct thread *a_td;
3305 } */ *ap;
3306{
3307 register struct vnode *vp = ap->a_vp;
3308 register struct nfsnode *np = VTONFS(vp);
3309 struct vattr vattr;
3310
3311 if (np->n_flag & (NACC | NUPD)) {
3312 np->n_flag |= NCHG;
3313 if (vp->v_usecount == 1 &&
3314 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3315 VATTR_NULL(&vattr);
3316 if (np->n_flag & NACC)
3317 vattr.va_atime = np->n_atim;
3318 if (np->n_flag & NUPD)
3319 vattr.va_mtime = np->n_mtim;
dadab5e9 3320 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_td);
3321 }
3322 }
3323 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3324}
3325
3326/*
3327 * Read wrapper for fifos.
3328 */
3329static int
3330nfsfifo_read(ap)
3331 struct vop_read_args /* {
3332 struct vnode *a_vp;
3333 struct uio *a_uio;
3334 int a_ioflag;
3335 struct ucred *a_cred;
3336 } */ *ap;
3337{
3338 register struct nfsnode *np = VTONFS(ap->a_vp);
3339
3340 /*
3341 * Set access flag.
3342 */
3343 np->n_flag |= NACC;
3344 getnanotime(&np->n_atim);
3345 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3346}
3347
3348/*
3349 * Write wrapper for fifos.
3350 */
3351static int
3352nfsfifo_write(ap)
3353 struct vop_write_args /* {
3354 struct vnode *a_vp;
3355 struct uio *a_uio;
3356 int a_ioflag;
3357 struct ucred *a_cred;
3358 } */ *ap;
3359{
3360 register struct nfsnode *np = VTONFS(ap->a_vp);
3361
3362 /*
3363 * Set update flag.
3364 */
3365 np->n_flag |= NUPD;
3366 getnanotime(&np->n_mtim);
3367 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3368}
3369
3370/*
3371 * Close wrapper for fifos.
3372 *
3373 * Update the times on the nfsnode then do fifo close.
3374 */
3375static int
3376nfsfifo_close(ap)
3377 struct vop_close_args /* {
3378 struct vnode *a_vp;
3379 int a_fflag;
3380 struct ucred *a_cred;
dadab5e9 3381 struct thread *a_td;
3382 } */ *ap;
3383{
3384 register struct vnode *vp = ap->a_vp;
3385 register struct nfsnode *np = VTONFS(vp);
3386 struct vattr vattr;
3387 struct timespec ts;
3388
3389 if (np->n_flag & (NACC | NUPD)) {
3390 getnanotime(&ts);
3391 if (np->n_flag & NACC)
3392 np->n_atim = ts;
3393 if (np->n_flag & NUPD)
3394 np->n_mtim = ts;
3395 np->n_flag |= NCHG;
3396 if (vp->v_usecount == 1 &&
3397 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3398 VATTR_NULL(&vattr);
3399 if (np->n_flag & NACC)
3400 vattr.va_atime = np->n_atim;
3401 if (np->n_flag & NUPD)
3402 vattr.va_mtime = np->n_mtim;
dadab5e9 3403 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_td);
3404 }
3405 }
3406 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3407}
3408