Add NetGear FA-511 support
[dragonfly.git] / sys / vfs / nfs / nfs_vnops.c
CommitLineData
984263bc
MD
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $FreeBSD: src/sys/nfs/nfs_vnops.c,v 1.150.2.5 2001/12/20 19:56:28 dillon Exp $
a8f169e2 38 * $DragonFly: src/sys/vfs/nfs/nfs_vnops.c,v 1.57 2006/04/28 16:34:01 dillon Exp $
984263bc
MD
39 */
40
41
42/*
43 * vnode op calls for Sun NFS version 2 and 3
44 */
45
46#include "opt_inet.h"
47
48#include <sys/param.h>
49#include <sys/kernel.h>
50#include <sys/systm.h>
51#include <sys/resourcevar.h>
52#include <sys/proc.h>
53#include <sys/mount.h>
54#include <sys/buf.h>
55#include <sys/malloc.h>
56#include <sys/mbuf.h>
57#include <sys/namei.h>
fad57d0e 58#include <sys/nlookup.h>
984263bc
MD
59#include <sys/socket.h>
60#include <sys/vnode.h>
61#include <sys/dirent.h>
62#include <sys/fcntl.h>
63#include <sys/lockf.h>
64#include <sys/stat.h>
65#include <sys/sysctl.h>
66#include <sys/conf.h>
67
68#include <vm/vm.h>
69#include <vm/vm_extern.h>
70#include <vm/vm_zone.h>
71
3020e3be
MD
72#include <sys/buf2.h>
73
1f2de5d4 74#include <vfs/fifofs/fifo.h>
01f31ab3
JS
75#include <vfs/ufs/dir.h>
76
77#undef DIRBLKSIZ
984263bc 78
1f2de5d4
MD
79#include "rpcv2.h"
80#include "nfsproto.h"
81#include "nfs.h"
1f2de5d4 82#include "nfsmount.h"
c1cf1e59 83#include "nfsnode.h"
1f2de5d4
MD
84#include "xdr_subs.h"
85#include "nfsm_subs.h"
984263bc
MD
86
87#include <net/if.h>
88#include <netinet/in.h>
89#include <netinet/in_var.h>
90
165dba55
DR
91#include <sys/thread2.h>
92
984263bc
MD
93/* Defs */
94#define TRUE 1
95#define FALSE 0
96
a6ee311a
RG
97static int nfsspec_read (struct vop_read_args *);
98static int nfsspec_write (struct vop_write_args *);
99static int nfsfifo_read (struct vop_read_args *);
100static int nfsfifo_write (struct vop_write_args *);
101static int nfsspec_close (struct vop_close_args *);
102static int nfsfifo_close (struct vop_close_args *);
984263bc 103#define nfs_poll vop_nopoll
a6ee311a 104static int nfs_setattrrpc (struct vnode *,struct vattr *,struct ucred *,struct thread *);
e62afb5f
MD
105static int nfs_lookup (struct vop_old_lookup_args *);
106static int nfs_create (struct vop_old_create_args *);
107static int nfs_mknod (struct vop_old_mknod_args *);
a6ee311a
RG
108static int nfs_open (struct vop_open_args *);
109static int nfs_close (struct vop_close_args *);
110static int nfs_access (struct vop_access_args *);
111static int nfs_getattr (struct vop_getattr_args *);
112static int nfs_setattr (struct vop_setattr_args *);
113static int nfs_read (struct vop_read_args *);
114static int nfs_mmap (struct vop_mmap_args *);
115static int nfs_fsync (struct vop_fsync_args *);
e62afb5f
MD
116static int nfs_remove (struct vop_old_remove_args *);
117static int nfs_link (struct vop_old_link_args *);
118static int nfs_rename (struct vop_old_rename_args *);
119static int nfs_mkdir (struct vop_old_mkdir_args *);
120static int nfs_rmdir (struct vop_old_rmdir_args *);
121static int nfs_symlink (struct vop_old_symlink_args *);
a6ee311a
RG
122static int nfs_readdir (struct vop_readdir_args *);
123static int nfs_bmap (struct vop_bmap_args *);
124static int nfs_strategy (struct vop_strategy_args *);
125static int nfs_lookitup (struct vnode *, const char *, int,
126 struct ucred *, struct thread *, struct nfsnode **);
127static int nfs_sillyrename (struct vnode *,struct vnode *,struct componentname *);
128static int nfsspec_access (struct vop_access_args *);
129static int nfs_readlink (struct vop_readlink_args *);
130static int nfs_print (struct vop_print_args *);
131static int nfs_advlock (struct vop_advlock_args *);
132static int nfs_bwrite (struct vop_bwrite_args *);
fad57d0e
MD
133
134static int nfs_nresolve (struct vop_nresolve_args *);
984263bc
MD
135/*
136 * Global vfs data structures for nfs
137 */
0961aa92 138struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
2d3e977e 139 { &vop_default_desc, vop_defaultop },
625ddaba
JS
140 { &vop_access_desc, (vnodeopv_entry_t) nfs_access },
141 { &vop_advlock_desc, (vnodeopv_entry_t) nfs_advlock },
142 { &vop_bmap_desc, (vnodeopv_entry_t) nfs_bmap },
143 { &vop_bwrite_desc, (vnodeopv_entry_t) nfs_bwrite },
144 { &vop_close_desc, (vnodeopv_entry_t) nfs_close },
e62afb5f 145 { &vop_old_create_desc, (vnodeopv_entry_t) nfs_create },
625ddaba
JS
146 { &vop_fsync_desc, (vnodeopv_entry_t) nfs_fsync },
147 { &vop_getattr_desc, (vnodeopv_entry_t) nfs_getattr },
148 { &vop_getpages_desc, (vnodeopv_entry_t) nfs_getpages },
149 { &vop_putpages_desc, (vnodeopv_entry_t) nfs_putpages },
150 { &vop_inactive_desc, (vnodeopv_entry_t) nfs_inactive },
151 { &vop_islocked_desc, (vnodeopv_entry_t) vop_stdislocked },
e62afb5f 152 { &vop_old_link_desc, (vnodeopv_entry_t) nfs_link },
625ddaba 153 { &vop_lock_desc, (vnodeopv_entry_t) vop_stdlock },
e62afb5f
MD
154 { &vop_old_lookup_desc, (vnodeopv_entry_t) nfs_lookup },
155 { &vop_old_mkdir_desc, (vnodeopv_entry_t) nfs_mkdir },
156 { &vop_old_mknod_desc, (vnodeopv_entry_t) nfs_mknod },
625ddaba
JS
157 { &vop_mmap_desc, (vnodeopv_entry_t) nfs_mmap },
158 { &vop_open_desc, (vnodeopv_entry_t) nfs_open },
159 { &vop_poll_desc, (vnodeopv_entry_t) nfs_poll },
160 { &vop_print_desc, (vnodeopv_entry_t) nfs_print },
161 { &vop_read_desc, (vnodeopv_entry_t) nfs_read },
162 { &vop_readdir_desc, (vnodeopv_entry_t) nfs_readdir },
163 { &vop_readlink_desc, (vnodeopv_entry_t) nfs_readlink },
164 { &vop_reclaim_desc, (vnodeopv_entry_t) nfs_reclaim },
e62afb5f
MD
165 { &vop_old_remove_desc, (vnodeopv_entry_t) nfs_remove },
166 { &vop_old_rename_desc, (vnodeopv_entry_t) nfs_rename },
167 { &vop_old_rmdir_desc, (vnodeopv_entry_t) nfs_rmdir },
625ddaba
JS
168 { &vop_setattr_desc, (vnodeopv_entry_t) nfs_setattr },
169 { &vop_strategy_desc, (vnodeopv_entry_t) nfs_strategy },
e62afb5f 170 { &vop_old_symlink_desc, (vnodeopv_entry_t) nfs_symlink },
625ddaba
JS
171 { &vop_unlock_desc, (vnodeopv_entry_t) vop_stdunlock },
172 { &vop_write_desc, (vnodeopv_entry_t) nfs_write },
173
174 { &vop_nresolve_desc, (vnodeopv_entry_t) nfs_nresolve },
984263bc
MD
175 { NULL, NULL }
176};
984263bc
MD
177
178/*
179 * Special device vnode ops
180 */
0961aa92 181struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
625ddaba
JS
182 { &vop_default_desc, (vnodeopv_entry_t) spec_vnoperate },
183 { &vop_access_desc, (vnodeopv_entry_t) nfsspec_access },
184 { &vop_close_desc, (vnodeopv_entry_t) nfsspec_close },
185 { &vop_fsync_desc, (vnodeopv_entry_t) nfs_fsync },
186 { &vop_getattr_desc, (vnodeopv_entry_t) nfs_getattr },
187 { &vop_inactive_desc, (vnodeopv_entry_t) nfs_inactive },
188 { &vop_islocked_desc, (vnodeopv_entry_t) vop_stdislocked },
189 { &vop_lock_desc, (vnodeopv_entry_t) vop_stdlock },
190 { &vop_print_desc, (vnodeopv_entry_t) nfs_print },
191 { &vop_read_desc, (vnodeopv_entry_t) nfsspec_read },
192 { &vop_reclaim_desc, (vnodeopv_entry_t) nfs_reclaim },
193 { &vop_setattr_desc, (vnodeopv_entry_t) nfs_setattr },
194 { &vop_unlock_desc, (vnodeopv_entry_t) vop_stdunlock },
195 { &vop_write_desc, (vnodeopv_entry_t) nfsspec_write },
984263bc
MD
196 { NULL, NULL }
197};
984263bc 198
0961aa92 199struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
625ddaba
JS
200 { &vop_default_desc, (vnodeopv_entry_t) fifo_vnoperate },
201 { &vop_access_desc, (vnodeopv_entry_t) nfsspec_access },
202 { &vop_close_desc, (vnodeopv_entry_t) nfsfifo_close },
203 { &vop_fsync_desc, (vnodeopv_entry_t) nfs_fsync },
204 { &vop_getattr_desc, (vnodeopv_entry_t) nfs_getattr },
205 { &vop_inactive_desc, (vnodeopv_entry_t) nfs_inactive },
206 { &vop_islocked_desc, (vnodeopv_entry_t) vop_stdislocked },
207 { &vop_lock_desc, (vnodeopv_entry_t) vop_stdlock },
208 { &vop_print_desc, (vnodeopv_entry_t) nfs_print },
209 { &vop_read_desc, (vnodeopv_entry_t) nfsfifo_read },
210 { &vop_reclaim_desc, (vnodeopv_entry_t) nfs_reclaim },
211 { &vop_setattr_desc, (vnodeopv_entry_t) nfs_setattr },
212 { &vop_unlock_desc, (vnodeopv_entry_t) vop_stdunlock },
213 { &vop_write_desc, (vnodeopv_entry_t) nfsfifo_write },
984263bc
MD
214 { NULL, NULL }
215};
984263bc 216
a6ee311a 217static int nfs_mknodrpc (struct vnode *dvp, struct vnode **vpp,
984263bc 218 struct componentname *cnp,
a6ee311a
RG
219 struct vattr *vap);
220static int nfs_removerpc (struct vnode *dvp, const char *name,
984263bc 221 int namelen,
a6ee311a
RG
222 struct ucred *cred, struct thread *td);
223static int nfs_renamerpc (struct vnode *fdvp, const char *fnameptr,
984263bc
MD
224 int fnamelen, struct vnode *tdvp,
225 const char *tnameptr, int tnamelen,
a6ee311a
RG
226 struct ucred *cred, struct thread *td);
227static int nfs_renameit (struct vnode *sdvp,
984263bc 228 struct componentname *scnp,
a6ee311a 229 struct sillyrename *sp);
984263bc
MD
230
231/*
232 * Global variables
233 */
234extern u_int32_t nfs_true, nfs_false;
235extern u_int32_t nfs_xdrneg1;
236extern struct nfsstats nfsstats;
237extern nfstype nfsv3_type[9];
dadab5e9 238struct thread *nfs_iodwant[NFS_MAXASYNCDAEMON];
984263bc
MD
239struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
240int nfs_numasync = 0;
984263bc
MD
241
242SYSCTL_DECL(_vfs_nfs);
243
97100839 244static int nfsaccess_cache_timeout = NFS_DEFATTRTIMO;
984263bc
MD
245SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
246 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
247
4d17b298
MD
248static int nfsneg_cache_timeout = NFS_MINATTRTIMO;
249SYSCTL_INT(_vfs_nfs, OID_AUTO, neg_cache_timeout, CTLFLAG_RW,
250 &nfsneg_cache_timeout, 0, "NFS NEGATIVE ACCESS cache timeout");
251
984263bc
MD
252static int nfsv3_commit_on_close = 0;
253SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
254 &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
255#if 0
256SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
257 &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
258
259SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
260 &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
261#endif
262
263#define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
264 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
265 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
266static int
dadab5e9 267nfs3_access_otw(struct vnode *vp, int wmode,
e851b29e 268 struct thread *td, struct ucred *cred)
984263bc
MD
269{
270 const int v3 = 1;
271 u_int32_t *tl;
272 int error = 0, attrflag;
273
274 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
275 caddr_t bpos, dpos, cp2;
40393ded
RG
276 int32_t t1, t2;
277 caddr_t cp;
984263bc
MD
278 u_int32_t rmode;
279 struct nfsnode *np = VTONFS(vp);
280
281 nfsstats.rpccnt[NFSPROC_ACCESS]++;
282 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
283 nfsm_fhtom(vp, v3);
284 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
285 *tl = txdr_unsigned(wmode);
dadab5e9 286 nfsm_request(vp, NFSPROC_ACCESS, td, cred);
5a9187cb 287 nfsm_postop_attr(vp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
288 if (!error) {
289 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
290 rmode = fxdr_unsigned(u_int32_t, *tl);
291 np->n_mode = rmode;
292 np->n_modeuid = cred->cr_uid;
3a6f9faf 293 np->n_modestamp = mycpu->gd_time_seconds;
984263bc 294 }
6b08710e
MD
295 m_freem(mrep);
296nfsmout:
984263bc
MD
297 return error;
298}
299
300/*
301 * nfs access vnode op.
302 * For nfs version 2, just return ok. File accesses may fail later.
303 * For nfs version 3, use the access rpc to check accessibility. If file modes
304 * are changed on the server, accesses might still fail later.
e851b29e
CP
305 *
306 * nfs_access(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
307 * struct thread *a_td)
984263bc
MD
308 */
309static int
e851b29e 310nfs_access(struct vop_access_args *ap)
984263bc 311{
40393ded 312 struct vnode *vp = ap->a_vp;
984263bc
MD
313 int error = 0;
314 u_int32_t mode, wmode;
315 int v3 = NFS_ISV3(vp);
316 struct nfsnode *np = VTONFS(vp);
317
318 /*
319 * Disallow write attempts on filesystems mounted read-only;
320 * unless the file is a socket, fifo, or a block or character
321 * device resident on the filesystem.
322 */
323 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
324 switch (vp->v_type) {
325 case VREG:
326 case VDIR:
327 case VLNK:
328 return (EROFS);
329 default:
330 break;
331 }
332 }
333 /*
334 * For nfs v3, check to see if we have done this recently, and if
335 * so return our cached result instead of making an ACCESS call.
336 * If not, do an access rpc, otherwise you are stuck emulating
337 * ufs_access() locally using the vattr. This may not be correct,
338 * since the server may apply other access criteria such as
339 * client uid-->server uid mapping that we do not know about.
340 */
341 if (v3) {
342 if (ap->a_mode & VREAD)
343 mode = NFSV3ACCESS_READ;
344 else
345 mode = 0;
346 if (vp->v_type != VDIR) {
347 if (ap->a_mode & VWRITE)
348 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
349 if (ap->a_mode & VEXEC)
350 mode |= NFSV3ACCESS_EXECUTE;
351 } else {
352 if (ap->a_mode & VWRITE)
353 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
354 NFSV3ACCESS_DELETE);
355 if (ap->a_mode & VEXEC)
356 mode |= NFSV3ACCESS_LOOKUP;
357 }
358 /* XXX safety belt, only make blanket request if caching */
359 if (nfsaccess_cache_timeout > 0) {
360 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
361 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
362 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
363 } else {
364 wmode = mode;
365 }
366
367 /*
368 * Does our cached result allow us to give a definite yes to
369 * this request?
370 */
fad57d0e
MD
371 if (np->n_modestamp &&
372 (mycpu->gd_time_seconds < (np->n_modestamp + nfsaccess_cache_timeout)) &&
373 (ap->a_cred->cr_uid == np->n_modeuid) &&
374 ((np->n_mode & mode) == mode)) {
984263bc
MD
375 nfsstats.accesscache_hits++;
376 } else {
377 /*
378 * Either a no, or a don't know. Go to the wire.
379 */
380 nfsstats.accesscache_misses++;
dadab5e9 381 error = nfs3_access_otw(vp, wmode, ap->a_td,ap->a_cred);
984263bc
MD
382 if (!error) {
383 if ((np->n_mode & mode) != mode) {
384 error = EACCES;
385 }
386 }
387 }
984263bc
MD
388 } else {
389 if ((error = nfsspec_access(ap)) != 0)
390 return (error);
391
392 /*
393 * Attempt to prevent a mapped root from accessing a file
394 * which it shouldn't. We try to read a byte from the file
395 * if the user is root and the file is not zero length.
396 * After calling nfsspec_access, we should have the correct
397 * file size cached.
398 */
399 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
400 && VTONFS(vp)->n_size > 0) {
401 struct iovec aiov;
402 struct uio auio;
403 char buf[1];
404
405 aiov.iov_base = buf;
406 aiov.iov_len = 1;
407 auio.uio_iov = &aiov;
408 auio.uio_iovcnt = 1;
409 auio.uio_offset = 0;
410 auio.uio_resid = 1;
411 auio.uio_segflg = UIO_SYSSPACE;
412 auio.uio_rw = UIO_READ;
dadab5e9 413 auio.uio_td = ap->a_td;
984263bc 414
c1cf1e59 415 if (vp->v_type == VREG) {
3b568787 416 error = nfs_readrpc(vp, &auio);
c1cf1e59 417 } else if (vp->v_type == VDIR) {
984263bc
MD
418 char* bp;
419 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
420 aiov.iov_base = bp;
421 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
3b568787 422 error = nfs_readdirrpc(vp, &auio);
984263bc 423 free(bp, M_TEMP);
c1cf1e59 424 } else if (vp->v_type == VLNK) {
3b568787 425 error = nfs_readlinkrpc(vp, &auio);
c1cf1e59 426 } else {
984263bc 427 error = EACCES;
c1cf1e59 428 }
984263bc 429 }
984263bc 430 }
c1cf1e59
MD
431 /*
432 * [re]record creds for reading and/or writing if access
09b1ee9b
MD
433 * was granted. Assume the NFS server will grant read access
434 * for execute requests.
c1cf1e59
MD
435 */
436 if (error == 0) {
09b1ee9b 437 if ((ap->a_mode & (VREAD|VEXEC)) && ap->a_cred != np->n_rucred) {
c1cf1e59
MD
438 crhold(ap->a_cred);
439 if (np->n_rucred)
440 crfree(np->n_rucred);
441 np->n_rucred = ap->a_cred;
442 }
443 if ((ap->a_mode & VWRITE) && ap->a_cred != np->n_wucred) {
444 crhold(ap->a_cred);
445 if (np->n_wucred)
446 crfree(np->n_wucred);
447 np->n_wucred = ap->a_cred;
448 }
449 }
450 return(error);
984263bc
MD
451}
452
453/*
454 * nfs open vnode op
455 * Check to see if the type is ok
456 * and that deletion is not in progress.
457 * For paged in text files, you will need to flush the page cache
458 * if consistency is lost.
e851b29e
CP
459 *
460 * nfs_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
461 * struct thread *a_td)
984263bc
MD
462 */
463/* ARGSUSED */
464static int
e851b29e 465nfs_open(struct vop_open_args *ap)
984263bc 466{
40393ded 467 struct vnode *vp = ap->a_vp;
984263bc 468 struct nfsnode *np = VTONFS(vp);
984263bc
MD
469 struct vattr vattr;
470 int error;
471
472 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
473#ifdef DIAGNOSTIC
474 printf("open eacces vtyp=%d\n",vp->v_type);
475#endif
ca3a2b2f 476 return (EOPNOTSUPP);
984263bc 477 }
5a9187cb 478
984263bc 479 /*
5a9187cb
MD
480 * Clear the attribute cache only if opening with write access. It
481 * is unclear if we should do this at all here, but we certainly
482 * should not clear the cache unconditionally simply because a file
483 * is being opened.
984263bc 484 */
5a9187cb
MD
485 if (ap->a_mode & FWRITE)
486 np->n_attrstamp = 0;
487
e07fef60
MD
488 /*
489 * For normal NFS, reconcile changes made locally verses
490 * changes made remotely. Note that VOP_GETATTR only goes
491 * to the wire if the cached attribute has timed out or been
492 * cleared.
493 *
494 * If local modifications have been made clear the attribute
495 * cache to force an attribute and modified time check. If
496 * GETATTR detects that the file has been changed by someone
497 * other then us it will set NRMODIFIED.
498 *
499 * If we are opening a directory and local changes have been
500 * made we have to invalidate the cache in order to ensure
501 * that we get the most up-to-date information from the
502 * server. XXX
503 */
504 if (np->n_flag & NLMODIFIED) {
505 np->n_attrstamp = 0;
506 if (vp->v_type == VDIR) {
5a9187cb
MD
507 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
508 if (error == EINTR)
509 return (error);
e07fef60 510 nfs_invaldir(vp);
5a9187cb 511 }
984263bc 512 }
e07fef60
MD
513 error = VOP_GETATTR(vp, &vattr, ap->a_td);
514 if (error)
515 return (error);
516 if (np->n_flag & NRMODIFIED) {
517 if (vp->v_type == VDIR)
518 nfs_invaldir(vp);
519 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
520 if (error == EINTR)
521 return (error);
522 np->n_flag &= ~NRMODIFIED;
523 }
fad57d0e 524
8ddc6004 525 return (vop_stdopen(ap));
984263bc
MD
526}
527
528/*
529 * nfs close vnode op
530 * What an NFS client should do upon close after writing is a debatable issue.
531 * Most NFS clients push delayed writes to the server upon close, basically for
532 * two reasons:
533 * 1 - So that any write errors may be reported back to the client process
534 * doing the close system call. By far the two most likely errors are
535 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
536 * 2 - To put a worst case upper bound on cache inconsistency between
537 * multiple clients for the file.
538 * There is also a consistency problem for Version 2 of the protocol w.r.t.
539 * not being able to tell if other clients are writing a file concurrently,
540 * since there is no way of knowing if the changed modify time in the reply
541 * is only due to the write for this client.
542 * (NFS Version 3 provides weak cache consistency data in the reply that
543 * should be sufficient to detect and handle this case.)
544 *
545 * The current code does the following:
546 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
547 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
548 * or commit them (this satisfies 1 and 2 except for the
549 * case where the server crashes after this close but
550 * before the commit RPC, which is felt to be "good
551 * enough". Changing the last argument to nfs_flush() to
552 * a 1 would force a commit operation, if it is felt a
553 * commit is necessary now.
554 * for NQNFS - do nothing now, since 2 is dealt with via leases and
555 * 1 should be dealt with via an fsync() system call for
556 * cases where write errors are important.
e851b29e
CP
557 *
558 * nfs_close(struct vnodeop_desc *a_desc, struct vnode *a_vp, int a_fflag,
559 * struct ucred *a_cred, struct thread *a_td)
984263bc
MD
560 */
561/* ARGSUSED */
562static int
e851b29e 563nfs_close(struct vop_close_args *ap)
984263bc 564{
40393ded
RG
565 struct vnode *vp = ap->a_vp;
566 struct nfsnode *np = VTONFS(vp);
984263bc
MD
567 int error = 0;
568
569 if (vp->v_type == VREG) {
e07fef60 570 if (np->n_flag & NLMODIFIED) {
984263bc
MD
571 if (NFS_ISV3(vp)) {
572 /*
573 * Under NFSv3 we have dirty buffers to dispose of. We
574 * must flush them to the NFS server. We have the option
575 * of waiting all the way through the commit rpc or just
576 * waiting for the initial write. The default is to only
577 * wait through the initial write so the data is in the
578 * server's cache, which is roughly similar to the state
579 * a standard disk subsystem leaves the file in on close().
580 *
5a9187cb 581 * We cannot clear the NLMODIFIED bit in np->n_flag due to
984263bc
MD
582 * potential races with other processes, and certainly
583 * cannot clear it if we don't commit.
584 */
585 int cm = nfsv3_commit_on_close ? 1 : 0;
3b568787 586 error = nfs_flush(vp, MNT_WAIT, ap->a_td, cm);
5a9187cb 587 /* np->n_flag &= ~NLMODIFIED; */
984263bc 588 } else {
3b568787 589 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
984263bc
MD
590 }
591 np->n_attrstamp = 0;
592 }
593 if (np->n_flag & NWRITEERR) {
594 np->n_flag &= ~NWRITEERR;
595 error = np->n_error;
596 }
597 }
8ddc6004 598 vop_stdclose(ap);
984263bc
MD
599 return (error);
600}
601
602/*
603 * nfs getattr call from vfs.
e851b29e
CP
604 *
605 * nfs_getattr(struct vnode *a_vp, struct vattr *a_vap, struct ucred *a_cred,
606 * struct thread *a_td)
984263bc
MD
607 */
608static int
e851b29e 609nfs_getattr(struct vop_getattr_args *ap)
984263bc 610{
40393ded
RG
611 struct vnode *vp = ap->a_vp;
612 struct nfsnode *np = VTONFS(vp);
613 caddr_t cp;
614 u_int32_t *tl;
615 int32_t t1, t2;
984263bc
MD
616 caddr_t bpos, dpos;
617 int error = 0;
618 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
619 int v3 = NFS_ISV3(vp);
620
621 /*
622 * Update local times for special files.
623 */
624 if (np->n_flag & (NACC | NUPD))
625 np->n_flag |= NCHG;
626 /*
627 * First look in the cache.
628 */
629 if (nfs_getattrcache(vp, ap->a_vap) == 0)
630 return (0);
631
632 if (v3 && nfsaccess_cache_timeout > 0) {
633 nfsstats.accesscache_misses++;
c1cf1e59 634 nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_td, nfs_vpcred(vp, ND_CHECK));
984263bc
MD
635 if (nfs_getattrcache(vp, ap->a_vap) == 0)
636 return (0);
637 }
638
639 nfsstats.rpccnt[NFSPROC_GETATTR]++;
640 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
641 nfsm_fhtom(vp, v3);
c1cf1e59 642 nfsm_request(vp, NFSPROC_GETATTR, ap->a_td, nfs_vpcred(vp, ND_CHECK));
984263bc
MD
643 if (!error) {
644 nfsm_loadattr(vp, ap->a_vap);
645 }
6b08710e
MD
646 m_freem(mrep);
647nfsmout:
984263bc
MD
648 return (error);
649}
650
651/*
652 * nfs setattr call.
e851b29e
CP
653 *
654 * nfs_setattr(struct vnodeop_desc *a_desc, struct vnode *a_vp,
655 * struct vattr *a_vap, struct ucred *a_cred,
656 * struct thread *a_td)
984263bc
MD
657 */
658static int
e851b29e 659nfs_setattr(struct vop_setattr_args *ap)
984263bc 660{
40393ded
RG
661 struct vnode *vp = ap->a_vp;
662 struct nfsnode *np = VTONFS(vp);
663 struct vattr *vap = ap->a_vap;
984263bc
MD
664 int error = 0;
665 u_quad_t tsize;
666
667#ifndef nolint
668 tsize = (u_quad_t)0;
669#endif
670
671 /*
672 * Setting of flags is not supported.
673 */
674 if (vap->va_flags != VNOVAL)
675 return (EOPNOTSUPP);
676
677 /*
678 * Disallow write attempts if the filesystem is mounted read-only.
679 */
680 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
681 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
682 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
683 (vp->v_mount->mnt_flag & MNT_RDONLY))
684 return (EROFS);
685 if (vap->va_size != VNOVAL) {
686 switch (vp->v_type) {
687 case VDIR:
688 return (EISDIR);
689 case VCHR:
690 case VBLK:
691 case VSOCK:
692 case VFIFO:
693 if (vap->va_mtime.tv_sec == VNOVAL &&
694 vap->va_atime.tv_sec == VNOVAL &&
695 vap->va_mode == (mode_t)VNOVAL &&
696 vap->va_uid == (uid_t)VNOVAL &&
697 vap->va_gid == (gid_t)VNOVAL)
698 return (0);
699 vap->va_size = VNOVAL;
700 break;
701 default:
702 /*
703 * Disallow write attempts if the filesystem is
704 * mounted read-only.
705 */
706 if (vp->v_mount->mnt_flag & MNT_RDONLY)
707 return (EROFS);
708
709 /*
a004bca6
MD
710 * This is nasty. The RPCs we send to flush pending
711 * data often return attribute information which is
712 * cached via a callback to nfs_loadattrcache(), which
713 * has the effect of changing our notion of the file
714 * size. Due to flushed appends and other operations
715 * the file size can be set to virtually anything,
716 * including values that do not match either the old
717 * or intended file size.
718 *
719 * When this condition is detected we must loop to
720 * try the operation again. Hopefully no more
721 * flushing is required on the loop so it works the
722 * second time around. THIS CASE ALMOST ALWAYS
723 * HAPPENS!
984263bc 724 */
984263bc 725 tsize = np->n_size;
a004bca6 726again:
3b568787 727 error = nfs_meta_setsize(vp, ap->a_td, vap->va_size);
984263bc 728
5a9187cb 729 if (np->n_flag & NLMODIFIED) {
984263bc 730 if (vap->va_size == 0)
3b568787 731 error = nfs_vinvalbuf(vp, 0, ap->a_td, 1);
984263bc 732 else
3b568787 733 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
984263bc 734 }
a004bca6
MD
735 /*
736 * note: this loop case almost always happens at
737 * least once per truncation.
b07fc55c 738 */
a004bca6
MD
739 if (error == 0 && np->n_size != vap->va_size)
740 goto again;
741 np->n_vattr.va_size = vap->va_size;
5a9187cb
MD
742 break;
743 }
984263bc 744 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
5a9187cb 745 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NLMODIFIED) &&
984263bc 746 vp->v_type == VREG &&
a004bca6
MD
747 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1)) == EINTR
748 ) {
984263bc 749 return (error);
a004bca6 750 }
dadab5e9 751 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_td);
a004bca6
MD
752
753 /*
754 * Sanity check if a truncation was issued. This should only occur
755 * if multiple processes are racing on the same file.
756 */
757 if (error == 0 && vap->va_size != VNOVAL &&
758 np->n_size != vap->va_size) {
759 printf("NFS ftruncate: server disagrees on the file size: %lld/%lld/%lld\n", tsize, vap->va_size, np->n_size);
760 goto again;
761 }
984263bc
MD
762 if (error && vap->va_size != VNOVAL) {
763 np->n_size = np->n_vattr.va_size = tsize;
764 vnode_pager_setsize(vp, np->n_size);
765 }
766 return (error);
767}
768
769/*
770 * Do an nfs setattr rpc.
771 */
772static int
dadab5e9 773nfs_setattrrpc(struct vnode *vp, struct vattr *vap,
e851b29e 774 struct ucred *cred, struct thread *td)
984263bc 775{
40393ded 776 struct nfsv2_sattr *sp;
999914df 777 struct nfsnode *np = VTONFS(vp);
40393ded
RG
778 caddr_t cp;
779 int32_t t1, t2;
984263bc
MD
780 caddr_t bpos, dpos, cp2;
781 u_int32_t *tl;
782 int error = 0, wccflag = NFSV3_WCCRATTR;
783 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
784 int v3 = NFS_ISV3(vp);
785
786 nfsstats.rpccnt[NFSPROC_SETATTR]++;
787 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
788 nfsm_fhtom(vp, v3);
789 if (v3) {
790 nfsm_v3attrbuild(vap, TRUE);
791 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
792 *tl = nfs_false;
793 } else {
794 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
795 if (vap->va_mode == (mode_t)VNOVAL)
796 sp->sa_mode = nfs_xdrneg1;
797 else
798 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
799 if (vap->va_uid == (uid_t)VNOVAL)
800 sp->sa_uid = nfs_xdrneg1;
801 else
802 sp->sa_uid = txdr_unsigned(vap->va_uid);
803 if (vap->va_gid == (gid_t)VNOVAL)
804 sp->sa_gid = nfs_xdrneg1;
805 else
806 sp->sa_gid = txdr_unsigned(vap->va_gid);
807 sp->sa_size = txdr_unsigned(vap->va_size);
808 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
809 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
810 }
dadab5e9 811 nfsm_request(vp, NFSPROC_SETATTR, td, cred);
984263bc 812 if (v3) {
999914df 813 np->n_modestamp = 0;
984263bc
MD
814 nfsm_wcc_data(vp, wccflag);
815 } else
816 nfsm_loadattr(vp, (struct vattr *)0);
6b08710e
MD
817 m_freem(mrep);
818nfsmout:
984263bc
MD
819 return (error);
820}
821
fad57d0e
MD
822/*
823 * NEW API CALL - replaces nfs_lookup(). However, we cannot remove
824 * nfs_lookup() until all remaining new api calls are implemented.
825 *
826 * Resolve a namecache entry. This function is passed a locked ncp and
827 * must call cache_setvp() on it as appropriate to resolve the entry.
828 */
829static int
830nfs_nresolve(struct vop_nresolve_args *ap)
831{
832 struct thread *td = curthread;
833 struct namecache *ncp;
834 struct ucred *cred;
835 struct nfsnode *np;
836 struct vnode *dvp;
837 struct vnode *nvp;
838 nfsfh_t *fhp;
839 int attrflag;
840 int fhsize;
841 int error;
842 int len;
843 int v3;
844 /******NFSM MACROS********/
845 struct mbuf *mb, *mrep, *mreq, *mb2, *md;
846 caddr_t bpos, dpos, cp, cp2;
847 u_int32_t *tl;
848 int32_t t1, t2;
849
850 cred = ap->a_cred;
851 ncp = ap->a_ncp;
852
853 KKASSERT(ncp->nc_parent && ncp->nc_parent->nc_vp);
854 dvp = ncp->nc_parent->nc_vp;
855 if ((error = vget(dvp, LK_SHARED, td)) != 0)
856 return (error);
857
858 nvp = NULL;
859 v3 = NFS_ISV3(dvp);
860 nfsstats.lookupcache_misses++;
861 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
862 len = ncp->nc_nlen;
863 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
864 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
865 nfsm_fhtom(dvp, v3);
866 nfsm_strtom(ncp->nc_name, len, NFS_MAXNAMLEN);
867 nfsm_request(dvp, NFSPROC_LOOKUP, td, ap->a_cred);
868 if (error) {
869 /*
870 * Cache negatve lookups to reduce NFS traffic, but use
871 * a fast timeout. Otherwise use a timeout of 1 tick.
872 * XXX we should add a namecache flag for no-caching
873 * to uncache the negative hit as soon as possible, but
874 * we cannot simply destroy the entry because it is used
875 * as a placeholder by the caller.
876 */
877 if (error == ENOENT) {
878 int nticks;
879
880 if (nfsneg_cache_timeout)
881 nticks = nfsneg_cache_timeout * hz;
882 else
883 nticks = 1;
884 cache_setvp(ncp, NULL);
885 cache_settimeout(ncp, nticks);
886 }
5a9187cb 887 nfsm_postop_attr(dvp, attrflag, NFS_LATTR_NOSHRINK);
fad57d0e
MD
888 m_freem(mrep);
889 goto nfsmout;
890 }
891
892 /*
893 * Success, get the file handle, do various checks, and load
894 * post-operation data from the reply packet. Theoretically
895 * we should never be looking up "." so, theoretically, we
896 * should never get the same file handle as our directory. But
897 * we check anyway. XXX
898 *
899 * Note that no timeout is set for the positive cache hit. We
900 * assume, theoretically, that ESTALE returns will be dealt with
901 * properly to handle NFS races and in anycase we cannot depend
902 * on a timeout to deal with NFS open/create/excl issues so instead
903 * of a bad hack here the rest of the NFS client code needs to do
904 * the right thing.
905 */
906 nfsm_getfh(fhp, fhsize, v3);
907
908 np = VTONFS(dvp);
909 if (NFS_CMPFH(np, fhp, fhsize)) {
910 vref(dvp);
911 nvp = dvp;
912 } else {
913 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
914 if (error) {
915 m_freem(mrep);
916 vput(dvp);
917 return (error);
918 }
919 nvp = NFSTOV(np);
920 }
921 if (v3) {
5a9187cb
MD
922 nfsm_postop_attr(nvp, attrflag, NFS_LATTR_NOSHRINK);
923 nfsm_postop_attr(dvp, attrflag, NFS_LATTR_NOSHRINK);
fad57d0e
MD
924 } else {
925 nfsm_loadattr(nvp, NULL);
926 }
927 cache_setvp(ncp, nvp);
928 m_freem(mrep);
929nfsmout:
930 vput(dvp);
931 if (nvp) {
932 if (nvp == dvp)
933 vrele(nvp);
934 else
935 vput(nvp);
936 }
937 return (error);
938}
939
984263bc 940/*
4d17b298 941 * 'cached' nfs directory lookup
e851b29e 942 *
fad57d0e
MD
943 * NOTE: cannot be removed until NFS implements all the new n*() API calls.
944 *
e851b29e
CP
945 * nfs_lookup(struct vnodeop_desc *a_desc, struct vnode *a_dvp,
946 * struct vnode **a_vpp, struct componentname *a_cnp)
984263bc
MD
947 */
948static int
e62afb5f 949nfs_lookup(struct vop_old_lookup_args *ap)
984263bc
MD
950{
951 struct componentname *cnp = ap->a_cnp;
952 struct vnode *dvp = ap->a_dvp;
953 struct vnode **vpp = ap->a_vpp;
954 int flags = cnp->cn_flags;
955 struct vnode *newvp;
956 u_int32_t *tl;
957 caddr_t cp;
958 int32_t t1, t2;
959 struct nfsmount *nmp;
960 caddr_t bpos, dpos, cp2;
961 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
962 long len;
963 nfsfh_t *fhp;
964 struct nfsnode *np;
965 int lockparent, wantparent, error = 0, attrflag, fhsize;
966 int v3 = NFS_ISV3(dvp);
dadab5e9 967 struct thread *td = cnp->cn_td;
984263bc 968
4d17b298
MD
969 /*
970 * Read-only mount check and directory check.
971 */
984263bc 972 *vpp = NULLVP;
fad57d0e 973 if ((dvp->v_mount->mnt_flag & MNT_RDONLY) &&
2b69e610 974 (cnp->cn_nameiop == NAMEI_DELETE || cnp->cn_nameiop == NAMEI_RENAME))
984263bc 975 return (EROFS);
4d17b298 976
984263bc
MD
977 if (dvp->v_type != VDIR)
978 return (ENOTDIR);
4d17b298
MD
979
980 /*
981 * Look it up in the cache. Note that ENOENT is only returned if we
982 * previously entered a negative hit (see later on). The additional
983 * nfsneg_cache_timeout check causes previously cached results to
984 * be instantly ignored if the negative caching is turned off.
985 */
2b69e610
MD
986 lockparent = flags & CNP_LOCKPARENT;
987 wantparent = flags & (CNP_LOCKPARENT|CNP_WANTPARENT);
984263bc
MD
988 nmp = VFSTONFS(dvp->v_mount);
989 np = VTONFS(dvp);
984263bc 990
4d17b298 991 /*
fad57d0e 992 * Go to the wire.
4d17b298 993 */
984263bc
MD
994 error = 0;
995 newvp = NULLVP;
996 nfsstats.lookupcache_misses++;
997 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
998 len = cnp->cn_namelen;
999 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
1000 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
1001 nfsm_fhtom(dvp, v3);
1002 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
dadab5e9 1003 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_td, cnp->cn_cred);
984263bc 1004 if (error) {
5a9187cb 1005 nfsm_postop_attr(dvp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
1006 m_freem(mrep);
1007 goto nfsmout;
1008 }
1009 nfsm_getfh(fhp, fhsize, v3);
1010
1011 /*
1012 * Handle RENAME case...
1013 */
fad57d0e 1014 if (cnp->cn_nameiop == NAMEI_RENAME && wantparent) {
984263bc
MD
1015 if (NFS_CMPFH(np, fhp, fhsize)) {
1016 m_freem(mrep);
1017 return (EISDIR);
1018 }
1019 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1020 if (error) {
1021 m_freem(mrep);
1022 return (error);
1023 }
1024 newvp = NFSTOV(np);
1025 if (v3) {
5a9187cb
MD
1026 nfsm_postop_attr(newvp, attrflag, NFS_LATTR_NOSHRINK);
1027 nfsm_postop_attr(dvp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
1028 } else
1029 nfsm_loadattr(newvp, (struct vattr *)0);
1030 *vpp = newvp;
1031 m_freem(mrep);
7ab77df6 1032 if (!lockparent) {
5fd012e0 1033 VOP_UNLOCK(dvp, 0, td);
7ab77df6
MD
1034 cnp->cn_flags |= CNP_PDIRUNLOCK;
1035 }
984263bc
MD
1036 return (0);
1037 }
1038
2b69e610 1039 if (flags & CNP_ISDOTDOT) {
5fd012e0 1040 VOP_UNLOCK(dvp, 0, td);
7ab77df6 1041 cnp->cn_flags |= CNP_PDIRUNLOCK;
984263bc
MD
1042 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1043 if (error) {
5fd012e0 1044 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
7ab77df6
MD
1045 cnp->cn_flags &= ~CNP_PDIRUNLOCK;
1046 return (error); /* NOTE: return error from nget */
984263bc
MD
1047 }
1048 newvp = NFSTOV(np);
fad57d0e 1049 if (lockparent) {
5fd012e0 1050 error = vn_lock(dvp, LK_EXCLUSIVE, td);
7ab77df6
MD
1051 if (error) {
1052 vput(newvp);
1053 return (error);
1054 }
1055 cnp->cn_flags |= CNP_PDIRUNLOCK;
984263bc
MD
1056 }
1057 } else if (NFS_CMPFH(np, fhp, fhsize)) {
597aea93 1058 vref(dvp);
984263bc
MD
1059 newvp = dvp;
1060 } else {
1061 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1062 if (error) {
1063 m_freem(mrep);
1064 return (error);
1065 }
fad57d0e 1066 if (!lockparent) {
5fd012e0 1067 VOP_UNLOCK(dvp, 0, td);
7ab77df6
MD
1068 cnp->cn_flags |= CNP_PDIRUNLOCK;
1069 }
984263bc
MD
1070 newvp = NFSTOV(np);
1071 }
1072 if (v3) {
5a9187cb
MD
1073 nfsm_postop_attr(newvp, attrflag, NFS_LATTR_NOSHRINK);
1074 nfsm_postop_attr(dvp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
1075 } else
1076 nfsm_loadattr(newvp, (struct vattr *)0);
fad57d0e
MD
1077#if 0
1078 /* XXX MOVE TO nfs_nremove() */
2b69e610 1079 if ((cnp->cn_flags & CNP_MAKEENTRY) &&
fad57d0e
MD
1080 cnp->cn_nameiop != NAMEI_DELETE) {
1081 np->n_ctime = np->n_vattr.va_ctime.tv_sec; /* XXX */
984263bc 1082 }
fad57d0e 1083#endif
984263bc 1084 *vpp = newvp;
6b08710e
MD
1085 m_freem(mrep);
1086nfsmout:
984263bc
MD
1087 if (error) {
1088 if (newvp != NULLVP) {
1089 vrele(newvp);
1090 *vpp = NULLVP;
1091 }
fad57d0e
MD
1092 if ((cnp->cn_nameiop == NAMEI_CREATE ||
1093 cnp->cn_nameiop == NAMEI_RENAME) &&
1094 error == ENOENT) {
7ab77df6 1095 if (!lockparent) {
5fd012e0 1096 VOP_UNLOCK(dvp, 0, td);
7ab77df6
MD
1097 cnp->cn_flags |= CNP_PDIRUNLOCK;
1098 }
984263bc
MD
1099 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
1100 error = EROFS;
1101 else
1102 error = EJUSTRETURN;
1103 }
984263bc
MD
1104 }
1105 return (error);
1106}
1107
1108/*
1109 * nfs read call.
1110 * Just call nfs_bioread() to do the work.
e851b29e
CP
1111 *
1112 * nfs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
1113 * struct ucred *a_cred)
984263bc
MD
1114 */
1115static int
e851b29e 1116nfs_read(struct vop_read_args *ap)
984263bc 1117{
40393ded 1118 struct vnode *vp = ap->a_vp;
984263bc 1119
3b568787 1120 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag));
ca3a2b2f
HP
1121 switch (vp->v_type) {
1122 case VREG:
1123 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag));
1124 case VDIR:
1125 return (EISDIR);
1126 default:
1127 return EOPNOTSUPP;
1128 }
984263bc
MD
1129}
1130
1131/*
1132 * nfs readlink call
e851b29e
CP
1133 *
1134 * nfs_readlink(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred)
984263bc
MD
1135 */
1136static int
e851b29e 1137nfs_readlink(struct vop_readlink_args *ap)
984263bc 1138{
40393ded 1139 struct vnode *vp = ap->a_vp;
984263bc
MD
1140
1141 if (vp->v_type != VLNK)
1142 return (EINVAL);
3b568787 1143 return (nfs_bioread(vp, ap->a_uio, 0));
984263bc
MD
1144}
1145
1146/*
1147 * Do a readlink rpc.
1148 * Called by nfs_doio() from below the buffer cache.
1149 */
1150int
3b568787 1151nfs_readlinkrpc(struct vnode *vp, struct uio *uiop)
984263bc 1152{
40393ded
RG
1153 u_int32_t *tl;
1154 caddr_t cp;
1155 int32_t t1, t2;
984263bc
MD
1156 caddr_t bpos, dpos, cp2;
1157 int error = 0, len, attrflag;
1158 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1159 int v3 = NFS_ISV3(vp);
1160
1161 nfsstats.rpccnt[NFSPROC_READLINK]++;
1162 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1163 nfsm_fhtom(vp, v3);
c1cf1e59 1164 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, nfs_vpcred(vp, ND_CHECK));
984263bc 1165 if (v3)
5a9187cb 1166 nfsm_postop_attr(vp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
1167 if (!error) {
1168 nfsm_strsiz(len, NFS_MAXPATHLEN);
1169 if (len == NFS_MAXPATHLEN) {
1170 struct nfsnode *np = VTONFS(vp);
1171 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
1172 len = np->n_size;
1173 }
1174 nfsm_mtouio(uiop, len);
1175 }
6b08710e
MD
1176 m_freem(mrep);
1177nfsmout:
984263bc
MD
1178 return (error);
1179}
1180
1181/*
1182 * nfs read rpc call
1183 * Ditto above
1184 */
1185int
3b568787 1186nfs_readrpc(struct vnode *vp, struct uio *uiop)
984263bc 1187{
40393ded
RG
1188 u_int32_t *tl;
1189 caddr_t cp;
1190 int32_t t1, t2;
984263bc
MD
1191 caddr_t bpos, dpos, cp2;
1192 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1193 struct nfsmount *nmp;
1194 int error = 0, len, retlen, tsiz, eof, attrflag;
1195 int v3 = NFS_ISV3(vp);
1196
1197#ifndef nolint
1198 eof = 0;
1199#endif
1200 nmp = VFSTONFS(vp->v_mount);
1201 tsiz = uiop->uio_resid;
1202 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1203 return (EFBIG);
1204 while (tsiz > 0) {
1205 nfsstats.rpccnt[NFSPROC_READ]++;
1206 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1207 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1208 nfsm_fhtom(vp, v3);
1209 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1210 if (v3) {
1211 txdr_hyper(uiop->uio_offset, tl);
1212 *(tl + 2) = txdr_unsigned(len);
1213 } else {
1214 *tl++ = txdr_unsigned(uiop->uio_offset);
1215 *tl++ = txdr_unsigned(len);
1216 *tl = 0;
1217 }
c1cf1e59 1218 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, nfs_vpcred(vp, ND_READ));
984263bc 1219 if (v3) {
5a9187cb 1220 nfsm_postop_attr(vp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
1221 if (error) {
1222 m_freem(mrep);
1223 goto nfsmout;
1224 }
1225 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1226 eof = fxdr_unsigned(int, *(tl + 1));
1227 } else
1228 nfsm_loadattr(vp, (struct vattr *)0);
1229 nfsm_strsiz(retlen, nmp->nm_rsize);
1230 nfsm_mtouio(uiop, retlen);
1231 m_freem(mrep);
1232 tsiz -= retlen;
1233 if (v3) {
1234 if (eof || retlen == 0) {
1235 tsiz = 0;
1236 }
1237 } else if (retlen < len) {
1238 tsiz = 0;
1239 }
1240 }
1241nfsmout:
1242 return (error);
1243}
1244
1245/*
1246 * nfs write call
1247 */
1248int
e851b29e 1249nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, int *must_commit)
984263bc 1250{
40393ded
RG
1251 u_int32_t *tl;
1252 caddr_t cp;
1253 int32_t t1, t2, backup;
984263bc
MD
1254 caddr_t bpos, dpos, cp2;
1255 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1256 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1257 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1258 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1259
1260#ifndef DIAGNOSTIC
1261 if (uiop->uio_iovcnt != 1)
1262 panic("nfs: writerpc iovcnt > 1");
1263#endif
1264 *must_commit = 0;
1265 tsiz = uiop->uio_resid;
1266 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1267 return (EFBIG);
1268 while (tsiz > 0) {
1269 nfsstats.rpccnt[NFSPROC_WRITE]++;
1270 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1271 nfsm_reqhead(vp, NFSPROC_WRITE,
1272 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1273 nfsm_fhtom(vp, v3);
1274 if (v3) {
1275 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1276 txdr_hyper(uiop->uio_offset, tl);
1277 tl += 2;
1278 *tl++ = txdr_unsigned(len);
1279 *tl++ = txdr_unsigned(*iomode);
1280 *tl = txdr_unsigned(len);
1281 } else {
40393ded 1282 u_int32_t x;
984263bc
MD
1283
1284 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1285 /* Set both "begin" and "current" to non-garbage. */
1286 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1287 *tl++ = x; /* "begin offset" */
1288 *tl++ = x; /* "current offset" */
1289 x = txdr_unsigned(len);
1290 *tl++ = x; /* total to this offset */
1291 *tl = x; /* size of this write */
1292 }
1293 nfsm_uiotom(uiop, len);
c1cf1e59 1294 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, nfs_vpcred(vp, ND_WRITE));
984263bc 1295 if (v3) {
5a9187cb
MD
1296 /*
1297 * The write RPC returns a before and after mtime. The
1298 * nfsm_wcc_data() macro checks the before n_mtime
1299 * against the before time and stores the after time
1300 * in the nfsnode's cached vattr and n_mtime field.
1301 * The NRMODIFIED bit will be set if the before
1302 * time did not match the original mtime.
1303 */
984263bc
MD
1304 wccflag = NFSV3_WCCCHK;
1305 nfsm_wcc_data(vp, wccflag);
1306 if (!error) {
1307 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1308 + NFSX_V3WRITEVERF);
1309 rlen = fxdr_unsigned(int, *tl++);
1310 if (rlen == 0) {
1311 error = NFSERR_IO;
1312 m_freem(mrep);
1313 break;
1314 } else if (rlen < len) {
1315 backup = len - rlen;
1316 uiop->uio_iov->iov_base -= backup;
1317 uiop->uio_iov->iov_len += backup;
1318 uiop->uio_offset -= backup;
1319 uiop->uio_resid += backup;
1320 len = rlen;
1321 }
1322 commit = fxdr_unsigned(int, *tl++);
1323
1324 /*
1325 * Return the lowest committment level
1326 * obtained by any of the RPCs.
1327 */
1328 if (committed == NFSV3WRITE_FILESYNC)
1329 committed = commit;
1330 else if (committed == NFSV3WRITE_DATASYNC &&
1331 commit == NFSV3WRITE_UNSTABLE)
1332 committed = commit;
1333 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1334 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1335 NFSX_V3WRITEVERF);
1336 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1337 } else if (bcmp((caddr_t)tl,
1338 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1339 *must_commit = 1;
1340 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1341 NFSX_V3WRITEVERF);
1342 }
1343 }
5a9187cb
MD
1344 } else {
1345 nfsm_loadattr(vp, (struct vattr *)0);
1346 }
984263bc
MD
1347 m_freem(mrep);
1348 if (error)
1349 break;
1350 tsiz -= len;
1351 }
1352nfsmout:
1353 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1354 committed = NFSV3WRITE_FILESYNC;
1355 *iomode = committed;
1356 if (error)
1357 uiop->uio_resid = tsiz;
1358 return (error);
1359}
1360
1361/*
1362 * nfs mknod rpc
1363 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1364 * mode set to specify the file type and the size field for rdev.
1365 */
1366static int
e851b29e
CP
1367nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1368 struct vattr *vap)
984263bc 1369{
40393ded
RG
1370 struct nfsv2_sattr *sp;
1371 u_int32_t *tl;
1372 caddr_t cp;
1373 int32_t t1, t2;
984263bc
MD
1374 struct vnode *newvp = (struct vnode *)0;
1375 struct nfsnode *np = (struct nfsnode *)0;
1376 struct vattr vattr;
1377 char *cp2;
1378 caddr_t bpos, dpos;
1379 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1380 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1381 u_int32_t rdev;
1382 int v3 = NFS_ISV3(dvp);
1383
1384 if (vap->va_type == VCHR || vap->va_type == VBLK)
1385 rdev = txdr_unsigned(vap->va_rdev);
1386 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1387 rdev = nfs_xdrneg1;
1388 else {
1389 return (EOPNOTSUPP);
1390 }
3b568787 1391 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
984263bc
MD
1392 return (error);
1393 }
1394 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1395 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1396 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1397 nfsm_fhtom(dvp, v3);
1398 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1399 if (v3) {
1400 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1401 *tl++ = vtonfsv3_type(vap->va_type);
1402 nfsm_v3attrbuild(vap, FALSE);
1403 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1404 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1405 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1406 *tl = txdr_unsigned(uminor(vap->va_rdev));
1407 }
1408 } else {
1409 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1410 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1411 sp->sa_uid = nfs_xdrneg1;
1412 sp->sa_gid = nfs_xdrneg1;
1413 sp->sa_size = rdev;
1414 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1415 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1416 }
dadab5e9 1417 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_td, cnp->cn_cred);
984263bc
MD
1418 if (!error) {
1419 nfsm_mtofh(dvp, newvp, v3, gotvp);
1420 if (!gotvp) {
1421 if (newvp) {
1422 vput(newvp);
1423 newvp = (struct vnode *)0;
1424 }
1425 error = nfs_lookitup(dvp, cnp->cn_nameptr,
dadab5e9 1426 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td, &np);
984263bc
MD
1427 if (!error)
1428 newvp = NFSTOV(np);
1429 }
1430 }
1431 if (v3)
1432 nfsm_wcc_data(dvp, wccflag);
6b08710e
MD
1433 m_freem(mrep);
1434nfsmout:
984263bc
MD
1435 if (error) {
1436 if (newvp)
1437 vput(newvp);
1438 } else {
984263bc
MD
1439 *vpp = newvp;
1440 }
5a9187cb 1441 VTONFS(dvp)->n_flag |= NLMODIFIED;
984263bc
MD
1442 if (!wccflag)
1443 VTONFS(dvp)->n_attrstamp = 0;
1444 return (error);
1445}
1446
1447/*
1448 * nfs mknod vop
1449 * just call nfs_mknodrpc() to do the work.
e851b29e
CP
1450 *
1451 * nfs_mknod(struct vnode *a_dvp, struct vnode **a_vpp,
1452 * struct componentname *a_cnp, struct vattr *a_vap)
984263bc
MD
1453 */
1454/* ARGSUSED */
1455static int
e62afb5f 1456nfs_mknod(struct vop_old_mknod_args *ap)
984263bc
MD
1457{
1458 return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1459}
1460
1461static u_long create_verf;
1462/*
1463 * nfs file create call
e851b29e
CP
1464 *
1465 * nfs_create(struct vnode *a_dvp, struct vnode **a_vpp,
1466 * struct componentname *a_cnp, struct vattr *a_vap)
984263bc
MD
1467 */
1468static int
e62afb5f 1469nfs_create(struct vop_old_create_args *ap)
984263bc 1470{
40393ded
RG
1471 struct vnode *dvp = ap->a_dvp;
1472 struct vattr *vap = ap->a_vap;
1473 struct componentname *cnp = ap->a_cnp;
1474 struct nfsv2_sattr *sp;
1475 u_int32_t *tl;
1476 caddr_t cp;
1477 int32_t t1, t2;
984263bc
MD
1478 struct nfsnode *np = (struct nfsnode *)0;
1479 struct vnode *newvp = (struct vnode *)0;
1480 caddr_t bpos, dpos, cp2;
1481 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1482 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1483 struct vattr vattr;
1484 int v3 = NFS_ISV3(dvp);
1485
1486 /*
1487 * Oops, not for me..
1488 */
1489 if (vap->va_type == VSOCK)
1490 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1491
3b568787 1492 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
984263bc
MD
1493 return (error);
1494 }
1495 if (vap->va_vaflags & VA_EXCLUSIVE)
1496 fmode |= O_EXCL;
1497again:
1498 nfsstats.rpccnt[NFSPROC_CREATE]++;
1499 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1500 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1501 nfsm_fhtom(dvp, v3);
1502 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1503 if (v3) {
1504 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1505 if (fmode & O_EXCL) {
1506 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1507 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1508#ifdef INET
1509 if (!TAILQ_EMPTY(&in_ifaddrhead))
ecd80f47 1510 *tl++ = IA_SIN(TAILQ_FIRST(&in_ifaddrhead))->sin_addr.s_addr;
984263bc
MD
1511 else
1512#endif
1513 *tl++ = create_verf;
1514 *tl = ++create_verf;
1515 } else {
1516 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1517 nfsm_v3attrbuild(vap, FALSE);
1518 }
1519 } else {
1520 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1521 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1522 sp->sa_uid = nfs_xdrneg1;
1523 sp->sa_gid = nfs_xdrneg1;
1524 sp->sa_size = 0;
1525 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1526 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1527 }
dadab5e9 1528 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_td, cnp->cn_cred);
984263bc
MD
1529 if (!error) {
1530 nfsm_mtofh(dvp, newvp, v3, gotvp);
1531 if (!gotvp) {
1532 if (newvp) {
1533 vput(newvp);
1534 newvp = (struct vnode *)0;
1535 }
1536 error = nfs_lookitup(dvp, cnp->cn_nameptr,
dadab5e9 1537 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td, &np);
984263bc
MD
1538 if (!error)
1539 newvp = NFSTOV(np);
1540 }
1541 }
1542 if (v3)
1543 nfsm_wcc_data(dvp, wccflag);
6b08710e
MD
1544 m_freem(mrep);
1545nfsmout:
984263bc
MD
1546 if (error) {
1547 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1548 fmode &= ~O_EXCL;
1549 goto again;
1550 }
1551 if (newvp)
1552 vput(newvp);
1553 } else if (v3 && (fmode & O_EXCL)) {
1554 /*
1555 * We are normally called with only a partially initialized
1556 * VAP. Since the NFSv3 spec says that server may use the
1557 * file attributes to store the verifier, the spec requires
1558 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1559 * in atime, but we can't really assume that all servers will
1560 * so we ensure that our SETATTR sets both atime and mtime.
1561 */
1562 if (vap->va_mtime.tv_sec == VNOVAL)
1563 vfs_timestamp(&vap->va_mtime);
1564 if (vap->va_atime.tv_sec == VNOVAL)
1565 vap->va_atime = vap->va_mtime;
dadab5e9 1566 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_td);
984263bc
MD
1567 }
1568 if (!error) {
c1cf1e59
MD
1569 /*
1570 * The new np may have enough info for access
1571 * checks, make sure rucred and wucred are
1572 * initialized for read and write rpc's.
1573 */
1574 np = VTONFS(newvp);
1575 if (np->n_rucred == NULL)
1576 np->n_rucred = crhold(cnp->cn_cred);
1577 if (np->n_wucred == NULL)
1578 np->n_wucred = crhold(cnp->cn_cred);
984263bc
MD
1579 *ap->a_vpp = newvp;
1580 }
5a9187cb 1581 VTONFS(dvp)->n_flag |= NLMODIFIED;
984263bc
MD
1582 if (!wccflag)
1583 VTONFS(dvp)->n_attrstamp = 0;
1584 return (error);
1585}
1586
1587/*
1588 * nfs file remove call
1589 * To try and make nfs semantics closer to ufs semantics, a file that has
1590 * other processes using the vnode is renamed instead of removed and then
1591 * removed later on the last close.
1592 * - If v_usecount > 1
1593 * If a rename is not already in the works
1594 * call nfs_sillyrename() to set it up
1595 * else
1596 * do the remove rpc
e851b29e
CP
1597 *
1598 * nfs_remove(struct vnodeop_desc *a_desc, struct vnode *a_dvp,
1599 * struct vnode *a_vp, struct componentname *a_cnp)
984263bc
MD
1600 */
1601static int
e62afb5f 1602nfs_remove(struct vop_old_remove_args *ap)
984263bc 1603{
40393ded
RG
1604 struct vnode *vp = ap->a_vp;
1605 struct vnode *dvp = ap->a_dvp;
1606 struct componentname *cnp = ap->a_cnp;
1607 struct nfsnode *np = VTONFS(vp);
984263bc
MD
1608 int error = 0;
1609 struct vattr vattr;
1610
1611#ifndef DIAGNOSTIC
984263bc
MD
1612 if (vp->v_usecount < 1)
1613 panic("nfs_remove: bad v_usecount");
1614#endif
1615 if (vp->v_type == VDIR)
1616 error = EPERM;
1617 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
3b568787 1618 VOP_GETATTR(vp, &vattr, cnp->cn_td) == 0 &&
984263bc 1619 vattr.va_nlink > 1)) {
984263bc
MD
1620 /*
1621 * throw away biocache buffers, mainly to avoid
1622 * unnecessary delayed writes later.
1623 */
3b568787 1624 error = nfs_vinvalbuf(vp, 0, cnp->cn_td, 1);
984263bc
MD
1625 /* Do the rpc */
1626 if (error != EINTR)
1627 error = nfs_removerpc(dvp, cnp->cn_nameptr,
dadab5e9 1628 cnp->cn_namelen, cnp->cn_cred, cnp->cn_td);
984263bc
MD
1629 /*
1630 * Kludge City: If the first reply to the remove rpc is lost..
1631 * the reply to the retransmitted request will be ENOENT
1632 * since the file was in fact removed
1633 * Therefore, we cheat and return success.
1634 */
1635 if (error == ENOENT)
1636 error = 0;
fad57d0e 1637 } else if (!np->n_sillyrename) {
984263bc 1638 error = nfs_sillyrename(dvp, vp, cnp);
fad57d0e 1639 }
984263bc
MD
1640 np->n_attrstamp = 0;
1641 return (error);
1642}
1643
1644/*
1645 * nfs file remove rpc called from nfs_inactive
1646 */
1647int
dadab5e9 1648nfs_removeit(struct sillyrename *sp)
984263bc 1649{
dadab5e9
MD
1650 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen,
1651 sp->s_cred, NULL));
984263bc
MD
1652}
1653
1654/*
1655 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1656 */
1657static int
e851b29e
CP
1658nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
1659 struct ucred *cred, struct thread *td)
984263bc 1660{
40393ded
RG
1661 u_int32_t *tl;
1662 caddr_t cp;
1663 int32_t t1, t2;
984263bc
MD
1664 caddr_t bpos, dpos, cp2;
1665 int error = 0, wccflag = NFSV3_WCCRATTR;
1666 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1667 int v3 = NFS_ISV3(dvp);
1668
1669 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1670 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1671 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1672 nfsm_fhtom(dvp, v3);
1673 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
dadab5e9 1674 nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
984263bc
MD
1675 if (v3)
1676 nfsm_wcc_data(dvp, wccflag);
6b08710e
MD
1677 m_freem(mrep);
1678nfsmout:
5a9187cb 1679 VTONFS(dvp)->n_flag |= NLMODIFIED;
984263bc
MD
1680 if (!wccflag)
1681 VTONFS(dvp)->n_attrstamp = 0;
1682 return (error);
1683}
1684
1685/*
1686 * nfs file rename call
e851b29e
CP
1687 *
1688 * nfs_rename(struct vnode *a_fdvp, struct vnode *a_fvp,
1689 * struct componentname *a_fcnp, struct vnode *a_tdvp,
1690 * struct vnode *a_tvp, struct componentname *a_tcnp)
984263bc
MD
1691 */
1692static int
e62afb5f 1693nfs_rename(struct vop_old_rename_args *ap)
984263bc 1694{
40393ded
RG
1695 struct vnode *fvp = ap->a_fvp;
1696 struct vnode *tvp = ap->a_tvp;
1697 struct vnode *fdvp = ap->a_fdvp;
1698 struct vnode *tdvp = ap->a_tdvp;
1699 struct componentname *tcnp = ap->a_tcnp;
1700 struct componentname *fcnp = ap->a_fcnp;
984263bc
MD
1701 int error;
1702
984263bc
MD
1703 /* Check for cross-device rename */
1704 if ((fvp->v_mount != tdvp->v_mount) ||
1705 (tvp && (fvp->v_mount != tvp->v_mount))) {
1706 error = EXDEV;
1707 goto out;
1708 }
1709
1710 /*
1711 * We have to flush B_DELWRI data prior to renaming
1712 * the file. If we don't, the delayed-write buffers
1713 * can be flushed out later after the file has gone stale
1714 * under NFSV3. NFSV2 does not have this problem because
1715 * (as far as I can tell) it flushes dirty buffers more
1716 * often.
1717 */
1718
3b568787 1719 VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_td);
984263bc 1720 if (tvp)
3b568787 1721 VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_td);
984263bc
MD
1722
1723 /*
1724 * If the tvp exists and is in use, sillyrename it before doing the
1725 * rename of the new file over it.
fad57d0e 1726 *
984263bc 1727 * XXX Can't sillyrename a directory.
5fd012e0 1728 *
fad57d0e
MD
1729 * We do not attempt to do any namecache purges in this old API
1730 * routine. The new API compat functions have access to the actual
1731 * namecache structures and will do it for us.
984263bc
MD
1732 */
1733 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1734 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1735 vput(tvp);
1736 tvp = NULL;
5fd012e0 1737 } else if (tvp) {
fad57d0e 1738 ;
984263bc
MD
1739 }
1740
1741 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1742 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
dadab5e9 1743 tcnp->cn_td);
984263bc 1744
984263bc
MD
1745out:
1746 if (tdvp == tvp)
1747 vrele(tdvp);
1748 else
1749 vput(tdvp);
1750 if (tvp)
1751 vput(tvp);
1752 vrele(fdvp);
1753 vrele(fvp);
1754 /*
1755 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1756 */
1757 if (error == ENOENT)
1758 error = 0;
1759 return (error);
1760}
1761
1762/*
1763 * nfs file rename rpc called from nfs_remove() above
1764 */
1765static int
e851b29e
CP
1766nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
1767 struct sillyrename *sp)
984263bc
MD
1768{
1769 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
dadab5e9 1770 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_td));
984263bc
MD
1771}
1772
1773/*
1774 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1775 */
1776static int
e851b29e
CP
1777nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen,
1778 struct vnode *tdvp, const char *tnameptr, int tnamelen,
1779 struct ucred *cred, struct thread *td)
984263bc 1780{
40393ded
RG
1781 u_int32_t *tl;
1782 caddr_t cp;
1783 int32_t t1, t2;
984263bc
MD
1784 caddr_t bpos, dpos, cp2;
1785 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1786 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1787 int v3 = NFS_ISV3(fdvp);
1788
1789 nfsstats.rpccnt[NFSPROC_RENAME]++;
1790 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1791 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1792 nfsm_rndup(tnamelen));
1793 nfsm_fhtom(fdvp, v3);
1794 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1795 nfsm_fhtom(tdvp, v3);
1796 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
dadab5e9 1797 nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
984263bc
MD
1798 if (v3) {
1799 nfsm_wcc_data(fdvp, fwccflag);
1800 nfsm_wcc_data(tdvp, twccflag);
1801 }
6b08710e
MD
1802 m_freem(mrep);
1803nfsmout:
5a9187cb
MD
1804 VTONFS(fdvp)->n_flag |= NLMODIFIED;
1805 VTONFS(tdvp)->n_flag |= NLMODIFIED;
984263bc
MD
1806 if (!fwccflag)
1807 VTONFS(fdvp)->n_attrstamp = 0;
1808 if (!twccflag)
1809 VTONFS(tdvp)->n_attrstamp = 0;
1810 return (error);
1811}
1812
1813/*
1814 * nfs hard link create call
e851b29e
CP
1815 *
1816 * nfs_link(struct vnode *a_tdvp, struct vnode *a_vp,
1817 * struct componentname *a_cnp)
984263bc
MD
1818 */
1819static int
e62afb5f 1820nfs_link(struct vop_old_link_args *ap)
984263bc 1821{
40393ded
RG
1822 struct vnode *vp = ap->a_vp;
1823 struct vnode *tdvp = ap->a_tdvp;
1824 struct componentname *cnp = ap->a_cnp;
1825 u_int32_t *tl;
1826 caddr_t cp;
1827 int32_t t1, t2;
984263bc
MD
1828 caddr_t bpos, dpos, cp2;
1829 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1830 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1831 int v3;
1832
1833 if (vp->v_mount != tdvp->v_mount) {
1834 return (EXDEV);
1835 }
1836
1837 /*
1838 * Push all writes to the server, so that the attribute cache
1839 * doesn't get "out of sync" with the server.
1840 * XXX There should be a better way!
1841 */
3b568787 1842 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_td);
984263bc
MD
1843
1844 v3 = NFS_ISV3(vp);
1845 nfsstats.rpccnt[NFSPROC_LINK]++;
1846 nfsm_reqhead(vp, NFSPROC_LINK,
1847 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1848 nfsm_fhtom(vp, v3);
1849 nfsm_fhtom(tdvp, v3);
1850 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
dadab5e9 1851 nfsm_request(vp, NFSPROC_LINK, cnp->cn_td, cnp->cn_cred);
984263bc 1852 if (v3) {
5a9187cb 1853 nfsm_postop_attr(vp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
1854 nfsm_wcc_data(tdvp, wccflag);
1855 }
6b08710e
MD
1856 m_freem(mrep);
1857nfsmout:
5a9187cb 1858 VTONFS(tdvp)->n_flag |= NLMODIFIED;
984263bc
MD
1859 if (!attrflag)
1860 VTONFS(vp)->n_attrstamp = 0;
1861 if (!wccflag)
1862 VTONFS(tdvp)->n_attrstamp = 0;
1863 /*
1864 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1865 */
1866 if (error == EEXIST)
1867 error = 0;
1868 return (error);
1869}
1870
1871/*
1872 * nfs symbolic link create call
e851b29e
CP
1873 *
1874 * nfs_symlink(struct vnode *a_dvp, struct vnode **a_vpp,
1875 * struct componentname *a_cnp, struct vattr *a_vap,
1876 * char *a_target)
984263bc
MD
1877 */
1878static int
e62afb5f 1879nfs_symlink(struct vop_old_symlink_args *ap)
984263bc 1880{
40393ded
RG
1881 struct vnode *dvp = ap->a_dvp;
1882 struct vattr *vap = ap->a_vap;
1883 struct componentname *cnp = ap->a_cnp;
1884 struct nfsv2_sattr *sp;
1885 u_int32_t *tl;
1886 caddr_t cp;
1887 int32_t t1, t2;
984263bc
MD
1888 caddr_t bpos, dpos, cp2;
1889 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1890 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1891 struct vnode *newvp = (struct vnode *)0;
1892 int v3 = NFS_ISV3(dvp);
1893
1894 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1895 slen = strlen(ap->a_target);
1896 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1897 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1898 nfsm_fhtom(dvp, v3);
1899 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1900 if (v3) {
1901 nfsm_v3attrbuild(vap, FALSE);
1902 }
1903 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1904 if (!v3) {
1905 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1906 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1907 sp->sa_uid = nfs_xdrneg1;
1908 sp->sa_gid = nfs_xdrneg1;
1909 sp->sa_size = nfs_xdrneg1;
1910 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1911 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1912 }
1913
1914 /*
1915 * Issue the NFS request and get the rpc response.
1916 *
1917 * Only NFSv3 responses returning an error of 0 actually return
1918 * a file handle that can be converted into newvp without having
1919 * to do an extra lookup rpc.
1920 */
dadab5e9 1921 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_td, cnp->cn_cred);
984263bc
MD
1922 if (v3) {
1923 if (error == 0)
1924 nfsm_mtofh(dvp, newvp, v3, gotvp);
1925 nfsm_wcc_data(dvp, wccflag);
1926 }
1927
1928 /*
1929 * The nfsm_* error exits jump to the nfsmout label below; mrep is also freed.
1930 */
1931
6b08710e
MD
1932 m_freem(mrep);
1933nfsmout:
984263bc
MD
1934
1935 /*
1936 * If we get an EEXIST error, silently convert it to no-error
1937 * in case of an NFS retry.
1938 */
1939 if (error == EEXIST)
1940 error = 0;
1941
1942 /*
1943 * If we do not have (or no longer have) an error, and we could
1944 * not extract the newvp from the response (because the request was
1945 * NFSv2 or the error was EEXIST), we have to do a lookup in order
1946 * to obtain a newvp to return.
1947 */
1948 if (error == 0 && newvp == NULL) {
1949 struct nfsnode *np = NULL;
1950
1951 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
dadab5e9 1952 cnp->cn_cred, cnp->cn_td, &np);
984263bc
MD
1953 if (!error)
1954 newvp = NFSTOV(np);
1955 }
1956 if (error) {
1957 if (newvp)
1958 vput(newvp);
1959 } else {
1960 *ap->a_vpp = newvp;
1961 }
5a9187cb 1962 VTONFS(dvp)->n_flag |= NLMODIFIED;
984263bc
MD
1963 if (!wccflag)
1964 VTONFS(dvp)->n_attrstamp = 0;
1965 return (error);
1966}
1967
1968/*
1969 * nfs make dir call
e851b29e
CP
1970 *
1971 * nfs_mkdir(struct vnode *a_dvp, struct vnode **a_vpp,
1972 * struct componentname *a_cnp, struct vattr *a_vap)
984263bc
MD
1973 */
1974static int
e62afb5f 1975nfs_mkdir(struct vop_old_mkdir_args *ap)
984263bc 1976{
40393ded
RG
1977 struct vnode *dvp = ap->a_dvp;
1978 struct vattr *vap = ap->a_vap;
1979 struct componentname *cnp = ap->a_cnp;
1980 struct nfsv2_sattr *sp;
1981 u_int32_t *tl;
1982 caddr_t cp;
1983 int32_t t1, t2;
1984 int len;
984263bc
MD
1985 struct nfsnode *np = (struct nfsnode *)0;
1986 struct vnode *newvp = (struct vnode *)0;
1987 caddr_t bpos, dpos, cp2;
1988 int error = 0, wccflag = NFSV3_WCCRATTR;
1989 int gotvp = 0;
1990 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1991 struct vattr vattr;
1992 int v3 = NFS_ISV3(dvp);
1993
3b568787 1994 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_td)) != 0) {
984263bc
MD
1995 return (error);
1996 }
1997 len = cnp->cn_namelen;
1998 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1999 nfsm_reqhead(dvp, NFSPROC_MKDIR,
2000 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2001 nfsm_fhtom(dvp, v3);
2002 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2003 if (v3) {
2004 nfsm_v3attrbuild(vap, FALSE);
2005 } else {
2006 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2007 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2008 sp->sa_uid = nfs_xdrneg1;
2009 sp->sa_gid = nfs_xdrneg1;
2010 sp->sa_size = nfs_xdrneg1;
2011 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2012 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2013 }
dadab5e9 2014 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_td, cnp->cn_cred);
984263bc
MD
2015 if (!error)
2016 nfsm_mtofh(dvp, newvp, v3, gotvp);
2017 if (v3)
2018 nfsm_wcc_data(dvp, wccflag);
6b08710e
MD
2019 m_freem(mrep);
2020nfsmout:
5a9187cb 2021 VTONFS(dvp)->n_flag |= NLMODIFIED;
984263bc
MD
2022 if (!wccflag)
2023 VTONFS(dvp)->n_attrstamp = 0;
2024 /*
2025 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
2026 * if we can succeed in looking up the directory.
2027 */
2028 if (error == EEXIST || (!error && !gotvp)) {
2029 if (newvp) {
2030 vrele(newvp);
2031 newvp = (struct vnode *)0;
2032 }
2033 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
dadab5e9 2034 cnp->cn_td, &np);
984263bc
MD
2035 if (!error) {
2036 newvp = NFSTOV(np);
2037 if (newvp->v_type != VDIR)
2038 error = EEXIST;
2039 }
2040 }
2041 if (error) {
2042 if (newvp)
2043 vrele(newvp);
2044 } else
2045 *ap->a_vpp = newvp;
2046 return (error);
2047}
2048
2049/*
2050 * nfs remove directory call
e851b29e
CP
2051 *
2052 * nfs_rmdir(struct vnode *a_dvp, struct vnode *a_vp,
2053 * struct componentname *a_cnp)
984263bc
MD
2054 */
2055static int
e62afb5f 2056nfs_rmdir(struct vop_old_rmdir_args *ap)
984263bc 2057{
40393ded
RG
2058 struct vnode *vp = ap->a_vp;
2059 struct vnode *dvp = ap->a_dvp;
2060 struct componentname *cnp = ap->a_cnp;
2061 u_int32_t *tl;
2062 caddr_t cp;
2063 int32_t t1, t2;
984263bc
MD
2064 caddr_t bpos, dpos, cp2;
2065 int error = 0, wccflag = NFSV3_WCCRATTR;
2066 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2067 int v3 = NFS_ISV3(dvp);
2068
2069 if (dvp == vp)
2070 return (EINVAL);
2071 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2072 nfsm_reqhead(dvp, NFSPROC_RMDIR,
2073 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2074 nfsm_fhtom(dvp, v3);
2075 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
dadab5e9 2076 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_td, cnp->cn_cred);
984263bc
MD
2077 if (v3)
2078 nfsm_wcc_data(dvp, wccflag);
6b08710e
MD
2079 m_freem(mrep);
2080nfsmout:
5a9187cb 2081 VTONFS(dvp)->n_flag |= NLMODIFIED;
984263bc
MD
2082 if (!wccflag)
2083 VTONFS(dvp)->n_attrstamp = 0;
984263bc
MD
2084 /*
2085 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2086 */
2087 if (error == ENOENT)
2088 error = 0;
2089 return (error);
2090}
2091
2092/*
2093 * nfs readdir call
e851b29e
CP
2094 *
2095 * nfs_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred)
984263bc
MD
2096 */
2097static int
e851b29e 2098nfs_readdir(struct vop_readdir_args *ap)
984263bc 2099{
40393ded
RG
2100 struct vnode *vp = ap->a_vp;
2101 struct nfsnode *np = VTONFS(vp);
2102 struct uio *uio = ap->a_uio;
984263bc
MD
2103 int tresid, error;
2104 struct vattr vattr;
2105
2106 if (vp->v_type != VDIR)
2107 return (EPERM);
5a9187cb 2108
984263bc 2109 /*
5a9187cb
MD
2110 * If we have a valid EOF offset cache we must call VOP_GETATTR()
2111 * and then check that it is still valid, or if this is an NQNFS mount
2112 * we call NQNFS_CKCACHEABLE() instead of VOP_GETATTR(). Note that
2113 * VOP_GETATTR() does not necessarily go to the wire.
984263bc
MD
2114 */
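 /*
  * For example, the terminating getdirentries() call made at the cached
  * EOF offset of an unchanged directory is answered here with 0 (counted
  * as a direofcache hit) without calling nfs_bioread() at all.
  */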
2115 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
5a9187cb 2116 (np->n_flag & (NLMODIFIED|NRMODIFIED)) == 0) {
e07fef60
MD
2117 if (VOP_GETATTR(vp, &vattr, uio->uio_td) == 0 &&
2118 (np->n_flag & (NLMODIFIED|NRMODIFIED)) == 0
5a9187cb 2119 ) {
984263bc
MD
2120 nfsstats.direofcache_hits++;
2121 return (0);
2122 }
2123 }
2124
2125 /*
5a9187cb
MD
2126 * Call nfs_bioread() to do the real work. nfs_bioread() does its
2127 * own cache coherency checks so we do not have to.
984263bc
MD
2128 */
2129 tresid = uio->uio_resid;
3b568787 2130 error = nfs_bioread(vp, uio, 0);
984263bc
MD
2131
2132 if (!error && uio->uio_resid == tresid)
2133 nfsstats.direofcache_misses++;
2134 return (error);
2135}
2136
2137/*
7d877edf
MD
2138 * Readdir rpc call. nfs_bioread->nfs_doio->nfs_readdirrpc.
2139 *
2140 * Note that for directories, nfs_bioread maintains the underlying nfs-centric
2141 * offset/block and converts the nfs formatted directory entries for userland
2142 * consumption, as well as dealing with offsets into the middle of blocks.
2143 * nfs_doio only deals with logical blocks. In particular, uio_offset will
2144 * be block-bounded. It must convert to cookies for the actual RPC.
984263bc
MD
2145 */
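/*
 * Illustrative note: uio_offset arrives here as a multiple of DIRBLKSIZ;
 * the opaque cookie the previous reply left behind for that offset is
 * looked up via nfs_getcookie() below and is what actually goes out in
 * the READDIR request, since NFS directory positions are cookies, not
 * byte offsets.
 */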
2146int
3b568787 2147nfs_readdirrpc(struct vnode *vp, struct uio *uiop)
984263bc 2148{
40393ded 2149 int len, left;
01f31ab3 2150 struct nfs_dirent *dp = NULL;
40393ded
RG
2151 u_int32_t *tl;
2152 caddr_t cp;
2153 int32_t t1, t2;
2154 nfsuint64 *cookiep;
984263bc
MD
2155 caddr_t bpos, dpos, cp2;
2156 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2157 nfsuint64 cookie;
2158 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2159 struct nfsnode *dnp = VTONFS(vp);
2160 u_quad_t fileno;
2161 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2162 int attrflag;
2163 int v3 = NFS_ISV3(vp);
2164
2165#ifndef DIAGNOSTIC
2166 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2167 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2168 panic("nfs readdirrpc bad uio");
2169#endif
2170
2171 /*
2172 * If there is no cookie, assume directory was stale.
2173 */
2174 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2175 if (cookiep)
2176 cookie = *cookiep;
2177 else
2178 return (NFSERR_BAD_COOKIE);
2179 /*
2180 * Loop around doing readdir rpc's of size nm_readdirsize
2181 * truncated to a multiple of DIRBLKSIZ.
2182 * The stopping criterion is EOF or a full buffer.
2183 */
2184 while (more_dirs && bigenough) {
2185 nfsstats.rpccnt[NFSPROC_READDIR]++;
2186 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2187 NFSX_READDIR(v3));
2188 nfsm_fhtom(vp, v3);
2189 if (v3) {
2190 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2191 *tl++ = cookie.nfsuquad[0];
2192 *tl++ = cookie.nfsuquad[1];
2193 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2194 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2195 } else {
2196 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2197 *tl++ = cookie.nfsuquad[0];
2198 }
2199 *tl = txdr_unsigned(nmp->nm_readdirsize);
c1cf1e59 2200 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, nfs_vpcred(vp, ND_READ));
984263bc 2201 if (v3) {
5a9187cb 2202 nfsm_postop_attr(vp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
2203 if (!error) {
2204 nfsm_dissect(tl, u_int32_t *,
2205 2 * NFSX_UNSIGNED);
2206 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2207 dnp->n_cookieverf.nfsuquad[1] = *tl;
2208 } else {
2209 m_freem(mrep);
2210 goto nfsmout;
2211 }
2212 }
2213 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2214 more_dirs = fxdr_unsigned(int, *tl);
2215
7d877edf 2216 /* loop thru the dir entries, converting them to std form */
984263bc
MD
2217 while (more_dirs && bigenough) {
2218 if (v3) {
2219 nfsm_dissect(tl, u_int32_t *,
2220 3 * NFSX_UNSIGNED);
2221 fileno = fxdr_hyper(tl);
2222 len = fxdr_unsigned(int, *(tl + 2));
2223 } else {
2224 nfsm_dissect(tl, u_int32_t *,
2225 2 * NFSX_UNSIGNED);
2226 fileno = fxdr_unsigned(u_quad_t, *tl++);
2227 len = fxdr_unsigned(int, *tl);
2228 }
2229 if (len <= 0 || len > NFS_MAXNAMLEN) {
2230 error = EBADRPC;
2231 m_freem(mrep);
2232 goto nfsmout;
2233 }
7d877edf
MD
2234
2235 /*
2236 * len is the number of bytes in the path element
2237 * name, not including the \0 termination.
2238 *
2239 * tlen is the number of bytes we have to reserve for
2240 * the path element name.
2241 */
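 /*
  * Worked example: len = 5 ("hello") rounds up to tlen = 8, which
  * already leaves room for the '\0'; len = 8 rounds up to itself, so
  * tlen becomes 12 to guarantee space for the terminator.
  */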
984263bc
MD
2242 tlen = nfsm_rndup(len);
2243 if (tlen == len)
2244 tlen += 4; /* To ensure null termination */
7d877edf
MD
2245
2246 /*
2247 * If the entry would cross a DIRBLKSIZ boundary,
2248 * extend the previous nfs_dirent to cover the
2249 * remaining space.
2250 */
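 /*
  * For example, with a DIRBLKSIZ of 512 and blksiz already at 500,
  * left is 12; an entry needing more than those 12 bytes makes the
  * previous entry's nfs_reclen absorb the 12-byte remainder and
  * advances the uio to the 512-byte boundary before the new entry
  * is emitted.
  */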
984263bc 2251 left = DIRBLKSIZ - blksiz;
01f31ab3
JS
2252 if ((tlen + sizeof(struct nfs_dirent)) > left) {
2253 dp->nfs_reclen += left;
984263bc
MD
2254 uiop->uio_iov->iov_base += left;
2255 uiop->uio_iov->iov_len -= left;
2256 uiop->uio_offset += left;
2257 uiop->uio_resid -= left;
2258 blksiz = 0;
2259 }
01f31ab3 2260 if ((tlen + sizeof(struct nfs_dirent)) > uiop->uio_resid)
984263bc
MD
2261 bigenough = 0;
2262 if (bigenough) {
01f31ab3
JS
2263 dp = (struct nfs_dirent *)uiop->uio_iov->iov_base;
2264 dp->nfs_ino = fileno;
2265 dp->nfs_namlen = len;
2266 dp->nfs_reclen = tlen + sizeof(struct nfs_dirent);
2267 dp->nfs_type = DT_UNKNOWN;
2268 blksiz += dp->nfs_reclen;
984263bc
MD
2269 if (blksiz == DIRBLKSIZ)
2270 blksiz = 0;
01f31ab3
JS
2271 uiop->uio_offset += sizeof(struct nfs_dirent);
2272 uiop->uio_resid -= sizeof(struct nfs_dirent);
2273 uiop->uio_iov->iov_base += sizeof(struct nfs_dirent);
2274 uiop->uio_iov->iov_len -= sizeof(struct nfs_dirent);
984263bc 2275 nfsm_mtouio(uiop, len);
7d877edf
MD
2276
2277 /*
2278 * The uiop has advanced by nfs_dirent + len
2279 * but really needs to advance by
2280 * nfs_dirent + tlen
2281 */
984263bc
MD
2282 cp = uiop->uio_iov->iov_base;
2283 tlen -= len;
2284 *cp = '\0'; /* null terminate */
2285 uiop->uio_iov->iov_base += tlen;
2286 uiop->uio_iov->iov_len -= tlen;
2287 uiop->uio_offset += tlen;
2288 uiop->uio_resid -= tlen;
7d877edf
MD
2289 } else {
2290 /*
2291 * NFS strings must be rounded up (nfsm_mtouio
2292 * handled that in the bigenough case).
2293 */
984263bc 2294 nfsm_adv(nfsm_rndup(len));
7d877edf 2295 }
984263bc
MD
2296 if (v3) {
2297 nfsm_dissect(tl, u_int32_t *,
2298 3 * NFSX_UNSIGNED);
2299 } else {
2300 nfsm_dissect(tl, u_int32_t *,
2301 2 * NFSX_UNSIGNED);
2302 }
7d877edf
MD
2303
2304 /*
2305 * If we were able to accommodate the last entry,
2306 * get the cookie for the next one. Otherwise
2307 * hold over the cookie for the one we were not
2308 * able to accommodate.
2309 */
984263bc
MD
2310 if (bigenough) {
2311 cookie.nfsuquad[0] = *tl++;
2312 if (v3)
2313 cookie.nfsuquad[1] = *tl++;
7d877edf 2314 } else if (v3) {
984263bc 2315 tl += 2;
7d877edf 2316 } else {
984263bc 2317 tl++;
7d877edf 2318 }
984263bc
MD
2319 more_dirs = fxdr_unsigned(int, *tl);
2320 }
2321 /*
2322 * If at end of rpc data, get the eof boolean
2323 */
2324 if (!more_dirs) {
2325 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2326 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2327 }
2328 m_freem(mrep);
2329 }
2330 /*
2331 * Fill last record, if any, out to a multiple of DIRBLKSIZ
2332 * by increasing d_reclen for the last record.
2333 */
2334 if (blksiz > 0) {
2335 left = DIRBLKSIZ - blksiz;
01f31ab3 2336 dp->nfs_reclen += left;
984263bc
MD
2337 uiop->uio_iov->iov_base += left;
2338 uiop->uio_iov->iov_len -= left;
2339 uiop->uio_offset += left;
2340 uiop->uio_resid -= left;
2341 }
2342
7d877edf
MD
2343 if (bigenough) {
2344 /*
2345 * We hit the end of the directory, update direofoffset.
2346 */
984263bc 2347 dnp->n_direofoffset = uiop->uio_offset;
7d877edf
MD
2348 } else {
2349 /*
2350 * There is more to go, insert the link cookie so the
2351 * next block can be read.
2352 */
984263bc
MD
2353 if (uiop->uio_resid > 0)
2354 printf("EEK! readdirrpc resid > 0\n");
2355 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2356 *cookiep = cookie;
2357 }
2358nfsmout:
2359 return (error);
2360}
2361
2362/*
2363 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2364 */
2365int
3b568787 2366nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop)
984263bc 2367{
40393ded 2368 int len, left;
01f31ab3 2369 struct nfs_dirent *dp;
40393ded
RG
2370 u_int32_t *tl;
2371 caddr_t cp;
2372 int32_t t1, t2;
2373 struct vnode *newvp;
2374 nfsuint64 *cookiep;
984263bc
MD
2375 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2376 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
984263bc
MD
2377 nfsuint64 cookie;
2378 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2379 struct nfsnode *dnp = VTONFS(vp), *np;
2380 nfsfh_t *fhp;
2381 u_quad_t fileno;
2382 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2383 int attrflag, fhsize;
fad57d0e
MD
2384 struct namecache *ncp;
2385 struct namecache *dncp;
2386 struct nlcomponent nlc;
984263bc
MD
2387
2388#ifndef nolint
01f31ab3 2389 dp = NULL;
984263bc
MD
2390#endif
2391#ifndef DIAGNOSTIC
2392 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2393 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2394 panic("nfs readdirplusrpc bad uio");
2395#endif
fad57d0e
MD
2396 /*
2397 * Obtain the namecache record for the directory so we have something
2398 * to use as a basis for creating the entries. This function will
2399 * return a held (but not locked) ncp. The ncp may be disconnected
2400 * from the tree and cannot be used for upward traversals, and the
2401 * ncp may be unnamed. Note that other unrelated operations may
2402 * cause the ncp to be named at any time.
2403 */
2404 dncp = cache_fromdvp(vp, NULL, 0);
2405 bzero(&nlc, sizeof(nlc));
984263bc
MD
2406 newvp = NULLVP;
2407
2408 /*
2409 * If there is no cookie, assume directory was stale.
2410 */
2411 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2412 if (cookiep)
2413 cookie = *cookiep;
2414 else
2415 return (NFSERR_BAD_COOKIE);
2416 /*
2417 * Loop around doing readdir rpc's of size nm_readdirsize
2418 * truncated to a multiple of DIRBLKSIZ.
2419 * The stopping criterion is EOF or a full buffer.
2420 */
2421 while (more_dirs && bigenough) {
2422 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2423 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2424 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2425 nfsm_fhtom(vp, 1);
2426 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2427 *tl++ = cookie.nfsuquad[0];
2428 *tl++ = cookie.nfsuquad[1];
2429 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2430 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2431 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2432 *tl = txdr_unsigned(nmp->nm_rsize);
c1cf1e59 2433 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, nfs_vpcred(vp, ND_READ));
5a9187cb 2434 nfsm_postop_attr(vp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
2435 if (error) {
2436 m_freem(mrep);
2437 goto nfsmout;
2438 }
2439 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2440 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2441 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2442 more_dirs = fxdr_unsigned(int, *tl);
2443
2444 /* loop thru the dir entries, doctoring them to 4bsd form */
2445 while (more_dirs && bigenough) {
2446 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2447 fileno = fxdr_hyper(tl);
2448 len = fxdr_unsigned(int, *(tl + 2));
2449 if (len <= 0 || len > NFS_MAXNAMLEN) {
2450 error = EBADRPC;
2451 m_freem(mrep);
2452 goto nfsmout;
2453 }
2454 tlen = nfsm_rndup(len);
2455 if (tlen == len)
2456 tlen += 4; /* To ensure null termination*/
2457 left = DIRBLKSIZ - blksiz;
01f31ab3
JS
2458 if ((tlen + sizeof(struct nfs_dirent)) > left) {
2459 dp->nfs_reclen += left;
984263bc
MD
2460 uiop->uio_iov->iov_base += left;
2461 uiop->uio_iov->iov_len -= left;
2462 uiop->uio_offset += left;
2463 uiop->uio_resid -= left;
2464 blksiz = 0;
2465 }
01f31ab3 2466 if ((tlen + sizeof(struct nfs_dirent)) > uiop->uio_resid)
984263bc
MD
2467 bigenough = 0;
2468 if (bigenough) {
01f31ab3
JS
2469 dp = (struct nfs_dirent *)uiop->uio_iov->iov_base;
2470 dp->nfs_ino = fileno;
2471 dp->nfs_namlen = len;
2472 dp->nfs_reclen = tlen + sizeof(struct nfs_dirent);
2473 dp->nfs_type = DT_UNKNOWN;
2474 blksiz += dp->nfs_reclen;
984263bc
MD
2475 if (blksiz == DIRBLKSIZ)
2476 blksiz = 0;
01f31ab3
JS
2477 uiop->uio_offset += sizeof(struct nfs_dirent);
2478 uiop->uio_resid -= sizeof(struct nfs_dirent);
2479 uiop->uio_iov->iov_base += sizeof(struct nfs_dirent);
2480 uiop->uio_iov->iov_len -= sizeof(struct nfs_dirent);
fad57d0e
MD
2481 nlc.nlc_nameptr = uiop->uio_iov->iov_base;
2482 nlc.nlc_namelen = len;
984263bc
MD
2483 nfsm_mtouio(uiop, len);
2484 cp = uiop->uio_iov->iov_base;
2485 tlen -= len;
2486 *cp = '\0';
2487 uiop->uio_iov->iov_base += tlen;
2488 uiop->uio_iov->iov_len -= tlen;
2489 uiop->uio_offset += tlen;
2490 uiop->uio_resid -= tlen;
2491 } else
2492 nfsm_adv(nfsm_rndup(len));
2493 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2494 if (bigenough) {
2495 cookie.nfsuquad[0] = *tl++;
2496 cookie.nfsuquad[1] = *tl++;
2497 } else
2498 tl += 2;
2499
2500 /*
2501 * Since the attributes are before the file handle
2502 * (sigh), we must skip over the attributes and then
2503 * come back and get them.
2504 */
2505 attrflag = fxdr_unsigned(int, *tl);
2506 if (attrflag) {
2507 dpossav1 = dpos;
2508 mdsav1 = md;
2509 nfsm_adv(NFSX_V3FATTR);
2510 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2511 doit = fxdr_unsigned(int, *tl);
2512 if (doit) {
2513 nfsm_getfh(fhp, fhsize, 1);
2514 if (NFS_CMPFH(dnp, fhp, fhsize)) {
597aea93 2515 vref(vp);
984263bc
MD
2516 newvp = vp;
2517 np = dnp;
2518 } else {
2519 error = nfs_nget(vp->v_mount, fhp,
2520 fhsize, &np);
2521 if (error)
2522 doit = 0;
2523 else
2524 newvp = NFSTOV(np);
2525 }
2526 }
2527 if (doit && bigenough) {
2528 dpossav2 = dpos;
2529 dpos = dpossav1;
2530 mdsav2 = md;
2531 md = mdsav1;
2532 nfsm_loadattr(newvp, (struct vattr *)0);
2533 dpos = dpossav2;
2534 md = mdsav2;
01f31ab3 2535 dp->nfs_type =
984263bc 2536 IFTODT(VTTOIF(np->n_vattr.va_type));
fad57d0e
MD
2537 if (dncp) {
2538 printf("NFS/READDIRPLUS, ENTER %*.*s\n",
2539 nlc.nlc_namelen, nlc.nlc_namelen,
2540 nlc.nlc_nameptr);
2541 ncp = cache_nlookup(dncp, &nlc);
2542 cache_setunresolved(ncp);
2543 cache_setvp(ncp, newvp);
2544 cache_put(ncp);
2545 } else {
2546 printf("NFS/READDIRPLUS, UNABLE TO ENTER"
2547 " %*.*s\n",
2548 nlc.nlc_namelen, nlc.nlc_namelen,
2549 nlc.nlc_nameptr);
2550 }
984263bc
MD
2551 }
2552 } else {
2553 /* Just skip over the file handle */
2554 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2555 i = fxdr_unsigned(int, *tl);
2556 nfsm_adv(nfsm_rndup(i));
2557 }
2558 if (newvp != NULLVP) {
2559 if (newvp == vp)
2560 vrele(newvp);
2561 else
2562 vput(newvp);
2563 newvp = NULLVP;
2564 }
2565 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2566 more_dirs = fxdr_unsigned(int, *tl);
2567 }
2568 /*
2569 * If at end of rpc data, get the eof boolean
2570 */
2571 if (!more_dirs) {
2572 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2573 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2574 }
2575 m_freem(mrep);
2576 }
2577 /*
2578 * Fill last record, if any, out to a multiple of DIRBLKSIZ
2579 * by increasing d_reclen for the last record.
2580 */
2581 if (blksiz > 0) {
2582 left = DIRBLKSIZ - blksiz;
01f31ab3 2583 dp->nfs_reclen += left;
984263bc
MD
2584 uiop->uio_iov->iov_base += left;
2585 uiop->uio_iov->iov_len -= left;
2586 uiop->uio_offset += left;
2587 uiop->uio_resid -= left;
2588 }
2589
2590 /*
2591 * We are now either at the end of the directory or have filled the
2592 * block.
2593 */
2594 if (bigenough)
2595 dnp->n_direofoffset = uiop->uio_offset;
2596 else {
2597 if (uiop->uio_resid > 0)
2598 printf("EEK! readdirplusrpc resid > 0\n");
2599 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2600 *cookiep = cookie;
2601 }
2602nfsmout:
2603 if (newvp != NULLVP) {
2604 if (newvp == vp)
2605 vrele(newvp);
2606 else
2607 vput(newvp);
2608 newvp = NULLVP;
2609 }
fad57d0e
MD
2610 if (dncp)
2611 cache_drop(dncp);
984263bc
MD
2612 return (error);
2613}
2614
2615/*
2616 * Silly rename. To make the stateless NFS filesystem look a little
2617 * more like "ufs", a remove of an active vnode is translated to a rename
2618 * to a funny looking filename that is removed later by nfs_inactive on
2619 * the nfsnode. There is the potential for another process on a different
2620 * client to create the same funny name between the time nfs_lookitup()
2621 * fails and nfs_rename() completes, but...
2622 */
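/*
 * Example (illustrative value): with cnp->cn_td at 0xc12345a0 the sprintf
 * below first tries ".nfsAc12345a04.4"; if that name already exists on
 * the server, the loop bumps the fifth character ('A' -> 'B' ... up to
 * 'z') until an unused name is found.
 */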
2623static int
e851b29e 2624nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
984263bc 2625{
40393ded 2626 struct sillyrename *sp;
984263bc
MD
2627 struct nfsnode *np;
2628 int error;
984263bc 2629
8c361dda
MD
2630 /*
2631 * We previously purged dvp instead of vp. I don't know why; it
2632 * completely destroys performance. We can't do it anyway with the
2633 * new VFS API since we would be breaking the namecache topology.
2634 */
fad57d0e 2635 cache_purge(vp); /* XXX */
984263bc
MD
2636 np = VTONFS(vp);
2637#ifndef DIAGNOSTIC
2638 if (vp->v_type == VDIR)
2639 panic("nfs: sillyrename dir");
2640#endif
2641 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2642 M_NFSREQ, M_WAITOK);
2643 sp->s_cred = crdup(cnp->cn_cred);
2644 sp->s_dvp = dvp;
597aea93 2645 vref(dvp);
984263bc
MD
2646
2647 /* Fudge together a funny name */
dadab5e9 2648 sp->s_namlen = sprintf(sp->s_name, ".nfsA%08x4.4", (int)cnp->cn_td);
984263bc
MD
2649
2650 /* Try lookitups until we get one that isn't there */
2651 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
dadab5e9 2652 cnp->cn_td, (struct nfsnode **)0) == 0) {
984263bc
MD
2653 sp->s_name[4]++;
2654 if (sp->s_name[4] > 'z') {
2655 error = EINVAL;
2656 goto bad;
2657 }
2658 }
2659 error = nfs_renameit(dvp, cnp, sp);
2660 if (error)
2661 goto bad;
2662 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
dadab5e9 2663 cnp->cn_td, &np);
984263bc
MD
2664 np->n_sillyrename = sp;
2665 return (0);
2666bad:
2667 vrele(sp->s_dvp);
2668 crfree(sp->s_cred);
2669 free((caddr_t)sp, M_NFSREQ);
2670 return (error);
2671}
2672
2673/*
2674 * Look up a file name and optionally either update the file handle or
2675 * allocate an nfsnode, depending on the value of npp.
2676 * npp == NULL --> just do the lookup
2677 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2678 * handled too
2679 * *npp != NULL --> update the file handle in the vnode
2680 */
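/*
 * Usage sketch (illustrative; dvp/name/len/cred/td are placeholders for
 * whatever the caller has in hand -- compare the call sites in
 * nfs_sillyrename(), nfs_symlink() and nfs_mkdir()):
 */
#if 0
	struct nfsnode *np = NULL;

	/* 1) probe only: does the name exist on the server? */
	error = nfs_lookitup(dvp, name, len, cred, td, NULL);

	/* 2) *npp == NULL: allocate an nfsnode (attributes handled too) */
	error = nfs_lookitup(dvp, name, len, cred, td, &np);

	/* 3) *npp != NULL: refresh the file handle stored in that nfsnode */
	error = nfs_lookitup(dvp, name, len, cred, td, &np);
#endif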
2681static int
e851b29e
CP
2682nfs_lookitup(struct vnode *dvp, const char *name, int len, struct ucred *cred,
2683 struct thread *td, struct nfsnode **npp)
984263bc 2684{
40393ded
RG
2685 u_int32_t *tl;
2686 caddr_t cp;
2687 int32_t t1, t2;
984263bc
MD
2688 struct vnode *newvp = (struct vnode *)0;
2689 struct nfsnode *np, *dnp = VTONFS(dvp);
2690 caddr_t bpos, dpos, cp2;
2691 int error = 0, fhlen, attrflag;
2692 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2693 nfsfh_t *nfhp;
2694 int v3 = NFS_ISV3(dvp);
2695
2696 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2697 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2698 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2699 nfsm_fhtom(dvp, v3);
2700 nfsm_strtom(name, len, NFS_MAXNAMLEN);
dadab5e9 2701 nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
984263bc
MD
2702 if (npp && !error) {
2703 nfsm_getfh(nfhp, fhlen, v3);
2704 if (*npp) {
2705 np = *npp;
2706 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2707 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2708 np->n_fhp = &np->n_fh;
2709 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2710 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2711 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2712 np->n_fhsize = fhlen;
2713 newvp = NFSTOV(np);
2714 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
597aea93 2715 vref(dvp);
984263bc
MD
2716 newvp = dvp;
2717 } else {
2718 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2719 if (error) {
2720 m_freem(mrep);
2721 return (error);
2722 }
2723 newvp = NFSTOV(np);
2724 }
2725 if (v3) {
5a9187cb 2726 nfsm_postop_attr(newvp, attrflag, NFS_LATTR_NOSHRINK);
984263bc
MD
2727 if (!attrflag && *npp == NULL) {
2728 m_freem(mrep);
2729 if (newvp == dvp)
2730 vrele(newvp);
2731 else
2732 vput(newvp);
2733 return (ENOENT);
2734 }
2735 } else
2736 nfsm_loadattr(newvp, (struct vattr *)0);
2737 }
6b08710e
MD
2738 m_freem(mrep);
2739nfsmout:
984263bc
MD
2740 if (npp && *npp == NULL) {
2741 if (error) {
2742 if (newvp) {
2743 if (newvp == dvp)
2744 vrele(newvp);
2745 else
2746 vput(newvp);
2747 }
2748 } else
2749 *npp = np;
2750 }
2751 return (error);
2752}
2753
2754/*
2755 * Nfs Version 3 commit rpc
2756 */
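/*
 * Caller-side sketch (mirrors nfs_flush_docommit() below): a changed
 * write verifier means the server lost uncommitted data, so the cached
 * B_NEEDCOMMIT state must be thrown away and the data rewritten.
 */
#if 0
	retv = nfs_commit(vp, offset, cnt, td);
	if (retv == NFSERR_STALEWRITEVERF)
		nfs_clearcommit(vp->v_mount);
#endif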
2757int
3b568787 2758nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct thread *td)
984263bc 2759{
40393ded
RG
2760 caddr_t cp;
2761 u_int32_t *tl;
2762 int32_t t1, t2;
2763 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
984263bc
MD
2764 caddr_t bpos, dpos, cp2;
2765 int error = 0, wccflag = NFSV3_WCCRATTR;
2766 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2767
2768 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2769 return (0);
2770 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2771 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2772 nfsm_fhtom(vp, 1);
2773 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2774 txdr_hyper(offset, tl);
2775 tl += 2;
2776 *tl = txdr_unsigned(cnt);
c1cf1e59 2777 nfsm_request(vp, NFSPROC_COMMIT, td, nfs_vpcred(vp, ND_WRITE));
984263bc
MD
2778 nfsm_wcc_data(vp, wccflag);
2779 if (!error) {
2780 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2781 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2782 NFSX_V3WRITEVERF)) {
2783 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2784 NFSX_V3WRITEVERF);
2785 error = NFSERR_STALEWRITEVERF;
2786 }
2787 }
6b08710e
MD
2788 m_freem(mrep);
2789nfsmout:
984263bc
MD
2790 return (error);
2791}
2792
2793/*
2794 * Kludge City..
2795 * - make nfs_bmap() essentially a no-op that does no translation
2796 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2797 * (Maybe I could use the process's page mapping, but I was concerned
2798 * that Kernel Write might not be enabled, figured copyout() would do a
2799 * lot more work than bcopy(), and it currently happens in the context
2800 * of the swapper process (2).)
e851b29e 2801 *
54078292
MD
2802 * nfs_bmap(struct vnode *a_vp, off_t a_loffset, struct vnode **a_vpp,
2803 * off_t *a_doffsetp, int *a_runp, int *a_runb)
984263bc
MD
2804 */
2805static int
e851b29e 2806nfs_bmap(struct vop_bmap_args *ap)
984263bc 2807{
40393ded 2808 struct vnode *vp = ap->a_vp;
984263bc
MD
2809
2810 if (ap->a_vpp != NULL)
2811 *ap->a_vpp = vp;
54078292
MD
2812 if (ap->a_doffsetp != NULL)
2813 *ap->a_doffsetp = ap->a_loffset;
984263bc
MD
2814 if (ap->a_runp != NULL)
2815 *ap->a_runp = 0;
2816 if (ap->a_runb != NULL)
2817 *ap->a_runb = 0;
2818 return (0);
2819}
2820
2821/*
2822 * Strategy routine.
81b5c339 2823 *
984263bc
MD
2824 * For async requests when nfsiod(s) are running, queue the request by
2825 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2826 * request.
2827 */
2828static int
e851b29e 2829nfs_strategy(struct vop_strategy_args *ap)
984263bc 2830{
81b5c339
MD
2831 struct bio *bio = ap->a_bio;
2832 struct bio *nbio;
2833 struct buf *bp = bio->bio_buf;
dadab5e9 2834 struct thread *td;
984263bc
MD
2835 int error = 0;
2836
81b5c339
MD
2837 KASSERT(!(bp->b_flags & B_DONE),
2838 ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2839 KASSERT(BUF_REFCNT(bp) > 0,
2840 ("nfs_strategy: buffer %p not locked", bp));
984263bc 2841
984263bc 2842 if (bp->b_flags & B_ASYNC)
dadab5e9 2843 td = NULL;
984263bc 2844 else
dadab5e9 2845 td = curthread; /* XXX */
984263bc 2846
81b5c339 2847 /*
54078292
MD
2848 * We probably don't need to push an nbio any more since no
2849 * block conversion is required due to the use of 64 bit byte
2850 * offsets, but do it anyway.
81b5c339
MD
2851 */
2852 nbio = push_bio(bio);
54078292 2853 nbio->bio_offset = bio->bio_offset;
81b5c339 2854
984263bc
MD
2855 /*
2856 * If the op is asynchronous and an i/o daemon is waiting
2857 * queue the request, wake it up and wait for completion
2858 * otherwise just do it ourselves.
2859 */
81b5c339
MD
2860 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(ap->a_vp, nbio, td))
2861 error = nfs_doio(ap->a_vp, nbio, td);
984263bc
MD
2862 return (error);
2863}
2864
2865/*
2866 * Mmap a file
2867 *
2868 * NB Currently unsupported.
e851b29e
CP
2869 *
2870 * nfs_mmap(struct vnode *a_vp, int a_fflags, struct ucred *a_cred,
2871 * struct thread *a_td)
984263bc
MD
2872 */
2873/* ARGSUSED */
2874static int
e851b29e 2875nfs_mmap(struct vop_mmap_args *ap)
984263bc 2876{
984263bc
MD
2877 return (EINVAL);
2878}
2879
2880/*
2881 * fsync vnode op. Just call nfs_flush() with commit == 1.
e851b29e
CP
2882 *
2883 * nfs_fsync(struct vnodeop_desc *a_desc, struct vnode *a_vp,
2884 * struct ucred * a_cred, int a_waitfor, struct thread *a_td)
984263bc
MD
2885 */
2886/* ARGSUSED */
2887static int
e851b29e 2888nfs_fsync(struct vop_fsync_args *ap)
984263bc 2889{
3b568787 2890 return (nfs_flush(ap->a_vp, ap->a_waitfor, ap->a_td, 1));
984263bc
MD
2891}
2892
2893/*
6bae6177
MD
2894 * Flush all the blocks associated with a vnode. Dirty NFS buffers may be
2895 * in one of two states: If B_NEEDCOMMIT is clear then the buffer contains
2896 * new NFS data which needs to be written to the server. If B_NEEDCOMMIT is
2897 * set the buffer contains data that has already been written to the server
2898 * and which now needs a commit RPC.
2899 *
2900 * If commit is 0 we only take one pass and only flush buffers containing new
2901 * dirty data.
2902 *
2903 * If commit is 1 we take two passes, issuing a commit RPC in the second
2904 * pass.
2905 *
2906 * If waitfor is MNT_WAIT and commit is 1, we loop as many times as required
2907 * to completely flush all pending data.
2908 *
2909 * Note that the RB_SCAN code properly handles the case where the
2910 * callback might block and directly or indirectly (another thread) cause
2911 * the RB tree to change.
984263bc 2912 */
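/*
 * Typical invocation sketch: nfs_fsync() above simply asks for the full
 * treatment -- both passes, looping until everything is flushed and
 * committed when the caller requested MNT_WAIT.
 */
#if 0
	error = nfs_flush(vp, MNT_WAIT, curthread, 1);
#endif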
6bae6177
MD
2913
2914#ifndef NFS_COMMITBVECSIZ
2915#define NFS_COMMITBVECSIZ 16
2916#endif
2917
2918struct nfs_flush_info {
2919 enum { NFI_FLUSHNEW, NFI_COMMIT } mode;
2920 struct thread *td;
2921 struct vnode *vp;
2922 int waitfor;
2923 int slpflag;
2924 int slptimeo;
2925 int loops;
2926 struct buf *bvary[NFS_COMMITBVECSIZ];
2927 int bvsize;
2928 off_t beg_off;
2929 off_t end_off;
2930};
2931
2932static int nfs_flush_bp(struct buf *bp, void *data);
2933static int nfs_flush_docommit(struct nfs_flush_info *info, int error);
2934
5a9187cb 2935int
e851b29e 2936nfs_flush(struct vnode *vp, int waitfor, struct thread *td, int commit)
984263bc 2937{
40393ded 2938 struct nfsnode *np = VTONFS(vp);
984263bc 2939 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
6bae6177
MD
2940 struct nfs_flush_info info;
2941 int error;
984263bc 2942
6bae6177
MD
2943 bzero(&info, sizeof(info));
2944 info.td = td;
2945 info.vp = vp;
2946 info.waitfor = waitfor;
2947 info.slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
2948 info.loops = 0;
2949
2950 do {
984263bc 2951 /*
6bae6177 2952 * Flush mode
984263bc 2953 */
6bae6177
MD
2954 info.mode = NFI_FLUSHNEW;
2955 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
2956 nfs_flush_bp, &info);
2957
2958 /*
2959 * Take a second pass if committing and no error occurred.
2960 * Clean up any left over collection (whether an error
2961 * occurs or not).
2962 */
2963 if (commit && error == 0) {
2964 info.mode = NFI_COMMIT;
2965 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
2966 nfs_flush_bp, &info);
2967 if (info.bvsize)
2968 error = nfs_flush_docommit(&info, error);
984263bc 2969 }
6bae6177 2970
984263bc 2971 /*
6bae6177
MD
2972 * Wait for pending I/O to complete before checking whether
2973 * any further dirty buffers exist.
984263bc 2974 */
81b5c339
MD
2975 while (waitfor == MNT_WAIT && vp->v_track_write.bk_active) {
2976 vp->v_track_write.bk_waitflag = 1;
2977 error = tsleep(&vp->v_track_write,
6bae6177
MD
2978 info.slpflag, "nfsfsync", info.slptimeo);
2979 if (error) {
2980 /*
2981 * We have to be able to break out if this
2982 * is an 'intr' mount.
2983 */
2984 if (nfs_sigintr(nmp, (struct nfsreq *)0, td)) {
2985 error = -EINTR;
2986 break;
2987 }
2988
2989 /*
2990 * Since we do not process pending signals,
2991 * once we get a PCATCH our tsleep() will no
2992 * longer sleep, switch to a fixed timeout
2993 * instead.
2994 */
2995 if (info.slpflag == PCATCH) {
2996 info.slpflag = 0;
2997 info.slptimeo = 2 * hz;
2998 }
2999 error = 0;
3000 }
3001 }
3002 ++info.loops;
3003 /*
3004 * Loop if we are flushing synchronously as well as committing,
3005 * and dirty buffers are still present. Otherwise we might livelock.
3006 */
3007 } while (waitfor == MNT_WAIT && commit &&
3008 error == 0 && !RB_EMPTY(&vp->v_rbdirty_tree));
3009
3010 /*
3011 * The callbacks have to return a negative error to terminate the
3012 * RB scan.
3013 */
3014 if (error < 0)
3015 error = -error;
3016
3017 /*
3018 * Deal with any error collection
3019 */
3020 if (np->n_flag & NWRITEERR) {
3021 error = np->n_error;
3022 np->n_flag &= ~NWRITEERR;
3023 }
3024 return (error);
3025}
3026
3027
3028static
3029int
3030nfs_flush_bp(struct buf *bp, void *data)
3031{
3032 struct nfs_flush_info *info = data;
3033 off_t toff;
3034 int error;
6bae6177
MD
3035
3036 error = 0;
3037 switch(info->mode) {
3038 case NFI_FLUSHNEW:
165dba55 3039 crit_enter();
6bae6177
MD
3040 if (info->loops && info->waitfor == MNT_WAIT) {
3041 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT);
3042 if (error) {
f2770c70
MD
3043 int lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
3044 if (info->slpflag & PCATCH)
3045 lkflags |= LK_PCATCH;
3046 error = BUF_TIMELOCK(bp, lkflags, "nfsfsync",
3047 info->slptimeo);
6bae6177 3048 }
984263bc 3049 } else {
6bae6177 3050 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT);
984263bc 3051 }
6bae6177 3052 if (error == 0) {
65c6c519
MD
3053 KKASSERT(bp->b_vp == info->vp);
3054
6bae6177
MD
3055 if ((bp->b_flags & B_DELWRI) == 0)
3056 panic("nfs_fsync: not dirty");
3057 if (bp->b_flags & B_NEEDCOMMIT) {
3058 BUF_UNLOCK(bp);
165dba55 3059 crit_exit();
984263bc 3060 break;
6bae6177 3061 }
984263bc 3062 bremfree(bp);
984263bc 3063
6bae6177 3064 bp->b_flags |= B_ASYNC;
165dba55 3065 crit_exit();
6bae6177
MD
3066 VOP_BWRITE(bp->b_vp, bp);
3067 } else {
165dba55 3068 crit_exit();
6bae6177
MD
3069 error = 0;
3070 }
3071 break;
3072 case NFI_COMMIT:
3073 /*
3074 * Only process buffers in need of a commit which we can
3075 * immediately lock. This may prevent a buffer from being
3076 * committed, but the normal flush loop will block on the
3077 * same buffer so we shouldn't get into an endless loop.
3078 */
165dba55 3079 crit_enter();
6bae6177
MD
3080 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
3081 (B_DELWRI | B_NEEDCOMMIT) ||
3082 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
165dba55 3083 crit_exit();
6bae6177
MD
3084 break;
3085 }
984263bc 3086
65c6c519 3087 KKASSERT(bp->b_vp == info->vp);
6bae6177
MD
3088 bremfree(bp);
3089
3090 /*
3091 * NOTE: we are not clearing B_DONE here, so we have
3092 * to do it later on in this routine if we intend to
3093 * initiate I/O on the bp.
3094 *
3095 * Note: to avoid loopback deadlocks, we do not
3096 * assign b_runningbufspace.
3097 */
a8f169e2 3098 vfs_busy_pages(bp->b_vp, bp, 1);
6bae6177
MD
3099
3100 info->bvary[info->bvsize] = bp;
54078292 3101 toff = bp->b_bio2.bio_offset + bp->b_dirtyoff;
6bae6177
MD
3102 if (info->bvsize == 0 || toff < info->beg_off)
3103 info->beg_off = toff;
54078292 3104 toff += (off_t)(bp->b_dirtyend - bp->b_dirtyoff);
6bae6177
MD
3105 if (info->bvsize == 0 || toff > info->end_off)
3106 info->end_off = toff;
3107 ++info->bvsize;
3108 if (info->bvsize == NFS_COMMITBVECSIZ) {
3109 error = nfs_flush_docommit(info, 0);
3110 KKASSERT(info->bvsize == 0);
984263bc 3111 }
165dba55 3112 crit_exit();
984263bc 3113 }
6bae6177
MD
3114 return (error);
3115}
3116
3117static
3118int
3119nfs_flush_docommit(struct nfs_flush_info *info, int error)
3120{
3121 struct vnode *vp;
3122 struct buf *bp;
3123 off_t bytes;
3124 int retv;
3125 int i;
6bae6177
MD
3126
3127 vp = info->vp;
3128
3129 if (info->bvsize > 0) {
984263bc 3130 /*
3b568787
MD
3131 * Commit data on the server, as required. Note that
3132 * nfs_commit will use the vnode's cred for the commit.
6bae6177 3133 * The NFSv3 commit RPC is limited to a 32 bit byte count.
984263bc 3134 */
6bae6177
MD
3135 bytes = info->end_off - info->beg_off;
3136 if (bytes > 0x40000000)
3137 bytes = 0x40000000;
3138 if (error) {
3139 retv = -error;
3140 } else {
3141 retv = nfs_commit(vp, info->beg_off,
3142 (int)bytes, info->td);
3143 if (retv == NFSERR_STALEWRITEVERF)
3144 nfs_clearcommit(vp->v_mount);
3145 }
984263bc
MD
3146
3147 /*
3148 * Now, either mark the blocks I/O done or mark the
3149 * blocks dirty, depending on whether the commit
3150 * succeeded.
3151 */
6bae6177
MD
3152 for (i = 0; i < info->bvsize; ++i) {
3153 bp = info->bvary[i];
3154 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
984263bc
MD
3155 if (retv) {
3156 /*
3157 * Error, leave B_DELWRI intact
3158 */
3159 vfs_unbusy_pages(bp);
3160 brelse(bp);
3161 } else {
3162 /*
3163 * Success, remove B_DELWRI ( bundirty() ).
3164 *
3165 * b_dirtyoff/b_dirtyend seem to be NFS
3166 * specific. We should probably move that
3167 * into bundirty(). XXX
81b5c339
MD
3168 *
3169 * We are faking an I/O write, we have to
3170 * start the transaction in order to
3171 * immediately biodone() it.
984263bc 3172 */
165dba55 3173 crit_enter();
984263bc
MD
3174 bp->b_flags |= B_ASYNC;
3175 bundirty(bp);
3176 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3177 bp->b_dirtyoff = bp->b_dirtyend = 0;
165dba55 3178 crit_exit();
81b5c339 3179 biodone(&bp->b_bio1);
984263bc
MD
3180 }
3181 }
6bae6177 3182 info->bvsize = 0;
984263bc 3183 }
984263bc
MD
3184 return (error);
3185}
3186
3187/*
3188 * NFS advisory byte-level locks.
3189 * Currently unsupported.
e851b29e
CP
3190 *
3191 * nfs_advlock(struct vnode *a_vp, caddr_t a_id, int a_op, struct flock *a_fl,
3192 * int a_flags)
984263bc
MD
3193 */
3194static int
e851b29e 3195nfs_advlock(struct vop_advlock_args *ap)
984263bc 3196{
40393ded 3197 struct nfsnode *np = VTONFS(ap->a_vp);
984263bc
MD
3198
3199 /*
3200 * The following kludge is to allow diskless support to work
3201 * until a real NFS lockd is implemented. Basically, just pretend
3202 * that this is a local lock.
3203 */
3204 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
3205}
3206
3207/*
3208 * Print out the contents of an nfsnode.
e851b29e
CP
3209 *
3210 * nfs_print(struct vnode *a_vp)
984263bc
MD
3211 */
3212static int
e851b29e 3213nfs_print(struct vop_print_args *ap)
984263bc 3214{
40393ded
RG
3215 struct vnode *vp = ap->a_vp;
3216 struct nfsnode *np = VTONFS(vp);
984263bc
MD
3217
3218 printf("tag VT_NFS, fileid %ld fsid 0x%x",
3219 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3220 if (vp->v_type == VFIFO)
3221 fifo_printinfo(vp);
3222 printf("\n");
3223 return (0);
3224}
3225
3226/*
3227 * Just call nfs_writebp() with the force argument set to 1.
3228 *
3229 * NOTE: B_DONE may or may not be set in a_bp on call.
e851b29e
CP
3230 *
3231 * nfs_bwrite(struct buf *a_bp)
984263bc
MD
3232 */
3233static int
e851b29e 3234nfs_bwrite(struct vop_bwrite_args *ap)
984263bc 3235{
dadab5e9 3236 return (nfs_writebp(ap->a_bp, 1, curthread));
984263bc
MD
3237}
3238
3239/*
6bae6177
MD
3240 * This is a clone of vn_bwrite(), except that it also handles the
3241 * B_NEEDCOMMIT flag. We set B_CACHE if this is a VMIO buffer.
984263bc
MD
3242 */
3243int
e851b29e 3244nfs_writebp(struct buf *bp, int force, struct thread *td)
984263bc 3245{
1f1ea522 3246 int error;
984263bc
MD
3247
3248 if (BUF_REFCNT(bp) == 0)
3249 panic("bwrite: buffer is not locked???");
3250
3251 if (bp->b_flags & B_INVAL) {
3252 brelse(bp);
3253 return(0);
3254 }
3255
3256 bp->b_flags |= B_CACHE;
3257
3258 /*
3259 * Undirty the bp. We will redirty it later if the I/O fails.
3260 */
165dba55 3261 crit_enter();
984263bc
MD
3262 bundirty(bp);
3263 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
165dba55 3264 crit_exit();
984263bc
MD
3265
3266 /*
3267 * Note: to avoid loopback deadlocks, we do not
3268 * assign b_runningbufspace.
3269 */
a8f169e2 3270 vfs_busy_pages(bp->b_vp, bp, 1);
984263bc 3271 BUF_KERNPROC(bp);
984263bc 3272
1f1ea522
MD
3273 if (bp->b_flags & B_ASYNC) {
3274 vn_strategy(bp->b_vp, &bp->b_bio1);
3275 error = 0;
3276 } else {
3277 vn_strategy(bp->b_vp, &bp->b_bio1);
3278 error = biowait(bp);
984263bc 3279 brelse(bp);
984263bc 3280 }
1f1ea522 3281 return (error);
984263bc
MD
3282}
3283
3284/*
3285 * nfs special file access vnode op.
3286 * Essentially just get vattr and then imitate iaccess() since the device is
3287 * local to the client.
e851b29e
CP
3288 *
3289 * nfsspec_access(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
3290 * struct thread *a_td)
984263bc
MD
3291 */
3292static int
e851b29e 3293nfsspec_access(struct vop_access_args *ap)
984263bc 3294{
40393ded
RG
3295 struct vattr *vap;
3296 gid_t *gp;
3297 struct ucred *cred = ap->a_cred;
984263bc
MD
3298 struct vnode *vp = ap->a_vp;
3299 mode_t mode = ap->a_mode;
3300 struct vattr vattr;
40393ded 3301 int i;
984263bc
MD
3302 int error;
3303
3304 /*
3305 * Disallow write attempts on filesystems mounted read-only;
3306 * unless the file is a socket, fifo, or a block or character
3307 * device resident on the filesystem.
3308 */
3309 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3310 switch (vp->v_type) {
3311 case VREG:
3312 case VDIR:
3313 case VLNK:
3314 return (EROFS);
3315 default:
3316 break;
3317 }
3318 }
3319 /*
3320 * If you're the super-user,
3321 * you always get access.
3322 */
3323 if (cred->cr_uid == 0)
3324 return (0);
3325 vap = &vattr;
3b568787 3326 error = VOP_GETATTR(vp, vap, ap->a_td);
984263bc
MD
3327 if (error)
3328 return (error);
3329 /*
3330 * Access check is based on only one of owner, group, public.
3331 * If not owner, then check group. If not a member of the
3332 * group, then check public access.
3333 */
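 /*
  * For example, a VWRITE request from a caller who is not the owner but
  * is in the file's group ends up testing the group write bit of va_mode
  * after the first "mode >>= 3" below; a second shift would test the
  * world bits instead.
  */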
3334 if (cred->cr_uid != vap->va_uid) {
3335 mode >>= 3;
3336 gp = cred->cr_groups;
3337 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3338 if (vap->va_gid == *gp)
3339 goto found;
3340 mode >>= 3;
3341found:
3342 ;
3343 }
3344 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3345 return (error);
3346}
3347
3348/*
3349 * Read wrapper for special devices.
e851b29e
CP
3350 *
3351 * nfsspec_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
3352 * struct ucred *a_cred)
984263bc
MD
3353 */
3354static int
e851b29e 3355nfsspec_read(struct vop_read_args *ap)
984263bc 3356{
40393ded 3357 struct nfsnode *np = VTONFS(ap->a_vp);
984263bc
MD
3358
3359 /*
3360 * Set access flag.
3361 */
3362 np->n_flag |= NACC;
3363 getnanotime(&np->n_atim);
0961aa92 3364 return (VOCALL(spec_vnode_vops, &ap->a_head));
984263bc
MD
3365}
3366
3367/*
3368 * Write wrapper for special devices.
e851b29e
CP
3369 *
3370 * nfsspec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
3371 * struct ucred *a_cred)
984263bc
MD
3372 */
3373static int
e851b29e 3374nfsspec_write(struct vop_write_args *ap)
984263bc 3375{
40393ded 3376 struct nfsnode *np = VTONFS(ap->a_vp);
984263bc
MD
3377
3378 /*
3379 * Set update flag.
3380 */
3381 np->n_flag |= NUPD;
3382 getnanotime(&np->n_mtim);
0961aa92 3383 return (VOCALL(spec_vnode_vops, &ap->a_head));
984263bc
MD
3384}
3385
3386/*
3387 * Close wrapper for special devices.
3388 *
3389 * Update the times on the nfsnode then do device close.
e851b29e
CP
3390 *
3391 * nfsspec_close(struct vnode *a_vp, int a_fflag, struct ucred *a_cred,
3392 * struct thread *a_td)
984263bc
MD
3393 */
3394static int
e851b29e 3395nfsspec_close(struct vop_close_args *ap)
984263bc 3396{
40393ded
RG
3397 struct vnode *vp = ap->a_vp;
3398 struct nfsnode *np = VTONFS(vp);
984263bc
MD
3399 struct vattr vattr;
3400
3401 if (np->n_flag & (NACC | NUPD)) {
3402 np->n_flag |= NCHG;
3403 if (vp->v_usecount == 1 &&
3404 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3405 VATTR_NULL(&vattr);
3406 if (np->n_flag & NACC)
3407 vattr.va_atime = np->n_atim;
3408 if (np->n_flag & NUPD)
3409 vattr.va_mtime = np->n_mtim;
c1cf1e59 3410 (void)VOP_SETATTR(vp, &vattr, nfs_vpcred(vp, ND_WRITE), ap->a_td);
984263bc
MD
3411 }
3412 }
0961aa92 3413 return (VOCALL(spec_vnode_vops, &ap->a_head));
984263bc
MD
3414}
3415
3416/*
3417 * Read wrapper for fifos.
e851b29e
CP
3418 *
3419 * nfsfifo_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
3420 * struct ucred *a_cred)
984263bc
MD
3421 */
3422static int
e851b29e 3423nfsfifo_read(struct vop_read_args *ap)
984263bc 3424{
40393ded 3425 struct nfsnode *np = VTONFS(ap->a_vp);
984263bc
MD
3426
3427 /*
3428 * Set access flag.
3429 */
3430 np->n_flag |= NACC;
3431 getnanotime(&np->n_atim);
0961aa92 3432 return (VOCALL(fifo_vnode_vops, &ap->a_head));
984263bc
MD
3433}
3434
3435/*
3436 * Write wrapper for fifos.
e851b29e
CP
3437 *
3438 * nfsfifo_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
3439 * struct ucred *a_cred)
984263bc
MD
3440 */
3441static int
e851b29e 3442nfsfifo_write(struct vop_write_args *ap)
984263bc 3443{
40393ded 3444 struct nfsnode *np = VTONFS(ap->a_vp);
984263bc
MD
3445
3446 /*
3447 * Set update flag.
3448 */
3449 np->n_flag |= NUPD;
3450 getnanotime(&np->n_mtim);
0961aa92 3451 return (VOCALL(fifo_vnode_vops, &ap->a_head));
984263bc
MD
3452}
3453
3454/*
3455 * Close wrapper for fifos.
3456 *
3457 * Update the times on the nfsnode then do fifo close.
e851b29e
CP
3458 *
3459 * nfsfifo_close(struct vnode *a_vp, int a_fflag, struct thread *a_td)
984263bc
MD
3460 */
3461static int
e851b29e 3462nfsfifo_close(struct vop_close_args *ap)
984263bc 3463{
40393ded
RG
3464 struct vnode *vp = ap->a_vp;
3465 struct nfsnode *np = VTONFS(vp);
984263bc
MD
3466 struct vattr vattr;
3467 struct timespec ts;
3468
3469 if (np->n_flag & (NACC | NUPD)) {
3470 getnanotime(&ts);
3471 if (np->n_flag & NACC)
3472 np->n_atim = ts;
3473 if (np->n_flag & NUPD)
3474 np->n_mtim = ts;
3475 np->n_flag |= NCHG;
3476 if (vp->v_usecount == 1 &&
3477 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3478 VATTR_NULL(&vattr);
3479 if (np->n_flag & NACC)
3480 vattr.va_atime = np->n_atim;
3481 if (np->n_flag & NUPD)
3482 vattr.va_mtime = np->n_mtim;
c1cf1e59 3483 (void)VOP_SETATTR(vp, &vattr, nfs_vpcred(vp, ND_WRITE), ap->a_td);
984263bc
MD
3484 }
3485 }
0961aa92 3486 return (VOCALL(fifo_vnode_vops, &ap->a_head));
984263bc
MD
3487}
3488