sys/vfs/nfs/nfs_bio.c
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
33 * $FreeBSD: /repoman/r/ncvs/src/sys/nfsclient/nfs_bio.c,v 1.130 2004/04/14 23:23:55 peadar Exp $
34 */
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/uio.h>
39#include <sys/resourcevar.h>
40#include <sys/signalvar.h>
41#include <sys/proc.h>
42#include <sys/buf.h>
43#include <sys/vnode.h>
44#include <sys/mount.h>
45#include <sys/kernel.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
48
49#include <vm/vm.h>
50#include <vm/vm_extern.h>
51#include <vm/vm_page.h>
52#include <vm/vm_object.h>
53#include <vm/vm_pager.h>
54#include <vm/vnode_pager.h>
55
56#include <sys/buf2.h>
57#include <sys/thread2.h>
58#include <vm/vm_page2.h>
59
60#include "rpcv2.h"
61#include "nfsproto.h"
62#include "nfs.h"
63#include "nfsmount.h"
64#include "nfsnode.h"
65#include "xdr_subs.h"
66#include "nfsm_subs.h"
67
68
69static struct buf *nfs_getcacheblk(struct vnode *vp, off_t loffset,
70 int size, struct thread *td);
71static int nfs_check_dirent(struct nfs_dirent *dp, int maxlen);
72static void nfsiodone_sync(struct bio *bio);
73static void nfs_readrpc_bio_done(nfsm_info_t info);
74static void nfs_writerpc_bio_done(nfsm_info_t info);
75static void nfs_commitrpc_bio_done(nfsm_info_t info);
76
77static __inline
78void
79nfs_knote(struct vnode *vp, int flags)
80{
81 if (flags)
82 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
83}
84
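/*
 * Example usage (this is the pattern nfs_write() below follows):
 * callers accumulate kqueue flags while working and post them once
 * on the way out, e.g.:
 *
 *	int kflags = 0;
 *	...
 *	kflags |= NOTE_EXTEND;		(file was extended)
 *	kflags |= NOTE_WRITE;		(file data was modified)
 *	...
 *	nfs_knote(vp, kflags);		(no-op when kflags is 0)
 */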
85/*
86 * Vnode op for read using bio
87 */
88int
89nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag)
90{
91 struct nfsnode *np = VTONFS(vp);
92 int biosize, i;
93 struct buf *bp, *rabp;
94 struct vattr vattr;
95 struct thread *td;
96 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
97 off_t lbn, rabn;
98 off_t raoffset;
99 off_t loffset;
100 int seqcount;
101 int nra, error = 0;
102 int boff = 0;
103 size_t n;
104
105#ifdef DIAGNOSTIC
106 if (uio->uio_rw != UIO_READ)
107 panic("nfs_read mode");
108#endif
109 if (uio->uio_resid == 0)
110 return (0);
111 if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */
112 return (EINVAL);
113 td = uio->uio_td;
114
115 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
116 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
117 (void)nfs_fsinfo(nmp, vp, td);
118 if (vp->v_type != VDIR &&
119 (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
120 return (EFBIG);
121 biosize = vp->v_mount->mnt_stat.f_iosize;
122 seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / MAXBSIZE);
123
124 /*
125 * For nfs, cache consistency can only be maintained approximately.
126 * Although RFC1094 does not specify the criteria, the following is
127 * believed to be compatible with the reference port.
128 *
129 * NFS: If local changes have been made and this is a
130 * directory, the directory must be invalidated and
131 * the attribute cache must be cleared.
132 *
133 * GETATTR is called to synchronize the file size. To
134 * avoid a deadlock against the VM system, we cannot do
135 * this for UIO_NOCOPY reads.
136 *
137 * If remote changes are detected local data is flushed
138 * and the cache is invalidated.
139 *
140 * NOTE: In the normal case the attribute cache is not
141 * cleared which means GETATTR may use cached data and
142 * not immediately detect changes made on the server.
143 */
144 if ((np->n_flag & NLMODIFIED) && vp->v_type == VDIR) {
145 nfs_invaldir(vp);
146 error = nfs_vinvalbuf(vp, V_SAVE, 1);
147 if (error)
148 return (error);
149 np->n_attrstamp = 0;
150 }
151
152 /*
153 * Synchronize the file size when possible. We can't do this without
154 * risking a deadlock if this is NOCOPY read from a vm_fault->getpages
155 * sequence.
156 */
157 if (uio->uio_segflg != UIO_NOCOPY) {
158 error = VOP_GETATTR(vp, &vattr);
159 if (error)
160 return (error);
161 }
162
163 /*
164 * This can deadlock getpages/putpages for regular
165 * files. Only do it for directories.
166 */
167 if (np->n_flag & NRMODIFIED) {
168 if (vp->v_type == VDIR) {
169 nfs_invaldir(vp);
170 error = nfs_vinvalbuf(vp, V_SAVE, 1);
171 if (error)
172 return (error);
173 np->n_flag &= ~NRMODIFIED;
174 }
175 }
176
177 /*
178 * Loop until uio exhausted or we hit EOF
179 */
180 do {
181 bp = NULL;
182
183 switch (vp->v_type) {
184 case VREG:
185 nfsstats.biocache_reads++;
186 lbn = uio->uio_offset / biosize;
187 boff = uio->uio_offset & (biosize - 1);
188 loffset = lbn * biosize;
189
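/*
 * Worked example of the arithmetic above: with biosize = 32768 and
 * uio_offset = 100000, lbn = 100000 / 32768 = 3,
 * boff = 100000 & 32767 = 1696, and loffset = 3 * 32768 = 98304,
 * i.e. the read starts at byte 1696 of the fourth buffer.
 */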
190 /*
191 * Start the read ahead(s), as required.
192 */
193 if (nmp->nm_readahead > 0 && nfs_asyncok(nmp)) {
194 for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
195 (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
196 rabn = lbn + 1 + nra;
197 raoffset = rabn * biosize;
198 if (findblk(vp, raoffset, FINDBLK_TEST) == NULL) {
199 rabp = nfs_getcacheblk(vp, raoffset, biosize, td);
200 if (!rabp)
201 return (EINTR);
202 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
203 rabp->b_cmd = BUF_CMD_READ;
204 vfs_busy_pages(vp, rabp);
205 nfs_asyncio(vp, &rabp->b_bio2);
206 } else {
207 brelse(rabp);
208 }
209 }
210 }
211 }
212
213 /*
214 * Obtain the buffer cache block. Figure out the buffer size
215 * when we are at EOF. If we are modifying the size of the
216 * buffer based on an EOF condition we need to hold
217 * nfs_rslock() through obtaining the buffer to prevent
218 * a potential writer-appender from messing with n_size.
219 * Otherwise we may accidentally truncate the buffer and
220 * lose dirty data.
221 *
222 * Note that bcount is *not* DEV_BSIZE aligned.
223 */
224 if (loffset + boff >= np->n_size) {
225 n = 0;
226 break;
227 }
228 bp = nfs_getcacheblk(vp, loffset, biosize, td);
229
230 if (bp == NULL)
231 return (EINTR);
232
233 /*
234 * If B_CACHE is not set, we must issue the read. If this
235 * fails, we return an error.
236 */
237 if ((bp->b_flags & B_CACHE) == 0) {
238 bp->b_cmd = BUF_CMD_READ;
239 bp->b_bio2.bio_done = nfsiodone_sync;
240 bp->b_bio2.bio_flags |= BIO_SYNC;
241 vfs_busy_pages(vp, bp);
242 error = nfs_doio(vp, &bp->b_bio2, td);
243 if (error) {
244 brelse(bp);
245 return (error);
246 }
247 }
248
249 /*
250 * on is the offset into the current bp. Figure out how many
251 * bytes we can copy out of the bp. Note that bcount is
252 * NOT DEV_BSIZE aligned.
253 *
254 * Then figure out how many bytes we can copy into the uio.
255 */
256 n = biosize - boff;
257 if (n > uio->uio_resid)
258 n = uio->uio_resid;
259 if (loffset + boff + n > np->n_size)
260 n = np->n_size - loffset - boff;
261 break;
262 case VLNK:
263 biosize = min(NFS_MAXPATHLEN, np->n_size);
264 nfsstats.biocache_readlinks++;
265 bp = nfs_getcacheblk(vp, (off_t)0, biosize, td);
266 if (bp == NULL)
267 return (EINTR);
268 if ((bp->b_flags & B_CACHE) == 0) {
269 bp->b_cmd = BUF_CMD_READ;
270 bp->b_bio2.bio_done = nfsiodone_sync;
271 bp->b_bio2.bio_flags |= BIO_SYNC;
272 vfs_busy_pages(vp, bp);
273 error = nfs_doio(vp, &bp->b_bio2, td);
274 if (error) {
275 bp->b_flags |= B_ERROR | B_INVAL;
276 brelse(bp);
277 return (error);
278 }
279 }
280 n = szmin(uio->uio_resid, (size_t)bp->b_bcount - bp->b_resid);
281 boff = 0;
282 break;
283 case VDIR:
284 nfsstats.biocache_readdirs++;
285 if (np->n_direofoffset &&
286 uio->uio_offset >= np->n_direofoffset
287 ) {
288 return (0);
289 }
290 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
291 boff = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
292 loffset = uio->uio_offset - boff;
293 bp = nfs_getcacheblk(vp, loffset, NFS_DIRBLKSIZ, td);
294 if (bp == NULL)
295 return (EINTR);
296
297 if ((bp->b_flags & B_CACHE) == 0) {
298 bp->b_cmd = BUF_CMD_READ;
299 bp->b_bio2.bio_done = nfsiodone_sync;
300 bp->b_bio2.bio_flags |= BIO_SYNC;
301 vfs_busy_pages(vp, bp);
302 error = nfs_doio(vp, &bp->b_bio2, td);
303 if (error)
304 brelse(bp);
305 while (error == NFSERR_BAD_COOKIE) {
306 kprintf("got bad cookie vp %p bp %p\n", vp, bp);
307 nfs_invaldir(vp);
308 error = nfs_vinvalbuf(vp, 0, 1);
309 /*
310 * Yuck! The directory has been modified on the
311 * server. The only way to get the block is by
312 * reading from the beginning to get all the
313 * offset cookies.
314 *
315 * Leave the last bp intact unless there is an error.
316 * Loop back up to the while if the error is another
317 * NFSERR_BAD_COOKIE (double yuch!).
318 */
319 for (i = 0; i <= lbn && !error; i++) {
320 if (np->n_direofoffset
321 && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
322 return (0);
323 bp = nfs_getcacheblk(vp, (off_t)i * NFS_DIRBLKSIZ,
324 NFS_DIRBLKSIZ, td);
325 if (!bp)
326 return (EINTR);
327 if ((bp->b_flags & B_CACHE) == 0) {
328 bp->b_cmd = BUF_CMD_READ;
329 bp->b_bio2.bio_done = nfsiodone_sync;
330 bp->b_bio2.bio_flags |= BIO_SYNC;
331 vfs_busy_pages(vp, bp);
332 error = nfs_doio(vp, &bp->b_bio2, td);
333 /*
334 * no error + B_INVAL == directory EOF,
335 * use the block.
336 */
337 if (error == 0 && (bp->b_flags & B_INVAL))
338 break;
339 }
340 /*
341 * An error will throw away the block and the
342 * for loop will break out. If no error and this
343 * is not the block we want, we throw away the
344 * block and go for the next one via the for loop.
345 */
346 if (error || i < lbn)
347 brelse(bp);
348 }
349 }
350 /*
351 * The above while is repeated if we hit another cookie
352 * error. If we hit an error and it wasn't a cookie error,
353 * we give up.
354 */
355 if (error)
356 return (error);
357 }
358
359 /*
360 * If not eof and read aheads are enabled, start one.
361 * (You need the current block first, so that you have the
362 * directory offset cookie of the next block.)
363 */
364 if (nmp->nm_readahead > 0 && nfs_asyncok(nmp) &&
365 (bp->b_flags & B_INVAL) == 0 &&
366 (np->n_direofoffset == 0 ||
367 loffset + NFS_DIRBLKSIZ < np->n_direofoffset) &&
368 findblk(vp, loffset + NFS_DIRBLKSIZ, FINDBLK_TEST) == NULL
369 ) {
370 rabp = nfs_getcacheblk(vp, loffset + NFS_DIRBLKSIZ,
371 NFS_DIRBLKSIZ, td);
372 if (rabp) {
373 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
374 rabp->b_cmd = BUF_CMD_READ;
375 vfs_busy_pages(vp, rabp);
376 nfs_asyncio(vp, &rabp->b_bio2);
377 } else {
378 brelse(rabp);
379 }
380 }
381 }
382 /*
383 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
384 * chopped for the EOF condition, we cannot tell how large
385 * NFS directories are going to be until we hit EOF. So
386 * an NFS directory buffer is *not* chopped to its EOF. Now,
387 * it just so happens that b_resid will effectively chop it
388 * to EOF. *BUT* this information is lost if the buffer goes
389 * away and is reconstituted into a B_CACHE state ( due to
390 * being VMIO ) later. So we keep track of the directory eof
391 * in np->n_direofoffset and chop it off as an extra step
392 * right here.
393 *
394 * NOTE: boff could already be beyond EOF.
395 */
396 if ((size_t)boff > NFS_DIRBLKSIZ - bp->b_resid) {
397 n = 0;
398 } else {
399 n = szmin(uio->uio_resid,
400 NFS_DIRBLKSIZ - bp->b_resid - (size_t)boff);
401 }
402 if (np->n_direofoffset &&
403 n > (size_t)(np->n_direofoffset - uio->uio_offset)) {
404 n = (size_t)(np->n_direofoffset - uio->uio_offset);
405 }
406 break;
407 default:
408 kprintf(" nfs_bioread: type %x unexpected\n",vp->v_type);
409 n = 0;
410 break;
411 }
412
413 switch (vp->v_type) {
414 case VREG:
415 if (n > 0)
416 error = uiomovebp(bp, bp->b_data + boff, n, uio);
417 break;
418 case VLNK:
419 if (n > 0)
420 error = uiomovebp(bp, bp->b_data + boff, n, uio);
421 n = 0;
422 break;
423 case VDIR:
424 if (n > 0) {
425 off_t old_off = uio->uio_offset;
426 caddr_t cpos, epos;
427 struct nfs_dirent *dp;
428
429 /*
430 * We are casting cpos to nfs_dirent, it must be
431 * int-aligned.
432 */
433 if (boff & 3) {
434 error = EINVAL;
435 break;
436 }
437
438 cpos = bp->b_data + boff;
439 epos = bp->b_data + boff + n;
440 while (cpos < epos && error == 0 && uio->uio_resid > 0) {
441 dp = (struct nfs_dirent *)cpos;
442 error = nfs_check_dirent(dp, (int)(epos - cpos));
443 if (error)
444 break;
445 if (vop_write_dirent(&error, uio, dp->nfs_ino,
446 dp->nfs_type, dp->nfs_namlen, dp->nfs_name)) {
447 break;
448 }
449 cpos += dp->nfs_reclen;
450 }
451 n = 0;
452 if (error == 0) {
453 uio->uio_offset = old_off + cpos -
454 bp->b_data - boff;
455 }
456 }
457 break;
458 default:
459 kprintf(" nfs_bioread: type %x unexpected\n",vp->v_type);
460 }
461 if (bp)
462 brelse(bp);
463 } while (error == 0 && uio->uio_resid > 0 && n > 0);
464 return (error);
465}
466
467/*
468 * Userland can supply any 'seek' offset when reading a NFS directory.
469 * Validate the structure so we don't panic the kernel. Note that
470 * the element name is nul terminated and the nul is not included
471 * in nfs_namlen.
472 */
473static
474int
475nfs_check_dirent(struct nfs_dirent *dp, int maxlen)
476{
477 int nfs_name_off = offsetof(struct nfs_dirent, nfs_name[0]);
478
479 if (nfs_name_off >= maxlen)
480 return (EINVAL);
481 if (dp->nfs_reclen < nfs_name_off || dp->nfs_reclen > maxlen)
482 return (EINVAL);
483 if (nfs_name_off + dp->nfs_namlen >= dp->nfs_reclen)
484 return (EINVAL);
485 if (dp->nfs_reclen & 3)
486 return (EINVAL);
487 return (0);
488}
489
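/*
 * For example, a record whose nfs_reclen is not a multiple of 4
 * fails the alignment test above, and a record with
 * nfs_namlen == nfs_reclen - nfs_name_off is rejected because the
 * terminating nul would not fit inside the record.
 */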
490/*
491 * Vnode op for write using bio
492 *
493 * nfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
494 * struct ucred *a_cred)
495 */
496int
497nfs_write(struct vop_write_args *ap)
498{
499 struct uio *uio = ap->a_uio;
500 struct thread *td = uio->uio_td;
501 struct vnode *vp = ap->a_vp;
502 struct nfsnode *np = VTONFS(vp);
503 int ioflag = ap->a_ioflag;
504 struct buf *bp;
505 struct vattr vattr;
506 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
507 off_t loffset;
508 int boff, bytes;
509 int error = 0;
510 int haverslock = 0;
511 int bcount;
512 int biosize;
513 int trivial;
514 int kflags = 0;
515
516#ifdef DIAGNOSTIC
517 if (uio->uio_rw != UIO_WRITE)
518 panic("nfs_write mode");
519 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
520 panic("nfs_write proc");
521#endif
522 if (vp->v_type != VREG)
523 return (EIO);
524
525 lwkt_gettoken(&nmp->nm_token);
526
527 if (np->n_flag & NWRITEERR) {
528 np->n_flag &= ~NWRITEERR;
529 lwkt_reltoken(&nmp->nm_token);
530 return (np->n_error);
531 }
532 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
533 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
534 (void)nfs_fsinfo(nmp, vp, td);
535 }
536
537 /*
538 * Synchronously flush pending buffers if we are in synchronous
539 * mode or if we are appending.
540 */
541 if (ioflag & (IO_APPEND | IO_SYNC)) {
542 if (np->n_flag & NLMODIFIED) {
543 np->n_attrstamp = 0;
544 error = nfs_flush(vp, MNT_WAIT, td, 0);
545 /* error = nfs_vinvalbuf(vp, V_SAVE, 1); */
546 if (error)
547 goto done;
548 }
549 }
550
551 /*
552 * If IO_APPEND then load uio_offset. We restart here if we cannot
553 * get the append lock.
554 */
555restart:
556 if (ioflag & IO_APPEND) {
557 np->n_attrstamp = 0;
558 error = VOP_GETATTR(vp, &vattr);
559 if (error)
560 goto done;
561 uio->uio_offset = np->n_size;
562 }
563
564 if (uio->uio_offset < 0) {
565 error = EINVAL;
566 goto done;
567 }
568 if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize) {
569 error = EFBIG;
570 goto done;
571 }
572 if (uio->uio_resid == 0) {
573 error = 0;
574 goto done;
575 }
576
577 /*
578 * We need to obtain the rslock if we intend to modify np->n_size
579 * in order to guarantee the append point with multiple contending
580 * writers, to guarantee that no other appenders modify n_size
581 * while we are trying to obtain a truncated buffer (i.e. to avoid
582 * accidentally truncating data written by another appender due to
583 * the race), and to ensure that the buffer is populated prior to
584 * our extending of the file. We hold rslock through the entire
585 * operation.
586 *
587 * Note that we do not synchronize the case where someone truncates
588 * the file while we are appending to it because attempting to lock
589 * this case may deadlock other parts of the system unexpectedly.
590 */
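/*
 * For instance, two O_APPEND writers can both sample
 * n_size == 4096 as their append point.  The rslock serializes
 * them: the loser returns ENOLCK from nfs_rslock() (see the switch
 * below), restarts, and re-samples n_size after the winner has
 * extended the file, so neither append is lost.
 */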
591 if ((ioflag & IO_APPEND) ||
592 uio->uio_offset + uio->uio_resid > np->n_size) {
593 switch(nfs_rslock(np)) {
594 case ENOLCK:
595 goto restart;
596 /* not reached */
597 case EINTR:
598 case ERESTART:
599 error = EINTR;
600 goto done;
601 /* not reached */
602 default:
603 break;
604 }
605 haverslock = 1;
606 }
607
608 /*
609 * Maybe this should be above the vnode op call, but so long as
610 * file servers have no limits, I don't think it matters
611 */
612 if (td && td->td_proc && uio->uio_offset + uio->uio_resid >
613 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
614 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
615 if (haverslock)
616 nfs_rsunlock(np);
617 error = EFBIG;
618 goto done;
619 }
620
621 biosize = vp->v_mount->mnt_stat.f_iosize;
622
623 do {
624 nfsstats.biocache_writes++;
625 boff = uio->uio_offset & (biosize-1);
626 loffset = uio->uio_offset - boff;
627 bytes = (int)szmin((unsigned)(biosize - boff), uio->uio_resid);
628again:
629 /*
630 * Handle direct append and file extension cases, calculate
631 * unaligned buffer size. When extending B_CACHE will be
632 * set if possible. See UIO_NOCOPY note below.
633 */
634 if (uio->uio_offset + bytes > np->n_size) {
635 np->n_flag |= NLMODIFIED;
636 trivial = (uio->uio_segflg != UIO_NOCOPY &&
637 uio->uio_offset <= np->n_size);
638 nfs_meta_setsize(vp, td, uio->uio_offset + bytes,
639 trivial);
640 kflags |= NOTE_EXTEND;
641 }
642 bp = nfs_getcacheblk(vp, loffset, biosize, td);
643 if (bp == NULL) {
644 error = EINTR;
645 break;
646 }
647
648 /*
649 * Actual bytes in buffer which we care about
650 */
651 if (loffset + biosize < np->n_size)
652 bcount = biosize;
653 else
654 bcount = (int)(np->n_size - loffset);
655
656 /*
657 * Avoid a read by setting B_CACHE where the data we
658 * intend to write covers the entire buffer. Note
659 * that the buffer may have been set to B_CACHE by
660 * nfs_meta_setsize() above or otherwise inherited the
661 * flag, but if B_CACHE isn't set the buffer may be
662 * uninitialized and must be zero'd to accommodate
663 * future seek+write's.
664 *
665 * See the comments in kern/vfs_bio.c's getblk() for
666 * more information.
667 *
668 * When doing a UIO_NOCOPY write the buffer is not
669 * overwritten and we cannot just set B_CACHE unconditionally
670 * for full-block writes.
671 */
672 if (boff == 0 && bytes == biosize &&
673 uio->uio_segflg != UIO_NOCOPY) {
674 bp->b_flags |= B_CACHE;
675 bp->b_flags &= ~(B_ERROR | B_INVAL);
676 }
677
678 /*
679 * b_resid may be set due to file EOF if we extended out.
680 * The NFS bio code will zero the difference anyway so
681 * just acknowledge the fact and set b_resid to 0.
682 */
683 if ((bp->b_flags & B_CACHE) == 0) {
684 bp->b_cmd = BUF_CMD_READ;
685 bp->b_bio2.bio_done = nfsiodone_sync;
686 bp->b_bio2.bio_flags |= BIO_SYNC;
687 vfs_busy_pages(vp, bp);
688 error = nfs_doio(vp, &bp->b_bio2, td);
689 if (error) {
690 brelse(bp);
691 break;
692 }
693 bp->b_resid = 0;
694 }
695 np->n_flag |= NLMODIFIED;
696 kflags |= NOTE_WRITE;
697
698 /*
699 * If dirtyend exceeds file size, chop it down. This should
700 * not normally occur but there is an append race where it
701 * might occur XXX, so we log it.
702 *
703 * If the chopping creates a reverse-indexed or degenerate
704 * situation with dirtyoff/end, we 0 both of them.
705 */
706 if (bp->b_dirtyend > bcount) {
707 kprintf("NFS append race @%08llx:%d\n",
708 (long long)bp->b_bio2.bio_offset,
709 bp->b_dirtyend - bcount);
710 bp->b_dirtyend = bcount;
711 }
712
713 if (bp->b_dirtyoff >= bp->b_dirtyend)
714 bp->b_dirtyoff = bp->b_dirtyend = 0;
715
716 /*
717 * If the new write will leave a contiguous dirty
718 * area, just update the b_dirtyoff and b_dirtyend,
719 * otherwise force a write rpc of the old dirty area.
720 *
721 * While it is possible to merge discontiguous writes due to
722 * our having a B_CACHE buffer ( and thus valid read data
723 * for the hole), we don't because it could lead to
724 * significant cache coherency problems with multiple clients,
725 * especially if locking is implemented later on.
726 *
727 * as an optimization we could theoretically maintain
728 * a linked list of discontinuous areas, but we would still
729 * have to commit them separately so there isn't much
730 * advantage to it except perhaps a bit of asynchronization.
731 */
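/*
 * Example: an existing dirty region [100,200) and a new write at
 * [150,300) overlap, so they merge to [100,300) in the update code
 * further down.  A new write at [300,400) satisfies
 * boff > b_dirtyend, so the old dirty region is pushed out with
 * bwrite() first and the buffer is re-obtained via 'goto again'.
 */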
732 if (bp->b_dirtyend > 0 &&
733 (boff > bp->b_dirtyend ||
734 (boff + bytes) < bp->b_dirtyoff)
735 ) {
736 if (bwrite(bp) == EINTR) {
737 error = EINTR;
738 break;
739 }
740 goto again;
741 }
742
743 error = uiomovebp(bp, bp->b_data + boff, bytes, uio);
744
745 /*
746 * Since this block is being modified, it must be written
747 * again and not just committed. Since write clustering does
748 * not work for the stage 1 data write, only the stage 2
749 * commit rpc, we have to clear B_CLUSTEROK as well.
750 */
751 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
752
753 if (error) {
754 brelse(bp);
755 break;
756 }
757
758 /*
759 * Only update dirtyoff/dirtyend if not a degenerate
760 * condition.
761 *
762 * The underlying VM pages have been marked valid by
763 * virtue of acquiring the bp. Because the entire buffer
764 * is marked dirty we do not have to worry about cleaning
765 * out the related dirty bits (and wouldn't really know
766 * how to deal with byte ranges anyway)
767 */
768 if (bytes) {
769 if (bp->b_dirtyend > 0) {
770 bp->b_dirtyoff = imin(boff, bp->b_dirtyoff);
771 bp->b_dirtyend = imax(boff + bytes,
772 bp->b_dirtyend);
773 } else {
774 bp->b_dirtyoff = boff;
775 bp->b_dirtyend = boff + bytes;
776 }
777 }
778
779 /*
780 * If the lease is non-cachable or IO_SYNC do bwrite().
781 *
782 * IO_INVAL appears to be unused. The idea appears to be
783 * to turn off caching in this case. Very odd. XXX
784 *
785 * If nfs_async is set bawrite() will use an unstable write
786 * (build dirty bufs on the server), so we might as well
787 * push it out with bawrite(). If nfs_async is not set we
788 * use bdwrite() to cache dirty bufs on the client.
789 */
790 if (ioflag & IO_SYNC) {
791 if (ioflag & IO_INVAL)
792 bp->b_flags |= B_NOCACHE;
793 error = bwrite(bp);
794 if (error)
795 break;
796 } else if (boff + bytes == biosize && nfs_async) {
797 bawrite(bp);
798 } else {
799 bdwrite(bp);
800 }
801 } while (uio->uio_resid > 0 && bytes > 0);
802
803 if (haverslock)
804 nfs_rsunlock(np);
805
806done:
807 nfs_knote(vp, kflags);
808 lwkt_reltoken(&nmp->nm_token);
809 return (error);
810}
811
812/*
813 * Get an nfs cache block.
814 *
815 * Allocate a new one if the block isn't currently in the cache
816 * and return the block marked busy. If the calling process is
817 * interrupted by a signal for an interruptible mount point, return
818 * NULL.
819 *
820 * The caller must carefully deal with the possible B_INVAL state of
821 * the buffer. nfs_startio() clears B_INVAL (and nfs_asyncio() clears it
822 * indirectly), so synchronous reads can be issued without worrying about
823 * the B_INVAL state. We have to be a little more careful when dealing
824 * with writes (see comments in nfs_write()) when extending a file past
825 * its EOF.
826 */
827static struct buf *
828nfs_getcacheblk(struct vnode *vp, off_t loffset, int size, struct thread *td)
829{
830 struct buf *bp;
831 struct mount *mp;
832 struct nfsmount *nmp;
833
834 mp = vp->v_mount;
835 nmp = VFSTONFS(mp);
836
837 if (nmp->nm_flag & NFSMNT_INT) {
838 bp = getblk(vp, loffset, size, GETBLK_PCATCH, 0);
839 while (bp == NULL) {
840 if (nfs_sigintr(nmp, NULL, td))
841 return (NULL);
842 bp = getblk(vp, loffset, size, 0, 2 * hz);
843 }
844 } else {
845 bp = getblk(vp, loffset, size, 0, 0);
846 }
847
848 /*
849 * bio2, the 'device' layer. Since BIOs use 64 bit byte offsets
850 * now, no translation is necessary.
851 */
852 bp->b_bio2.bio_offset = loffset;
853 return (bp);
854}
855
856/*
857 * Flush and invalidate all dirty buffers. If another process is already
858 * doing the flush, just wait for completion.
859 */
860int
861nfs_vinvalbuf(struct vnode *vp, int flags, int intrflg)
862{
863 struct nfsnode *np = VTONFS(vp);
864 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
865 int error = 0, slpflag, slptimeo;
866 thread_t td = curthread;
867
868 if (vp->v_flag & VRECLAIMED)
869 return (0);
870
871 if ((nmp->nm_flag & NFSMNT_INT) == 0)
872 intrflg = 0;
873 if (intrflg) {
874 slpflag = PCATCH;
875 slptimeo = 2 * hz;
876 } else {
877 slpflag = 0;
878 slptimeo = 0;
879 }
880 /*
881 * First wait for any other process doing a flush to complete.
882 */
883 while (np->n_flag & NFLUSHINPROG) {
884 np->n_flag |= NFLUSHWANT;
885 error = tsleep((caddr_t)&np->n_flag, 0, "nfsvinval", slptimeo);
886 if (error && intrflg && nfs_sigintr(nmp, NULL, td))
887 return (EINTR);
888 }
889
890 /*
891 * Now, flush as required.
892 */
893 np->n_flag |= NFLUSHINPROG;
894 error = vinvalbuf(vp, flags, slpflag, 0);
895 while (error) {
896 if (intrflg && nfs_sigintr(nmp, NULL, td)) {
897 np->n_flag &= ~NFLUSHINPROG;
898 if (np->n_flag & NFLUSHWANT) {
899 np->n_flag &= ~NFLUSHWANT;
900 wakeup((caddr_t)&np->n_flag);
901 }
902 return (EINTR);
903 }
904 error = vinvalbuf(vp, flags, 0, slptimeo);
905 }
906 np->n_flag &= ~(NLMODIFIED | NFLUSHINPROG);
907 if (np->n_flag & NFLUSHWANT) {
908 np->n_flag &= ~NFLUSHWANT;
909 wakeup((caddr_t)&np->n_flag);
910 }
911 return (0);
912}
913
914/*
915 * Return true (non-zero) if the txthread and rxthread are operational
916 * and we do not already have too many not-yet-started BIO's built up.
917 */
918int
919nfs_asyncok(struct nfsmount *nmp)
920{
921 return (nmp->nm_bioqlen < nfs_maxasyncbio &&
922 nmp->nm_bioqlen < nmp->nm_maxasync_scaled / NFS_ASYSCALE &&
923 nmp->nm_rxstate <= NFSSVC_PENDING &&
924 nmp->nm_txstate <= NFSSVC_PENDING);
925}
926
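/*
 * Typical caller pattern (this is what the read-ahead code in
 * nfs_bioread() above does):
 *
 *	if (nmp->nm_readahead > 0 && nfs_asyncok(nmp)) {
 *		rabp->b_cmd = BUF_CMD_READ;
 *		vfs_busy_pages(vp, rabp);
 *		nfs_asyncio(vp, &rabp->b_bio2);
 *	}
 */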
927/*
928 * The read-ahead code calls this to queue a bio to the txthread.
929 *
930 * We don't touch the bio otherwise... that is, we do not even
931 * construct or send the initial rpc. The txthread will do it
932 * for us.
933 *
934 * NOTE! nm_bioqlen is not decremented until the request completes,
935 * so it does not reflect the number of bio's on bioq.
936 */
937void
938nfs_asyncio(struct vnode *vp, struct bio *bio)
939{
940 struct buf *bp = bio->bio_buf;
941 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
942
943 KKASSERT(vp->v_tag == VT_NFS);
944 BUF_KERNPROC(bp);
945
946 /*
947 * Shortcut swap cache (not done automatically because we are not
948 * using bread()).
949 */
950 if (vn_cache_strategy(vp, bio))
951 return;
952
953 bio->bio_driver_info = vp;
954 crit_enter();
955 TAILQ_INSERT_TAIL(&nmp->nm_bioq, bio, bio_act);
956 atomic_add_int(&nmp->nm_bioqlen, 1);
957 crit_exit();
958 nfssvc_iod_writer_wakeup(nmp);
959}
960
961/*
962 * nfs_doio() - Execute a BIO operation synchronously. The BIO will be
963 * completed and its error returned. The caller is responsible
964 * for brelse()ing it. ONLY USE FOR BIO_SYNC IOs! Otherwise
965 * our error probe will be against an invalid pointer.
966 *
967 * nfs_startio()- Execute a BIO operation asynchronously.
968 *
969 * NOTE: nfs_asyncio() is used to initiate an asynchronous BIO operation,
970 * which basically just queues it to the txthread. nfs_startio()
971 * actually initiates the I/O AFTER it has gotten to the txthread.
972 *
973 * NOTE: td might be NULL.
974 *
975 * NOTE: Caller has already busied the I/O.
976 */
977void
978nfs_startio(struct vnode *vp, struct bio *bio, struct thread *td)
979{
980 struct buf *bp = bio->bio_buf;
981
982 KKASSERT(vp->v_tag == VT_NFS);
983
984 /*
985 * clear B_ERROR and B_INVAL state prior to initiating the I/O. We
986 * do this here so we do not have to do it in all the code that
987 * calls us.
988 */
989 bp->b_flags &= ~(B_ERROR | B_INVAL);
990
991 KASSERT(bp->b_cmd != BUF_CMD_DONE,
992 ("nfs_doio: bp %p already marked done!", bp));
993
994 if (bp->b_cmd == BUF_CMD_READ) {
995 switch (vp->v_type) {
996 case VREG:
997 nfsstats.read_bios++;
998 nfs_readrpc_bio(vp, bio);
999 break;
1000 case VLNK:
1001#if 0
1002 bio->bio_offset = 0;
1003 nfsstats.readlink_bios++;
1004 nfs_readlinkrpc_bio(vp, bio);
1005#else
1006 nfs_doio(vp, bio, td);
1007#endif
1008 break;
1009 case VDIR:
1010 /*
1011 * NOTE: If nfs_readdirplusrpc_bio() is requested but
1012 * not supported, it will chain to
1013 * nfs_readdirrpc_bio().
1014 */
1015#if 0
1016 nfsstats.readdir_bios++;
1017 uiop->uio_offset = bio->bio_offset;
1018 if (nmp->nm_flag & NFSMNT_RDIRPLUS)
1019 nfs_readdirplusrpc_bio(vp, bio);
1020 else
1021 nfs_readdirrpc_bio(vp, bio);
1022#else
1023 nfs_doio(vp, bio, td);
1024#endif
1025 break;
1026 default:
1027 kprintf("nfs_doio: type %x unexpected\n",vp->v_type);
1028 bp->b_flags |= B_ERROR;
1029 bp->b_error = EINVAL;
1030 biodone(bio);
1031 break;
1032 }
1033 } else {
1034 /*
1035 * If we only need to commit, try to commit. If this fails
1036 * it will chain through to the write. Basically all the logic
1037 * in nfs_doio() is replicated.
1038 */
1039 KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
1040 if (bp->b_flags & B_NEEDCOMMIT)
1041 nfs_commitrpc_bio(vp, bio);
1042 else
1043 nfs_writerpc_bio(vp, bio);
1044 }
1045}
1046
1047int
1048nfs_doio(struct vnode *vp, struct bio *bio, struct thread *td)
1049{
1050 struct buf *bp = bio->bio_buf;
1051 struct uio *uiop;
1052 struct nfsnode *np;
1053 struct nfsmount *nmp;
1054 int error = 0;
1055 int iomode, must_commit;
1056 size_t n;
1057 struct uio uio;
1058 struct iovec io;
1059
1060#if 0
1061 /*
1062 * Shortcut swap cache (not done automatically because we are not
1063 * using bread()).
1064 *
1065 * XXX The biowait is a hack until we can figure out how to stop a
1066 * biodone chain when a middle element is BIO_SYNC. BIO_SYNC is
1067 * set so the bp shouldn't get ripped out from under us. The only
1068 * use-cases are fully synchronous I/O cases.
1069 *
1070 * XXX This is having problems, give up for now.
1071 */
1072 if (vn_cache_strategy(vp, bio)) {
1073 error = biowait(&bio->bio_buf->b_bio1, "nfsrsw");
1074 return (error);
1075 }
1076#endif
1077
1078 KKASSERT(vp->v_tag == VT_NFS);
1079 np = VTONFS(vp);
1080 nmp = VFSTONFS(vp->v_mount);
1081 uiop = &uio;
1082 uiop->uio_iov = &io;
1083 uiop->uio_iovcnt = 1;
1084 uiop->uio_segflg = UIO_SYSSPACE;
1085 uiop->uio_td = td;
1086
1087 /*
1088 * clear B_ERROR and B_INVAL state prior to initiating the I/O. We
1089 * do this here so we do not have to do it in all the code that
1090 * calls us.
1091 */
1092 bp->b_flags &= ~(B_ERROR | B_INVAL);
1093
1094 KASSERT(bp->b_cmd != BUF_CMD_DONE,
1095 ("nfs_doio: bp %p already marked done!", bp));
1096
1097 if (bp->b_cmd == BUF_CMD_READ) {
1098 io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
1099 io.iov_base = bp->b_data;
1100 uiop->uio_rw = UIO_READ;
1101
1102 switch (vp->v_type) {
1103 case VREG:
1104 /*
1105 * When reading from a regular file zero-fill any residual.
1106 * Note that this residual has nothing to do with NFS short
1107 * reads, which nfs_readrpc_uio() will handle for us.
1108 *
1109 * We have to do this because when we are write extending
1110 * a file the server may not have the same notion of
1111 * filesize as we do. Our BIOs should already be sized
1112 * (b_bcount) to account for the file EOF.
1113 */
1114 nfsstats.read_bios++;
1115 uiop->uio_offset = bio->bio_offset;
1116 error = nfs_readrpc_uio(vp, uiop);
1117 if (error == 0 && uiop->uio_resid) {
1118 n = (size_t)bp->b_bcount - uiop->uio_resid;
1119 bzero(bp->b_data + n, bp->b_bcount - n);
1120 uiop->uio_resid = 0;
1121 }
1122 if (td && td->td_proc && (vp->v_flag & VTEXT) &&
1123 np->n_mtime != np->n_vattr.va_mtime.tv_sec) {
1124 uprintf("Process killed due to text file modification\n");
1125 ksignal(td->td_proc, SIGKILL);
1126 }
1127 break;
1128 case VLNK:
1129 uiop->uio_offset = 0;
1130 nfsstats.readlink_bios++;
1131 error = nfs_readlinkrpc_uio(vp, uiop);
1132 break;
1133 case VDIR:
1134 nfsstats.readdir_bios++;
1135 uiop->uio_offset = bio->bio_offset;
1136 if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
1137 error = nfs_readdirplusrpc_uio(vp, uiop);
1138 if (error == NFSERR_NOTSUPP)
1139 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1140 }
1141 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1142 error = nfs_readdirrpc_uio(vp, uiop);
1143 /*
1144 * end-of-directory sets B_INVAL but does not generate an
1145 * error.
1146 */
1147 if (error == 0 && uiop->uio_resid == bp->b_bcount)
1148 bp->b_flags |= B_INVAL;
1149 break;
1150 default:
1151 kprintf("nfs_doio: type %x unexpected\n",vp->v_type);
1152 break;
1153 }
1154 if (error) {
1155 bp->b_flags |= B_ERROR;
1156 bp->b_error = error;
1157 }
1158 bp->b_resid = uiop->uio_resid;
1159 } else {
1160 /*
1161 * If we only need to commit, try to commit.
1162 *
1163 * NOTE: The I/O has already been staged for the write and
1164 * its pages busied, so b_dirtyoff/end is valid.
1165 */
1166 KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
1167 if (bp->b_flags & B_NEEDCOMMIT) {
1168 int retv;
1169 off_t off;
1170
1171 off = bio->bio_offset + bp->b_dirtyoff;
1172 retv = nfs_commitrpc_uio(vp, off,
1173 bp->b_dirtyend - bp->b_dirtyoff,
1174 td);
1175 if (retv == 0) {
1176 bp->b_dirtyoff = bp->b_dirtyend = 0;
1177 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1178 bp->b_resid = 0;
1179 biodone(bio);
1180 return(0);
1181 }
1182 if (retv == NFSERR_STALEWRITEVERF) {
1183 nfs_clearcommit(vp->v_mount);
1184 }
1185 }
1186
1187 /*
1188 * Setup for actual write
1189 */
1190 if (bio->bio_offset + bp->b_dirtyend > np->n_size)
1191 bp->b_dirtyend = np->n_size - bio->bio_offset;
1192
1193 if (bp->b_dirtyend > bp->b_dirtyoff) {
1194 io.iov_len = uiop->uio_resid = bp->b_dirtyend
1195 - bp->b_dirtyoff;
1196 uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
1197 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1198 uiop->uio_rw = UIO_WRITE;
1199 nfsstats.write_bios++;
1200
1201 if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0)
1202 iomode = NFSV3WRITE_UNSTABLE;
1203 else
1204 iomode = NFSV3WRITE_FILESYNC;
1205
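/*
 * UNSTABLE lets the server reply once the data reaches its buffer
 * cache; the buffer is then marked B_NEEDCOMMIT below and a later
 * COMMIT rpc (see the B_NEEDCOMMIT path above) makes the data
 * durable.  FILESYNC requires the server to reach stable storage
 * before replying, so no commit step is needed afterwards.
 */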
1206 must_commit = 0;
1207 error = nfs_writerpc_uio(vp, uiop, &iomode, &must_commit);
1208
1209 /*
1210 * We no longer try to use kern/vfs_bio's cluster code to
1211 * cluster commits, so B_CLUSTEROK is no longer set with
1212 * B_NEEDCOMMIT. The problem is that a vfs_busy_pages()
1213 * may have to clear B_NEEDCOMMIT if it finds underlying
1214 * pages have been redirtied through a memory mapping
1215 * and doing this on a clustered bp will probably cause
1216 * a panic, plus the flag in the underlying NFS bufs
1217 * making up the cluster bp will not be properly cleared.
1218 */
1219 if (!error && iomode == NFSV3WRITE_UNSTABLE) {
1220 bp->b_flags |= B_NEEDCOMMIT;
1221#if 0
1222 /* XXX do not enable commit clustering */
1223 if (bp->b_dirtyoff == 0
1224 && bp->b_dirtyend == bp->b_bcount)
1225 bp->b_flags |= B_CLUSTEROK;
1226#endif
1227 } else {
1228 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1229 }
1230
1231 /*
1232 * For an interrupted write, the buffer is still valid
1233 * and the write hasn't been pushed to the server yet,
1234 * so we can't set B_ERROR and report the interruption
1235 * by setting B_EINTR. For the async case, B_EINTR
1236 * is not relevant, so the rpc attempt is essentially
1237 * a noop. For the case of a V3 write rpc not being
1238 * committed to stable storage, the block is still
1239 * dirty and requires either a commit rpc or another
1240 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1241 * the block is reused. This is indicated by setting
1242 * the B_DELWRI and B_NEEDCOMMIT flags.
1243 *
1244 * If the buffer is marked B_PAGING, it does not reside on
1245 * the vp's paging queues so we cannot call bdirty(). The
1246 * bp in this case is not an NFS cache block so we should
1247 * be safe. XXX
1248 */
1249 if (error == EINTR
1250 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1251 crit_enter();
1252 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1253 if ((bp->b_flags & B_PAGING) == 0)
1254 bdirty(bp);
1255 if (error)
1256 bp->b_flags |= B_EINTR;
1257 crit_exit();
1258 } else {
1259 if (error) {
1260 bp->b_flags |= B_ERROR;
1261 bp->b_error = np->n_error = error;
1262 np->n_flag |= NWRITEERR;
1263 }
1264 bp->b_dirtyoff = bp->b_dirtyend = 0;
1265 }
1266 if (must_commit)
1267 nfs_clearcommit(vp->v_mount);
1268 bp->b_resid = uiop->uio_resid;
1269 } else {
1270 bp->b_resid = 0;
1271 }
1272 }
1273
1274 /*
1275 * I/O was run synchronously, biodone() it and calculate the
1276 * error to return.
1277 */
1278 biodone(bio);
1279 KKASSERT(bp->b_cmd == BUF_CMD_DONE);
1280 if (bp->b_flags & B_EINTR)
1281 return (EINTR);
1282 if (bp->b_flags & B_ERROR)
1283 return (bp->b_error ? bp->b_error : EIO);
1284 return (0);
1285}
1286
1287/*
1288 * Handle all truncation, write-extend, and ftruncate()-extend operations
1289 * on the NFS client side.
1290 *
1291 * We use the new API in kern/vfs_vm.c to perform these operations in a
1292 * VM-friendly way. With this API VM pages are properly zeroed and pages
1293 * still mapped into the buffer straddling EOF are not invalidated.
1294 */
1295int
1296nfs_meta_setsize(struct vnode *vp, struct thread *td, off_t nsize, int trivial)
1297{
1298 struct nfsnode *np = VTONFS(vp);
1299 off_t osize;
1300 int biosize = vp->v_mount->mnt_stat.f_iosize;
1301 int error;
1302
1303 osize = np->n_size;
1304 np->n_size = nsize;
1305
1306 if (nsize < osize) {
1307 error = nvtruncbuf(vp, nsize, biosize, -1, 0);
1308 } else {
1309 error = nvextendbuf(vp, osize, nsize,
1310 biosize, biosize, -1, -1,
1311 trivial);
1312 }
1313 return(error);
1314}
1315
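/*
 * Example: shrinking a file from 100000 to 50000 bytes goes through
 * nvtruncbuf(), which throws away or truncates buffers beyond the
 * new EOF; growing it back to 100000 goes through nvextendbuf(),
 * which zero-fills the newly exposed range as needed.
 */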
1316/*
1317 * Synchronous completion for nfs_doio. Call bpdone() with elseit=FALSE.
1318 * Caller is responsible for brelse()'ing the bp.
1319 */
1320static void
1321nfsiodone_sync(struct bio *bio)
1322{
1323 bio->bio_flags = 0;
1324 bpdone(bio->bio_buf, 0);
1325}
1326
1327/*
1328 * nfs read rpc - BIO version
1329 */
1330void
1331nfs_readrpc_bio(struct vnode *vp, struct bio *bio)
1332{
1333 struct buf *bp = bio->bio_buf;
1334 u_int32_t *tl;
1335 struct nfsmount *nmp;
1336 int error = 0, len, tsiz;
1337 struct nfsm_info *info;
1338
1339 info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
1340 info->mrep = NULL;
1341 info->v3 = NFS_ISV3(vp);
1342
1343 nmp = VFSTONFS(vp->v_mount);
1344 tsiz = bp->b_bcount;
1345 KKASSERT(tsiz <= nmp->nm_rsize);
1346 if (bio->bio_offset + tsiz > nmp->nm_maxfilesize) {
1347 error = EFBIG;
1348 goto nfsmout;
1349 }
1350 nfsstats.rpccnt[NFSPROC_READ]++;
1351 len = tsiz;
1352 nfsm_reqhead(info, vp, NFSPROC_READ,
1353 NFSX_FH(info->v3) + NFSX_UNSIGNED * 3);
1354 ERROROUT(nfsm_fhtom(info, vp));
1355 tl = nfsm_build(info, NFSX_UNSIGNED * 3);
1356 if (info->v3) {
1357 txdr_hyper(bio->bio_offset, tl);
1358 *(tl + 2) = txdr_unsigned(len);
1359 } else {
1360 *tl++ = txdr_unsigned(bio->bio_offset);
1361 *tl++ = txdr_unsigned(len);
1362 *tl = 0;
1363 }
1364 info->bio = bio;
1365 info->done = nfs_readrpc_bio_done;
1366 nfsm_request_bio(info, vp, NFSPROC_READ, NULL,
1367 nfs_vpcred(vp, ND_READ));
1368 return;
1369nfsmout:
1370 kfree(info, M_NFSREQ);
1371 bp->b_error = error;
1372 bp->b_flags |= B_ERROR;
1373 biodone(bio);
1374}
1375
1376static void
1377nfs_readrpc_bio_done(nfsm_info_t info)
1378{
1379 struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
1380 struct bio *bio = info->bio;
1381 struct buf *bp = bio->bio_buf;
1382 u_int32_t *tl;
1383 int attrflag;
1384 int retlen;
1385 int eof;
1386 int error = 0;
1387
1388 KKASSERT(info->state == NFSM_STATE_DONE);
1389
1390 lwkt_gettoken(&nmp->nm_token);
1391
1392 ERROROUT(info->error);
1393 if (info->v3) {
1394 ERROROUT(nfsm_postop_attr(info, info->vp, &attrflag,
1395 NFS_LATTR_NOSHRINK));
1396 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
1397 eof = fxdr_unsigned(int, *(tl + 1));
1398 } else {
1399 ERROROUT(nfsm_loadattr(info, info->vp, NULL));
1400 eof = 0;
1401 }
1402 NEGATIVEOUT(retlen = nfsm_strsiz(info, nmp->nm_rsize));
1403 ERROROUT(nfsm_mtobio(info, bio, retlen));
1404 m_freem(info->mrep);
1405 info->mrep = NULL;
1406
1407 /*
1408 * No error occurred. If retlen is less than bcount, no EOF was
1409 * hit, and this is NFSv3, then a zero-fill short read occurred.
1410 *
1411 * For NFSv2 a short-read indicates EOF.
1412 */
1413 if (retlen < bp->b_bcount && info->v3 && eof == 0) {
1414 bzero(bp->b_data + retlen, bp->b_bcount - retlen);
1415 retlen = bp->b_bcount;
1416 }
1417
1418 /*
1419 * If we hit an EOF we still zero-fill, but return the expected
1420 * b_resid anyway. This should normally not occur since async
1421 * BIOs are not used for read-before-write case. Races against
1422 * the server can cause it though and we don't want to leave
1423 * garbage in the buffer.
1424 */
1425 if (retlen < bp->b_bcount) {
1426 bzero(bp->b_data + retlen, bp->b_bcount - retlen);
1427 }
1428 bp->b_resid = 0;
1429 /* bp->b_resid = bp->b_bcount - retlen; */
1430nfsmout:
1431 lwkt_reltoken(&nmp->nm_token);
1432 kfree(info, M_NFSREQ);
1433 if (error) {
1434 bp->b_error = error;
1435 bp->b_flags |= B_ERROR;
1436 }
1437 biodone(bio);
1438}
1439
1440/*
1441 * nfs write call - BIO version
1442 *
1443 * NOTE: Caller has already busied the I/O.
1444 */
1445void
1446nfs_writerpc_bio(struct vnode *vp, struct bio *bio)
1447{
1448 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1449 struct nfsnode *np = VTONFS(vp);
1450 struct buf *bp = bio->bio_buf;
1451 u_int32_t *tl;
1452 int len;
1453 int iomode;
1454 int error = 0;
1455 struct nfsm_info *info;
1456 off_t offset;
1457
1458 /*
1459 * Setup for actual write. Just clean up the bio if there
1460 * is nothing to do. b_dirtyoff/end have already been staged
1461 * by the bp's pages getting busied.
1462 */
1463 if (bio->bio_offset + bp->b_dirtyend > np->n_size)
1464 bp->b_dirtyend = np->n_size - bio->bio_offset;
1465
1466 if (bp->b_dirtyend <= bp->b_dirtyoff) {
1467 bp->b_resid = 0;
1468 biodone(bio);
1469 return;
1470 }
1471 len = bp->b_dirtyend - bp->b_dirtyoff;
1472 offset = bio->bio_offset + bp->b_dirtyoff;
1473 if (offset + len > nmp->nm_maxfilesize) {
1474 bp->b_flags |= B_ERROR;
1475 bp->b_error = EFBIG;
1476 biodone(bio);
1477 return;
1478 }
1479 bp->b_resid = len;
1480 nfsstats.write_bios++;
1481
1482 info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
1483 info->mrep = NULL;
1484 info->v3 = NFS_ISV3(vp);
1485 info->info_writerpc.must_commit = 0;
1486 if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0)
1487 iomode = NFSV3WRITE_UNSTABLE;
1488 else
1489 iomode = NFSV3WRITE_FILESYNC;
1490
1491 KKASSERT(len <= nmp->nm_wsize);
1492
1493 nfsstats.rpccnt[NFSPROC_WRITE]++;
1494 nfsm_reqhead(info, vp, NFSPROC_WRITE,
1495 NFSX_FH(info->v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1496 ERROROUT(nfsm_fhtom(info, vp));
1497 if (info->v3) {
1498 tl = nfsm_build(info, 5 * NFSX_UNSIGNED);
1499 txdr_hyper(offset, tl);
1500 tl += 2;
1501 *tl++ = txdr_unsigned(len);
1502 *tl++ = txdr_unsigned(iomode);
1503 *tl = txdr_unsigned(len);
1504 } else {
1505 u_int32_t x;
1506
1507 tl = nfsm_build(info, 4 * NFSX_UNSIGNED);
1508 /* Set both "begin" and "current" to non-garbage. */
1509 x = txdr_unsigned((u_int32_t)offset);
1510 *tl++ = x; /* "begin offset" */
1511 *tl++ = x; /* "current offset" */
1512 x = txdr_unsigned(len);
1513 *tl++ = x; /* total to this offset */
1514 *tl = x; /* size of this write */
1515 }
1516 ERROROUT(nfsm_biotom(info, bio, bp->b_dirtyoff, len));
1517 info->bio = bio;
1518 info->done = nfs_writerpc_bio_done;
1519 nfsm_request_bio(info, vp, NFSPROC_WRITE, NULL,
1520 nfs_vpcred(vp, ND_WRITE));
1521 return;
1522nfsmout:
1523 kfree(info, M_NFSREQ);
1524 bp->b_error = error;
1525 bp->b_flags |= B_ERROR;
1526 biodone(bio);
1527}
1528
1529static void
1530nfs_writerpc_bio_done(nfsm_info_t info)
1531{
1532 struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
1533 struct nfsnode *np = VTONFS(info->vp);
1534 struct bio *bio = info->bio;
1535 struct buf *bp = bio->bio_buf;
1536 int wccflag = NFSV3_WCCRATTR;
1537 int iomode = NFSV3WRITE_FILESYNC;
1538 int commit;
1539 int rlen;
1540 int error;
1541 int len = bp->b_resid; /* b_resid was set to shortened length */
1542 u_int32_t *tl;
1543
1544 lwkt_gettoken(&nmp->nm_token);
1545
1546 ERROROUT(info->error);
1547 if (info->v3) {
1548 /*
1549 * The write RPC returns a before and after mtime. The
1550 * nfsm_wcc_data() macro checks the before n_mtime
1551 * against the before time and stores the after time
1552 * in the nfsnode's cached vattr and n_mtime field.
1553 * The NRMODIFIED bit will be set if the before
1554 * time did not match the original mtime.
1555 */
1556 wccflag = NFSV3_WCCCHK;
1557 ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag));
1558 if (error == 0) {
1559 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED + NFSX_V3WRITEVERF));
1560 rlen = fxdr_unsigned(int, *tl++);
1561 if (rlen == 0) {
1562 error = NFSERR_IO;
1563 m_freem(info->mrep);
1564 info->mrep = NULL;
1565 goto nfsmout;
1566 } else if (rlen < len) {
1567#if 0
1568 /*
1569 * XXX what do we do here?
1570 */
1571 backup = len - rlen;
1572 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base - backup;
1573 uiop->uio_iov->iov_len += backup;
1574 uiop->uio_offset -= backup;
1575 uiop->uio_resid += backup;
1576 len = rlen;
1577#endif
1578 }
1579 commit = fxdr_unsigned(int, *tl++);
1580
1581 /*
1582 * Return the lowest commitment level
1583 * obtained by any of the RPCs.
1584 */
1585 if (iomode == NFSV3WRITE_FILESYNC)
1586 iomode = commit;
1587 else if (iomode == NFSV3WRITE_DATASYNC &&
1588 commit == NFSV3WRITE_UNSTABLE)
1589 iomode = commit;
1590 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1591 bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF);
1592 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1593 } else if (bcmp(tl, nmp->nm_verf, NFSX_V3WRITEVERF)) {
1594 info->info_writerpc.must_commit = 1;
1595 bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF);
1596 }
1597 }
1598 } else {
1599 ERROROUT(nfsm_loadattr(info, info->vp, NULL));
1600 }
1601 m_freem(info->mrep);
1602 info->mrep = NULL;
1603 len = 0;
1604nfsmout:
1605 if (info->vp->v_mount->mnt_flag & MNT_ASYNC)
1606 iomode = NFSV3WRITE_FILESYNC;
1607 bp->b_resid = len;
1608
1609 /*
1610 * End of RPC. Now clean up the bp.
1611 *
1612 * We no longer enable write clustering for commit operations,
1613 * See around line 1157 for a more detailed comment.
1614 */
1615 if (!error && iomode == NFSV3WRITE_UNSTABLE) {
1616 bp->b_flags |= B_NEEDCOMMIT;
1617#if 0
1618 /* XXX do not enable commit clustering */
1619 if (bp->b_dirtyoff == 0 && bp->b_dirtyend == bp->b_bcount)
1620 bp->b_flags |= B_CLUSTEROK;
1621#endif
1622 } else {
1623 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1624 }
1625
1626 /*
1627 * For an interrupted write, the buffer is still valid
1628 * and the write hasn't been pushed to the server yet,
1629 * so we can't set B_ERROR and report the interruption
1630 * by setting B_EINTR. For the async case, B_EINTR
1631 * is not relevant, so the rpc attempt is essentially
1632 * a noop. For the case of a V3 write rpc not being
1633 * committed to stable storage, the block is still
1634 * dirty and requires either a commit rpc or another
1635 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1636 * the block is reused. This is indicated by setting
1637 * the B_DELWRI and B_NEEDCOMMIT flags.
1638 *
1639 * If the buffer is marked B_PAGING, it does not reside on
1640 * the vp's paging queues so we cannot call bdirty(). The
1641 * bp in this case is not an NFS cache block so we should
1642 * be safe. XXX
1643 */
1644 if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1645 crit_enter();
1646 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1647 if ((bp->b_flags & B_PAGING) == 0)
1648 bdirty(bp);
1649 if (error)
1650 bp->b_flags |= B_EINTR;
1651 crit_exit();
1652 } else {
1653 if (error) {
1654 bp->b_flags |= B_ERROR;
1655 bp->b_error = np->n_error = error;
1656 np->n_flag |= NWRITEERR;
1657 }
1658 bp->b_dirtyoff = bp->b_dirtyend = 0;
1659 }
1660 if (info->info_writerpc.must_commit)
1661 nfs_clearcommit(info->vp->v_mount);
1662 lwkt_reltoken(&nmp->nm_token);
1663
1664 kfree(info, M_NFSREQ);
1665 if (error) {
1666 bp->b_flags |= B_ERROR;
1667 bp->b_error = error;
1668 }
1669 biodone(bio);
1670}
1671
1672/*
1673 * Nfs Version 3 commit rpc - BIO version
1674 *
1675 * This function issues the commit rpc and will chain to a write
1676 * rpc if necessary.
1677 */
1678void
1679nfs_commitrpc_bio(struct vnode *vp, struct bio *bio)
1680{
1681 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1682 struct buf *bp = bio->bio_buf;
1683 struct nfsm_info *info;
1684 int error = 0;
1685 u_int32_t *tl;
1686
1687 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
1688 bp->b_dirtyoff = bp->b_dirtyend = 0;
1689 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1690 bp->b_resid = 0;
1691 biodone(bio);
1692 return;
1693 }
1694
1695 info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
1696 info->mrep = NULL;
1697 info->v3 = 1;
1698
1699 nfsstats.rpccnt[NFSPROC_COMMIT]++;
1700 nfsm_reqhead(info, vp, NFSPROC_COMMIT, NFSX_FH(1));
1701 ERROROUT(nfsm_fhtom(info, vp));
1702 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
1703 txdr_hyper(bio->bio_offset + bp->b_dirtyoff, tl);
1704 tl += 2;
1705 *tl = txdr_unsigned(bp->b_dirtyend - bp->b_dirtyoff);
1706 info->bio = bio;
1707 info->done = nfs_commitrpc_bio_done;
1708 nfsm_request_bio(info, vp, NFSPROC_COMMIT, NULL,
1709 nfs_vpcred(vp, ND_WRITE));
1710 return;
1711nfsmout:
1712 /*
1713 * Chain to write RPC on (early) error
1714 */
1715 kfree(info, M_NFSREQ);
1716 nfs_writerpc_bio(vp, bio);
1717}
1718
1719static void
1720nfs_commitrpc_bio_done(nfsm_info_t info)
1721{
1722 struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
1723 struct bio *bio = info->bio;
1724 struct buf *bp = bio->bio_buf;
1725 u_int32_t *tl;
1726 int wccflag = NFSV3_WCCRATTR;
1727 int error = 0;
1728
1729 lwkt_gettoken(&nmp->nm_token);
1730
1731 ERROROUT(info->error);
1732 ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag));
1733 if (error == 0) {
1734 NULLOUT(tl = nfsm_dissect(info, NFSX_V3WRITEVERF));
1735 if (bcmp(nmp->nm_verf, tl, NFSX_V3WRITEVERF)) {
1736 bcopy(tl, nmp->nm_verf, NFSX_V3WRITEVERF);
1737 error = NFSERR_STALEWRITEVERF;
1738 }
1739 }
1740 m_freem(info->mrep);
1741 info->mrep = NULL;
1742
1743 /*
1744 * On completion we must chain to a write bio if an
1745 * error occurred.
1746 */
1747nfsmout:
1748 if (error == 0) {
1749 bp->b_dirtyoff = bp->b_dirtyend = 0;
1750 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1751 bp->b_resid = 0;
1752 biodone(bio);
1753 } else {
1754 nfs_writerpc_bio(info->vp, bio);
1755 }
1756 kfree(info, M_NFSREQ);
1757 lwkt_reltoken(&nmp->nm_token);
1758}
1759