2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1989, 1993
36 * The Regents of the University of California. All rights reserved.
38 * This code is derived from software contributed to Berkeley by
39 * Rick Macklem at The University of Guelph.
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. All advertising materials mentioning features or use of this software
50 * must display the following acknowledgement:
51 * This product includes software developed by the University of
52 * California, Berkeley and its contributors.
53 * 4. Neither the name of the University nor the names of its contributors
54 * may be used to endorse or promote products derived from this software
55 * without specific prior written permission.
57 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * These functions support the macros and help fiddle mbuf chains for
72 * the nfs op functions. They do things like create the rpc header and
73 * copy data between mbuf chains and uio lists.
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/kernel.h>
80 #include <sys/mount.h>
81 #include <sys/vnode.h>
82 #include <sys/nlookup.h>
83 #include <sys/namei.h>
85 #include <sys/socket.h>
87 #include <sys/malloc.h>
88 #include <sys/sysent.h>
89 #include <sys/syscall.h>
91 #include <sys/objcache.h>
94 #include <vm/vm_object.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_zone.h>
101 #include "nfsproto.h"
103 #include "nfsmount.h"
105 #include "xdr_subs.h"
106 #include "nfsm_subs.h"
109 #include <netinet/in.h>
/*
 * Last RPC transaction id handed out; incremented for each new request
 * header built in nfsm_rpchead().  NOTE(review): no locking is visible
 * here — presumably serialized by the caller or a coarse lock; confirm.
 */
111 static u_int32_t nfs_xid = 0;
114 * Create the header for an rpc request packet
115 * The hsiz is the size of the rest of the nfs request header.
116 * (just used to decide if a cluster is a good idea)
/*
 * Allocate the first mbuf for a new NFS request and prime the nfsm_info
 * cursor: mreq/mb point at the fresh mbuf, bpos at its data start.
 * hsiz is only an allocation-size hint (cluster vs. plain mbuf).
 * NOTE(review): intermediate source lines are missing from this view;
 * code kept byte-identical.
 */
119 nfsm_reqhead(nfsm_info_t info, struct vnode *vp, u_long procid, int hsiz)
121 	info->mb = m_getl(hsiz, MB_WAIT, MT_DATA, 0, NULL);
123 	info->mreq = info->mb;
124 	info->bpos = mtod(info->mb, caddr_t);
128 * Build the RPC header and fill in the authorization info.
129 * The authorization string argument is only used when the credentials
130 * come from outside of the kernel.
131 * Returns the head of the mbuf list.
/*
 * Build the ONC RPC call header (xid, rpc version, program, version,
 * procedure), append the credential (either AUTH_UNIX synthesized from
 * *cr, or an externally supplied auth_str of auth_type), an optional
 * Kerberos verifier, and finally chain on the request body mrest.
 * The assembled packet-header lengths are fixed up at the end.
 * NOTE(review): many intermediate lines (declarations, else-branches,
 * returns) are missing from this view; code kept byte-identical.
 */
134 nfsm_rpchead(struct ucred *cr, int nmflag, int procid, int auth_type,
135 int auth_len, char *auth_str, int verf_len, char *verf_str,
136 struct mbuf *mrest, int mrest_len, struct mbuf **mbp,
139 struct nfsm_info info;
142 int siz, grpsiz, authsiz, dsiz;
/* Round the auth length up to a 4-byte XDR boundary. */
145 authsiz = nfsm_rndup(auth_len);
146 dsiz = authsiz + 10 * NFSX_UNSIGNED;
147 info.mb = m_getl(dsiz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
148 if (dsiz < MINCLSIZE) {
150 MH_ALIGN(info.mb, dsiz);
152 MH_ALIGN(info.mb, 8 * NFSX_UNSIGNED);
154 info.mb->m_len = info.mb->m_pkthdr.len = 0;
156 info.bpos = mtod(info.mb, caddr_t);
159 * First the RPC header.
161 tl = nfsm_build(&info, 8 * NFSX_UNSIGNED);
163 /* Get a pretty random xid to start with */
167 * Skip zero xid if it should ever happen.
172 *tl++ = *xidp = txdr_unsigned(nfs_xid);
175 *tl++ = txdr_unsigned(NFS_PROG);
/* NFSv3 passes procedure numbers straight through; v2 remaps them. */
176 if (nmflag & NFSMNT_NFSV3)
177 *tl++ = txdr_unsigned(NFS_VER3);
179 *tl++ = txdr_unsigned(NFS_VER2);
180 if (nmflag & NFSMNT_NFSV3)
181 *tl++ = txdr_unsigned(procid);
183 *tl++ = txdr_unsigned(nfsv2_procid[procid]);
186 * And then the authorization cred.
188 *tl++ = txdr_unsigned(auth_type);
189 *tl = txdr_unsigned(authsiz);
/* AUTH_UNIX body: stamp, machine name, uid, gid, supplementary gids. */
192 tl = nfsm_build(&info, auth_len);
193 *tl++ = 0; /* stamp ?? */
194 *tl++ = 0; /* NULL hostname */
195 *tl++ = txdr_unsigned(cr->cr_uid);
196 *tl++ = txdr_unsigned(cr->cr_groups[0]);
/* 5 = stamp + hostname + uid + gid + gid-count words already emitted. */
197 grpsiz = (auth_len >> 2) - 5;
198 *tl++ = txdr_unsigned(grpsiz);
199 for (i = 1; i <= grpsiz; i++)
200 *tl++ = txdr_unsigned(cr->cr_groups[i]);
/* External credential string: copy it across mbuf boundaries. */
205 if (M_TRAILINGSPACE(info.mb) == 0) {
206 mb2 = m_getl(siz, MB_WAIT, MT_DATA, 0, NULL);
208 info.mb->m_next = mb2;
210 info.bpos = mtod(info.mb, caddr_t);
212 i = min(siz, M_TRAILINGSPACE(info.mb));
213 bcopy(auth_str, info.bpos, i);
/* XDR-pad the credential out to a 4-byte boundary. */
219 if ((siz = (nfsm_rndup(auth_len) - auth_len)) > 0) {
220 for (i = 0; i < siz; i++)
222 info.mb->m_len += siz;
228 * And the verifier...
230 tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
232 *tl++ = txdr_unsigned(RPCAUTH_KERB4);
233 *tl = txdr_unsigned(verf_len);
236 if (M_TRAILINGSPACE(info.mb) == 0) {
237 mb2 = m_getl(siz, MB_WAIT, MT_DATA,
240 info.mb->m_next = mb2;
242 info.bpos = mtod(info.mb, caddr_t);
244 i = min(siz, M_TRAILINGSPACE(info.mb));
245 bcopy(verf_str, info.bpos, i);
/* XDR-pad the verifier as well. */
251 if ((siz = (nfsm_rndup(verf_len) - verf_len)) > 0) {
252 for (i = 0; i < siz; i++)
254 info.mb->m_len += siz;
257 *tl++ = txdr_unsigned(RPCAUTH_NULL);
/* Chain the request body on and fix up the packet header length. */
260 info.mb->m_next = mrest;
261 info.mreq->m_pkthdr.len = authsiz + 10 * NFSX_UNSIGNED + mrest_len;
262 info.mreq->m_pkthdr.rcvif = NULL;
/*
 * Reserve 'bytes' of contiguous space at the current build position,
 * appending a fresh mbuf when the current one lacks trailing space,
 * and advance mb->m_len/bpos past the reservation.  Panics if a single
 * request for more than MLEN bytes is made against a plain mbuf.
 * NOTE(review): the return statement is missing from this view;
 * presumably returns the old bpos — confirm against full source.
 */
268 nfsm_build(nfsm_info_t info, int bytes)
273 if (bytes > M_TRAILINGSPACE(info->mb)) {
274 MGET(mb2, MB_WAIT, MT_DATA);
276 panic("build > MLEN");
277 info->mb->m_next = mb2;
280 info->bpos = mtod(info->mb, caddr_t);
283 info->mb->m_len += bytes;
290 * If NULL returned caller is expected to abort with an EBADRPC error.
291 * Caller will usually use the NULLOUT macro.
/*
 * Return a pointer to 'bytes' of contiguous reply data at the current
 * dissect position, using nfsm_disct() to coalesce when the data spans
 * mbufs.  On failure the caller aborts with EBADRPC (see NULLOUT).
 * NOTE(review): surrounding lines are missing from this view.
 */
294 nfsm_dissect(nfsm_info_t info, int bytes)
/* n = bytes remaining in the current mbuf past dpos. */
301 n = mtod(info->md, caddr_t) + info->md->m_len - info->dpos;
306 error = nfsm_disct(&info->md, &info->dpos, bytes, n, &cp2);
320 * Caller is expected to abort if non-zero error is returned.
/*
 * Emit vp's NFS file handle into the request being built.  For V3 a
 * counted, zero-padded handle is emitted (via nfsm_strtmbuf() when it
 * does not fit contiguously); for V2 a fixed NFSX_V2FH handle is
 * copied.  Caller aborts on non-zero error.
 * NOTE(review): v3/v2 branch structure is partially missing from this
 * view; code kept byte-identical.
 */
323 nfsm_fhtom(nfsm_info_t info, struct vnode *vp)
331 n = nfsm_rndup(VTONFS(vp)->n_fhsize) + NFSX_UNSIGNED;
332 if (n <= M_TRAILINGSPACE(info->mb)) {
333 tl = nfsm_build(info, n);
334 *tl++ = txdr_unsigned(VTONFS(vp)->n_fhsize);
/* Pre-zero the final word so the XDR pad bytes are clean. */
335 *(tl + ((n >> 2) - 2)) = 0;
336 bcopy((caddr_t)VTONFS(vp)->n_fhp,(caddr_t)tl,
337 VTONFS(vp)->n_fhsize);
339 } else if ((error = nfsm_strtmbuf(&info->mb, &info->bpos,
340 (caddr_t)VTONFS(vp)->n_fhp,
341 VTONFS(vp)->n_fhsize)) != 0) {
346 cp = nfsm_build(info, NFSX_V2FH);
347 bcopy(VTONFS(vp)->n_fhp, cp, NFSX_V2FH);
/*
 * Server side: emit a file handle into the reply.  V3 sends a length
 * word followed by the fixed NFSX_V3FH handle; V2 sends the bare
 * NFSX_V2FH handle.
 */
354 nfsm_srvfhtom(nfsm_info_t info, fhandle_t *fhp)
359 tl = nfsm_build(info, NFSX_UNSIGNED + NFSX_V3FH);
360 *tl++ = txdr_unsigned(NFSX_V3FH);
361 bcopy(fhp, tl, NFSX_V3FH);
363 tl = nfsm_build(info, NFSX_V2FH);
364 bcopy(fhp, tl, NFSX_V2FH);
/*
 * Server side: emit a V3 post-op file handle — a "handle follows"
 * boolean, the handle length, then the handle itself.
 * NOTE(review): the TRUE-boolean emission line is missing from this
 * view; code kept byte-identical.
 */
369 nfsm_srvpostop_fh(nfsm_info_t info, fhandle_t *fhp)
373 tl = nfsm_build(info, 2 * NFSX_UNSIGNED + NFSX_V3FH);
375 *tl++ = txdr_unsigned(NFSX_V3FH);
376 bcopy(fhp, tl, NFSX_V3FH);
380 * Caller is expected to abort if non-zero error is returned.
382 * NOTE: (*vpp) may be loaded with a valid vnode even if (*gotvpp)
383 * winds up 0. The caller is responsible for dealing with (*vpp).
/*
 * Parse an optional file handle + attributes from the reply stream.
 * If a handle follows, nget the vnode via nfs_nget(); then parse the
 * optional post-op attributes and load them into the vnode's attr
 * cache.  *gotvpp reports whether a usable vnode/attr pair resulted;
 * (*vpp) may hold a vnode even when *gotvpp ends up 0 (caller cleans
 * up).  Caller aborts on non-zero error.
 * NOTE(review): several branch lines are missing from this view.
 */
386 nfsm_mtofh(nfsm_info_t info, struct vnode *dvp, struct vnode **vpp, int *gotvpp)
388 struct nfsnode *ttnp;
/* V3: "handle follows" boolean. */
395 tl = nfsm_dissect(info, NFSX_UNSIGNED);
398 *gotvpp = fxdr_unsigned(int, *tl);
403 NEGATIVEOUT(ttfhsize = nfsm_getfh(info, &ttfhp));
404 error = nfs_nget(dvp->v_mount, ttfhp, ttfhsize, &ttnp);
/* V3: "attributes follow" boolean; skip the fattr if we have no vnode. */
413 tl = nfsm_dissect(info, NFSX_UNSIGNED);
417 *gotvpp = fxdr_unsigned(int, *tl);
418 } else if (fxdr_unsigned(int, *tl)) {
419 error = nfsm_adv(info, NFSX_V3FATTR);
425 error = nfsm_loadattr(info, *vpp, NULL);
432 * Caller is expected to abort with EBADRPC if a negative length is returned.
/*
 * Parse a counted file handle from the reply, validating the length
 * against NFSX_V3FHMAX, and point *fhpp at the (rounded-up) handle
 * data in place.  Caller aborts with EBADRPC on a negative return.
 */
435 nfsm_getfh(nfsm_info_t info, nfsfh_t **fhpp)
442 tl = nfsm_dissect(info, NFSX_UNSIGNED);
445 if ((n = fxdr_unsigned(int, *tl)) <= 0 || n > NFSX_V3FHMAX) {
453 *fhpp = nfsm_dissect(info, nfsm_rndup(n));
460 * Caller is expected to abort if a non-zero error is returned.
/*
 * Parse file attributes from the reply and load them into vp's
 * attribute cache (and *vap if non-NULL).  Caller aborts on non-zero
 * error.
 */
463 nfsm_loadattr(nfsm_info_t info, struct vnode *vp, struct vattr *vap)
467 error = nfs_loadattrcache(vp, &info->md, &info->dpos, vap, 0);
477 * Caller is expected to abort if a non-zero error is returned.
/*
 * Parse V3 post-operation attributes: an "attributes follow" boolean
 * (returned in *attrp) and, if set, the fattr which is loaded into
 * vp's attribute cache with the given lflags.  Caller aborts on
 * non-zero error.
 */
480 nfsm_postop_attr(nfsm_info_t info, struct vnode *vp, int *attrp, int lflags)
485 tl = nfsm_dissect(info, NFSX_UNSIGNED);
488 *attrp = fxdr_unsigned(int, *tl);
490 error = nfs_loadattrcache(vp, &info->md, &info->dpos,
503 * Caller is expected to abort if a non-zero error is returned.
/*
 * Parse V3 weak cache consistency data: optional pre-op attributes
 * (size/mtime/ctime) followed by post-op attributes.  Compares the
 * pre-op mtime against our cached n_mtime; a mismatch means someone
 * else modified the file, so NRMODIFIED is set.  Caller aborts on
 * non-zero error.
 * NOTE(review): several lines are missing from this view.
 */
506 nfsm_wcc_data(nfsm_info_t info, struct vnode *vp, int *attrp)
/* "pre-op attributes follow" boolean. */
513 tl = nfsm_dissect(info, NFSX_UNSIGNED);
516 if (*tl == nfs_true) {
517 tl = nfsm_dissect(info, 6 * NFSX_UNSIGNED);
/* ttretf: non-zero if the server's pre-op mtime matches our cache. */
521 ttretf = (VTONFS(vp)->n_mtime ==
522 fxdr_unsigned(u_int32_t, *(tl + 2)));
524 VTONFS(vp)->n_flag |= NRMODIFIED;
526 error = nfsm_postop_attr(info, vp, &ttattrf,
527 NFS_LATTR_NOSHRINK|NFS_LATTR_NOMTIMECHECK);
531 error = nfsm_postop_attr(info, vp, &ttattrf,
544 * This function updates the attribute cache based on data returned in the
545 * NFS reply for NFS RPCs that modify the target file. If the RPC succeeds
546 * a 'before' and 'after' mtime is returned that allows us to determine if
547 * the new mtime attribute represents our modification or someone else's
550 * The flag argument returns non-0 if the original times matched, zero if
551 * they did not match. NRMODIFIED is automatically set if the before time
552 * does not match the original n_mtime, and n_mtime is automatically updated
553 * to the new after time (by nfsm_postop_attr()).
555 * If full is true, set all fields, otherwise just set mode and time fields
/*
 * Build a V3 sattr3 structure into the request from *vap.  Each field
 * is an XDR discriminated union: a TRUE word followed by the value
 * when the field is being set, or a FALSE/DONTCHANGE word otherwise.
 * If 'full' is false, only mode and the time fields are considered.
 * Comparing tv_sec against time_second selects TOCLIENT (explicit
 * time) vs TOSERVER (server stamps its own time).
 * NOTE(review): the TRUE/FALSE word stores between the nfsm_build()
 * calls are missing from this view; code kept byte-identical.
 */
558 nfsm_v3attrbuild(nfsm_info_t info, struct vattr *vap, int full)
562 if (vap->va_mode != (mode_t)VNOVAL) {
563 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
565 *tl = txdr_unsigned(vap->va_mode);
567 tl = nfsm_build(info, NFSX_UNSIGNED);
570 if (full && vap->va_uid != (uid_t)VNOVAL) {
571 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
573 *tl = txdr_unsigned(vap->va_uid);
575 tl = nfsm_build(info, NFSX_UNSIGNED);
578 if (full && vap->va_gid != (gid_t)VNOVAL) {
579 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
581 *tl = txdr_unsigned(vap->va_gid);
583 tl = nfsm_build(info, NFSX_UNSIGNED);
586 if (full && vap->va_size != VNOVAL) {
587 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
589 txdr_hyper(vap->va_size, tl);
591 tl = nfsm_build(info, NFSX_UNSIGNED);
594 if (vap->va_atime.tv_sec != VNOVAL) {
595 if (vap->va_atime.tv_sec != time_second) {
596 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
597 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
598 txdr_nfsv3time(&vap->va_atime, tl);
600 tl = nfsm_build(info, NFSX_UNSIGNED);
601 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
604 tl = nfsm_build(info, NFSX_UNSIGNED);
605 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
607 if (vap->va_mtime.tv_sec != VNOVAL) {
608 if (vap->va_mtime.tv_sec != time_second) {
609 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
610 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
611 txdr_nfsv3time(&vap->va_mtime, tl);
613 tl = nfsm_build(info, NFSX_UNSIGNED);
614 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
617 tl = nfsm_build(info, NFSX_UNSIGNED);
618 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
623 * Caller is expected to abort with EBADRPC if a negative length is returned.
/*
 * Parse a string length word from the reply and bounds-check it
 * against maxlen.  Caller aborts with EBADRPC on a negative return.
 */
626 nfsm_strsiz(nfsm_info_t info, int maxlen)
631 tl = nfsm_dissect(info, NFSX_UNSIGNED);
634 len = fxdr_unsigned(int32_t, *tl);
635 if (len < 0 || len > maxlen)
641 * Caller is expected to abort if a negative length is returned, but also
642 * call nfsm_reply(0) if -2 is returned.
644 * This function sets *errorp. Caller should not modify the error code.
/*
 * Server side: parse and bounds-check a string length word.  Sets
 * *errorp on a bad length; caller aborts on a negative return and
 * additionally calls nfsm_reply(0) when -2 is returned.
 */
647 nfsm_srvstrsiz(nfsm_info_t info, int maxlen, int *errorp)
652 tl = nfsm_dissect(info, NFSX_UNSIGNED);
657 len = fxdr_unsigned(int32_t,*tl);
658 if (len > maxlen || len <= 0) {
666 * Caller is expected to abort if a negative length is returned, but also
667 * call nfsm_reply(0) if -2 is returned.
669 * This function sets *errorp. Caller should not modify the error code.
/*
 * Server side: parse a filename length word, flagging names longer
 * than NFS_MAXNAMLEN with NFSERR_NAMETOL.  Sets *errorp; caller aborts
 * on a negative return and calls nfsm_reply(0) when -2 is returned.
 */
672 nfsm_srvnamesiz(nfsm_info_t info, int *errorp)
677 tl = nfsm_dissect(info, NFSX_UNSIGNED);
684 * In this case if *errorp is not EBADRPC and we are NFSv3,
685 * nfsm_reply() will not return a negative number. But all
686 * call cases assume len is valid so we really do want
689 len = fxdr_unsigned(int32_t,*tl);
690 if (len > NFS_MAXNAMLEN)
691 *errorp = NFSERR_NAMETOL;
700 * Caller is expected to abort if a non-zero error is returned.
/*
 * Copy 'len' bytes from the reply mbuf chain into the uio scatter/
 * gather list.  Caller aborts on non-zero error.
 */
703 nfsm_mtouio(nfsm_info_t info, struct uio *uiop, int len)
708 (error = nfsm_mbuftouio(&info->md, uiop, len, &info->dpos)) != 0) {
717 * Caller is expected to abort if a non-zero error is returned.
/*
 * Copy 'len' bytes from the reply mbuf chain into the bio's buffer.
 * Caller aborts on non-zero error.
 */
720 nfsm_mtobio(nfsm_info_t info, struct bio *bio, int len)
725 (error = nfsm_mbuftobio(&info->md, bio, len, &info->dpos)) != 0) {
734 * Caller is expected to abort if a non-zero error is returned.
/*
 * Copy 'len' bytes from the uio into the request mbuf chain being
 * built.  Caller aborts on non-zero error.
 */
737 nfsm_uiotom(nfsm_info_t info, struct uio *uiop, int len)
741 if ((error = nfsm_uiotombuf(uiop, &info->mb, len, &info->bpos)) != 0) {
750 * Caller is expected to abort if a negative value is returned. This
751 * function sets *errorp. Caller should not modify the error code.
753 * We load up the remaining info fields and run the request state
754 * machine until it is done.
756 * This call runs the entire state machine and does not return until
757 * the command is complete.
/*
 * Synchronously run an NFS RPC: fill in the remaining info fields and
 * drive the request state machine from SETUP through DONE.  Sets
 * *errorp (masking off NFSERR_RETERR); caller aborts on a negative
 * return.
 * NOTE(review): cred/td field assignments are missing from this view.
 */
760 nfsm_request(nfsm_info_t info, struct vnode *vp, int procnum,
761 thread_t td, struct ucred *cred, int *errorp)
763 info->state = NFSM_STATE_SETUP;
764 info->procnum = procnum;
769 info->nmp = VFSTONFS(vp->v_mount);
771 *errorp = nfs_request(info, NFSM_STATE_SETUP, NFSM_STATE_DONE);
773 if ((*errorp & NFSERR_RETERR) == 0)
775 *errorp &= ~NFSERR_RETERR;
781 * This call starts the state machine through the initial transmission.
782 * Completion is via the bio. The info structure must have installed
785 * If we are unable to do the initial tx we generate the bio completion
/*
 * Asynchronously start an NFS RPC: run the state machine only through
 * the initial transmission (WAITREPLY); completion is delivered via
 * the bio installed in info.  If the initial transmit fails outright
 * the bio is completed here with B_ERROR.
 * NOTE(review): cred/td assignments and the bio completion call are
 * missing from this view.
 */
789 nfsm_request_bio(nfsm_info_t info, struct vnode *vp, int procnum,
790 thread_t td, struct ucred *cred)
795 info->state = NFSM_STATE_SETUP;
796 info->procnum = procnum;
800 info->nmp = VFSTONFS(vp->v_mount);
802 error = nfs_request(info, NFSM_STATE_SETUP, NFSM_STATE_WAITREPLY);
/* EINPROGRESS is the expected "async tx started" result. */
803 if (error != EINPROGRESS) {
804 kprintf("nfsm_request_bio: early abort %d\n", error);
805 bp = info->bio->bio_buf;
807 bp->b_flags |= B_ERROR;
814 * Caller is expected to abort if a non-zero error is returned.
/*
 * Emit a counted, XDR-padded string into the request.  Returns
 * ENAMETOOLONG when len exceeds maxlen; falls back to nfsm_strtmbuf()
 * when the string does not fit contiguously.  Caller aborts on
 * non-zero error.
 */
817 nfsm_strtom(nfsm_info_t info, const void *data, int len, int maxlen)
826 return(ENAMETOOLONG);
828 n = nfsm_rndup(len) + NFSX_UNSIGNED;
829 if (n <= M_TRAILINGSPACE(info->mb)) {
830 tl = nfsm_build(info, n);
831 *tl++ = txdr_unsigned(len);
/* Pre-zero the final word so the XDR pad bytes are clean. */
832 *(tl + ((n >> 2) - 2)) = 0;
833 bcopy(data, tl, len);
836 error = nfsm_strtmbuf(&info->mb, &info->bpos, data, len);
846 * Caller is expected to abort if a negative value is returned. This
847 * function sets *errorp. Caller should not modify the error code.
/*
 * Server side: record the request status in nfsd and build the reply
 * header via nfs_rephead().  Sets *errorp; caller aborts on a negative
 * return.  For V2 (or EBADRPC) any error terminates the reply here.
 * NOTE(review): lines between the two error checks are missing from
 * this view.
 */
850 nfsm_reply(nfsm_info_t info,
851 struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
852 int siz, int *errorp)
854 nfsd->nd_repstat = *errorp;
855 if (*errorp && !(nfsd->nd_flag & ND_NFSV3))
857 nfs_rephead(siz, nfsd, slp, *errorp, &info->mreq,
858 &info->mb, &info->bpos);
859 if (info->mrep != NULL) {
863 if (*errorp && (!(nfsd->nd_flag & ND_NFSV3) || *errorp == EBADRPC)) {
/*
 * Server side: like nfsm_reply() but for the write path — record the
 * status and build the reply header.  For V2 any error terminates
 * the reply.
 */
871 nfsm_writereply(nfsm_info_t info,
872 struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
875 nfsd->nd_repstat = error;
876 if (error && !(info->v3))
878 nfs_rephead(siz, nfsd, slp, error, &info->mreq, &info->mb, &info->bpos);
882 * Caller is expected to abort if a non-zero error is returned.
/*
 * Skip forward 'len' bytes in the reply stream, crossing mbuf
 * boundaries via nfs_adv() when needed.  Caller aborts on non-zero
 * error.
 */
885 nfsm_adv(nfsm_info_t info, int len)
/* n = bytes remaining in the current mbuf past dpos. */
890 n = mtod(info->md, caddr_t) + info->md->m_len - info->dpos;
894 } else if ((error = nfs_adv(&info->md, &info->dpos, len, n)) != 0) {
902 * Caller is expected to abort if a negative length is returned, but also
903 * call nfsm_reply(0) if -2 is returned.
905 * This function sets *errorp. Caller should not modify the error code.
/*
 * Server side: parse the file handle out of an incoming request into
 * *fhp.  For V3 the handle is counted (0 or NFSX_V3FH accepted; a
 * zero length yields a zeroed handle).  Sets *errorp; caller aborts on
 * a negative return and calls nfsm_reply(0) when -2 is returned.
 * NOTE(review): the V2 branch is missing from this view.
 */
908 nfsm_srvmtofh(nfsm_info_t info, struct nfsrv_descript *nfsd,
909 fhandle_t *fhp, int *errorp)
914 if (nfsd->nd_flag & ND_NFSV3) {
915 tl = nfsm_dissect(info, NFSX_UNSIGNED);
920 fhlen = fxdr_unsigned(int, *tl);
921 if (fhlen != 0 && fhlen != NFSX_V3FH) {
929 tl = nfsm_dissect(info, fhlen);
934 bcopy(tl, fhp, fhlen);
936 bzero(fhp, NFSX_V3FH);
/*
 * Helper behind the nfsm_clget macro: close out the current cluster
 * mbuf (mp1) and allocate a fresh one, resetting the bp/be window
 * pointers into its data area.
 * NOTE(review): signature continuation and linkage lines are missing
 * from this view; code kept byte-identical.
 */
942 _nfsm_clget(nfsm_info_t info, struct mbuf *mp1, struct mbuf *mp2,
949 mp1->m_len += bp - info->bpos;
950 mp1 = m_getcl(MB_WAIT, MT_DATA, 0);
951 mp1->m_len = MCLBYTES;
954 bp = mtod(mp1, caddr_t);
955 be = bp + mp1->m_len;
957 tl = (u_int32_t *)bp;
/*
 * Server side: parse a V3 sattr3 structure from the request into
 * *vap.  Each field is a TRUE/value XDR union; atime/mtime carry a
 * TOCLIENT (explicit time) / TOSERVER (stamp now) discriminator.
 * NULLOUT aborts the caller with EBADRPC on truncated input.
 * NOTE(review): break statements and the function tail are missing
 * from this view; code kept byte-identical.
 */
962 nfsm_srvsattr(nfsm_info_t info, struct vattr *vap)
967 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
968 if (*tl == nfs_true) {
969 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
970 vap->va_mode = nfstov_mode(*tl);
972 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
973 if (*tl == nfs_true) {
974 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
975 vap->va_uid = fxdr_unsigned(uid_t, *tl);
977 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
978 if (*tl == nfs_true) {
979 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
980 vap->va_gid = fxdr_unsigned(gid_t, *tl);
982 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
983 if (*tl == nfs_true) {
/* size is a 64-bit hyper: two XDR words. */
984 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
985 vap->va_size = fxdr_hyper(tl);
987 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
988 switch (fxdr_unsigned(int, *tl)) {
989 case NFSV3SATTRTIME_TOCLIENT:
990 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
991 fxdr_nfsv3time(tl, &vap->va_atime);
993 case NFSV3SATTRTIME_TOSERVER:
994 getnanotime(&vap->va_atime);
997 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
998 switch (fxdr_unsigned(int, *tl)) {
999 case NFSV3SATTRTIME_TOCLIENT:
1000 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
1001 fxdr_nfsv3time(tl, &vap->va_mtime);
1003 case NFSV3SATTRTIME_TOSERVER:
1004 getnanotime(&vap->va_mtime);
1012 * copies mbuf chain to the uio scatter/gather list
/*
 * Copy 'siz' bytes from the mbuf chain at (*mrep, *dpos) into the uio
 * scatter/gather list, advancing both cursors and skipping the XDR
 * pad at the end.  Supports an optional per-iovec copy operation
 * (iov_op) and SYSSPACE vs user-space destinations.
 * NOTE(review): loop structure lines are missing from this view;
 * code kept byte-identical.
 */
1015 nfsm_mbuftouio(struct mbuf **mrep, struct uio *uiop, int siz, caddr_t *dpos)
1017 char *mbufcp, *uiocp;
1018 int xfer, left, len;
1025 len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
/* rem = XDR pad bytes to skip after the payload. */
1026 rem = nfsm_rndup(siz)-siz;
1028 if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
1030 left = uiop->uio_iov->iov_len;
1031 uiocp = uiop->uio_iov->iov_base;
1040 mbufcp = mtod(mp, caddr_t);
1043 xfer = (left > len) ? len : left;
1046 if (uiop->uio_iov->iov_op != NULL)
1047 (*(uiop->uio_iov->iov_op))
1048 (mbufcp, uiocp, xfer);
1051 if (uiop->uio_segflg == UIO_SYSSPACE)
1052 bcopy(mbufcp, uiocp, xfer);
1054 copyout(mbufcp, uiocp, xfer);
1059 uiop->uio_offset += xfer;
1060 uiop->uio_resid -= xfer;
1062 if (uiop->uio_iov->iov_len <= siz) {
1066 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + uiosiz;
1067 uiop->uio_iov->iov_len -= uiosiz;
/* Skip the trailing XDR pad bytes. */
1075 error = nfs_adv(mrep, dpos, rem, len);
1083 * copies mbuf chain to the bio buffer
/*
 * Copy 'size' bytes from the mbuf chain at (*mrep, *dpos) into the
 * bio's buffer, advancing the cursors and skipping the XDR pad.
 * Transfer stops if the bio's b_bcount is exhausted.
 * NOTE(review): loop structure lines are missing from this view.
 */
1086 nfsm_mbuftobio(struct mbuf **mrep, struct bio *bio, int size, caddr_t *dpos)
1088 struct buf *bp = bio->bio_buf;
1099 len = mtod(mp, caddr_t) + mp->m_len - mbufcp;
1100 rem = nfsm_rndup(size) - size;
1102 bio_left = bp->b_bcount;
1103 bio_cp = bp->b_data;
1110 mbufcp = mtod(mp, caddr_t);
1113 if ((xfer = len) > size)
1116 if (xfer > bio_left)
1118 bcopy(mbufcp, bio_cp, xfer);
1121 * Not enough buffer space in the bio.
/* Skip the trailing XDR pad bytes. */
1135 error = nfs_adv(mrep, dpos, rem, len);
1143 * copies a uio scatter/gather list to an mbuf chain.
1144 * NOTE: can ony handle iovcnt == 1
/*
 * Copy 'siz' bytes from a single-iovec uio into the mbuf chain being
 * built at (*mq, *bpos), allocating cluster or plain mbufs as needed
 * and zero-filling the trailing XDR pad.  Panics if iovcnt != 1.
 * NOTE(review): loop structure lines are missing from this view;
 * code kept byte-identical.
 */
1147 nfsm_uiotombuf(struct uio *uiop, struct mbuf **mq, int siz, caddr_t *bpos)
1150 struct mbuf *mp, *mp2;
1151 int xfer, left, mlen;
1153 boolean_t getcluster;
1157 if (uiop->uio_iovcnt != 1)
1158 panic("nfsm_uiotombuf: iovcnt != 1");
/* Use cluster mbufs for payloads big enough to warrant them. */
1161 if (siz >= MINCLSIZE)
1165 rem = nfsm_rndup(siz) - siz;
1168 left = uiop->uio_iov->iov_len;
1169 uiocp = uiop->uio_iov->iov_base;
1174 mlen = M_TRAILINGSPACE(mp);
1177 mp = m_getcl(MB_WAIT, MT_DATA, 0);
1179 mp = m_get(MB_WAIT, MT_DATA);
1183 mlen = M_TRAILINGSPACE(mp);
1185 xfer = (left > mlen) ? mlen : left;
1188 if (uiop->uio_iov->iov_op != NULL)
1189 (*(uiop->uio_iov->iov_op))
1190 (uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1193 if (uiop->uio_segflg == UIO_SYSSPACE)
1194 bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1196 copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1200 uiop->uio_offset += xfer;
1201 uiop->uio_resid -= xfer;
1203 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + uiosiz;
1204 uiop->uio_iov->iov_len -= uiosiz;
/* Zero-fill the trailing XDR pad, in a fresh mbuf if needed. */
1208 if (rem > M_TRAILINGSPACE(mp)) {
1209 MGET(mp, MB_WAIT, MT_DATA);
1213 cp = mtod(mp, caddr_t)+mp->m_len;
1214 for (left = 0; left < rem; left++)
1219 *bpos = mtod(mp, caddr_t)+mp->m_len;
1225 * Help break down an mbuf chain by setting the first siz bytes contiguous
1226 * pointed to by returned val.
1227 * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
1228 * cases. (The macros use the vars. dpos and dpos2)
/*
 * Make the next 'siz' bytes of the dissect stream contiguous,
 * returning a pointer via *cp2.  'left' is what remains in the current
 * mbuf; when the data spans mbufs a fresh mbuf is spliced in and the
 * bytes are gathered into it.  Panics if siz exceeds MHLEN.
 * NOTE(review): several branch/return lines are missing from this
 * view; code kept byte-identical.
 */
1231 nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, caddr_t *cp2)
1233 struct mbuf *mp, *mp2;
/* Nothing left in this mbuf: step to the next one. */
1239 *mdp = mp = mp->m_next;
1243 *dposp = mtod(mp, caddr_t);
1248 } else if (mp->m_next == NULL) {
1250 } else if (siz > MHLEN) {
1251 panic("nfs S too big");
1253 MGET(mp2, MB_WAIT, MT_DATA);
1254 mp2->m_next = mp->m_next;
1258 *cp2 = p = mtod(mp, caddr_t);
1259 bcopy(*dposp, p, left); /* Copy what was left */
1263 /* Loop around copying up the siz2 bytes */
1267 xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
1269 bcopy(mtod(mp2, caddr_t), p, xfer);
1271 mp2->m_data += xfer;
1280 *dposp = mtod(mp2, caddr_t);
1286 * Advance the position in the mbuf chain.
/*
 * Advance the dissect cursor 'offs' bytes through the mbuf chain;
 * 'left' is what remains in the current mbuf.  Updates *mdp/*dposp.
 * NOTE(review): the traversal loop is missing from this view.
 */
1289 nfs_adv(struct mbuf **mdp, caddr_t *dposp, int offs, int left)
1304 *dposp = mtod(m, caddr_t)+offs;
1309 * Copy a string into mbufs for the hard cases...
/*
 * Emit a counted, XDR-padded string into the build chain for the hard
 * (non-contiguous) cases: write the length word and as much of the
 * string as fits in the current mbuf, then loop appending fresh mbufs
 * for the remainder, zeroing the trailing pad word.
 * NOTE(review): loop structure lines are missing from this view;
 * code kept byte-identical.
 */
1312 nfsm_strtmbuf(struct mbuf **mb, char **bpos, const char *cp, long siz)
1314 struct mbuf *m1 = NULL, *m2;
1315 long left, xfer, len, tlen;
1321 left = M_TRAILINGSPACE(m2);
/* Length word fits in the current mbuf: emit it here. */
1323 tl = ((u_int32_t *)(*bpos));
1324 *tl++ = txdr_unsigned(siz);
1326 left -= NFSX_UNSIGNED;
1327 m2->m_len += NFSX_UNSIGNED;
1329 bcopy(cp, (caddr_t) tl, left);
1336 /* Loop around adding mbufs */
1340 m1 = m_getl(siz, MB_WAIT, MT_DATA, 0, &msize);
1344 tl = mtod(m1, u_int32_t *);
/* Length word did not fit earlier: emit it in the fresh mbuf. */
1347 *tl++ = txdr_unsigned(siz);
1348 m1->m_len -= NFSX_UNSIGNED;
1349 tlen = NFSX_UNSIGNED;
1352 if (siz < m1->m_len) {
1353 len = nfsm_rndup(siz);
/* Pre-zero the pad word before copying the tail of the string. */
1356 *(tl+(xfer>>2)) = 0;
1358 xfer = len = m1->m_len;
1360 bcopy(cp, (caddr_t) tl, xfer);
1361 m1->m_len = len+tlen;
1366 *bpos = mtod(m1, caddr_t)+m1->m_len;
1371 * A fiddled version of m_adj() that ensures null fill to a long
1372 * boundary and only trims off the back end
/*
 * Trim 'len' bytes off the tail of the mbuf chain, then write 'nul'
 * zero bytes at the new end (XDR long-boundary null fill).  A fiddled
 * m_adj(): only trims from the back.
 * NOTE(review): length-scan and early-return lines are missing from
 * this view; code kept byte-identical.
 */
1375 nfsm_adj(struct mbuf *mp, int len, int nul)
1382 * Trim from tail. Scan the mbuf chain,
1383 * calculating its length and finding the last mbuf.
1384 * If the adjustment only affects this mbuf, then just
1385 * adjust and return. Otherwise, rescan and truncate
1386 * after the remaining size.
1392 if (m->m_next == NULL)
1396 if (m->m_len > len) {
/* Null-fill the last 'nul' bytes of the shortened mbuf. */
1399 cp = mtod(m, caddr_t)+m->m_len-nul;
1400 for (i = 0; i < nul; i++)
1409 * Correct length for chain is "count".
1410 * Find the mbuf with last data, adjust its length,
1411 * and toss data from remaining mbufs on chain.
1413 for (m = mp; m; m = m->m_next) {
1414 if (m->m_len >= count) {
1417 cp = mtod(m, caddr_t)+m->m_len-nul;
1418 for (i = 0; i < nul; i++)
/* Empty out any mbufs past the new end of data. */
1425 for (m = m->m_next;m;m = m->m_next)
1430 * Make these functions instead of macros, so that the kernel text size
1431 * doesn't get too big...
/*
 * Server side: emit V3 weak cache consistency data — optional pre-op
 * attributes (size, mtime, ctime) when before_ret == 0, followed by
 * post-op attributes via nfsm_srvpostop_attr().  A function rather
 * than a macro to keep kernel text size down.
 * NOTE(review): TRUE/FALSE word stores between builds are missing
 * from this view; code kept byte-identical.
 */
1434 nfsm_srvwcc_data(nfsm_info_t info, struct nfsrv_descript *nfsd,
1435 int before_ret, struct vattr *before_vap,
1436 int after_ret, struct vattr *after_vap)
1441 * before_ret is 0 if before_vap is valid, non-zero if it isn't.
1444 tl = nfsm_build(info, NFSX_UNSIGNED);
1447 tl = nfsm_build(info, 7 * NFSX_UNSIGNED);
1449 txdr_hyper(before_vap->va_size, tl);
1451 txdr_nfsv3time(&(before_vap->va_mtime), tl);
1453 txdr_nfsv3time(&(before_vap->va_ctime), tl);
1455 nfsm_srvpostop_attr(info, nfsd, after_ret, after_vap);
/*
 * Server side: emit V3 post-op attributes — an "attributes follow"
 * boolean and, when after_ret == 0, a full nfs_fattr filled in by
 * nfsm_srvfattr().
 * NOTE(review): TRUE/FALSE word stores are missing from this view.
 */
1459 nfsm_srvpostop_attr(nfsm_info_t info, struct nfsrv_descript *nfsd,
1460 int after_ret, struct vattr *after_vap)
1462 struct nfs_fattr *fp;
1466 tl = nfsm_build(info, NFSX_UNSIGNED);
1469 tl = nfsm_build(info, NFSX_UNSIGNED + NFSX_V3FATTR);
1471 fp = (struct nfs_fattr *)tl;
1472 nfsm_srvfattr(nfsd, after_vap, fp);
1477 nfsm_srvfattr(struct nfsrv_descript *nfsd, struct vattr *vap,
1478 struct nfs_fattr *fp)
1481 * NFS seems to truncate nlink to 16 bits, don't let it overflow.
1483 if (vap->va_nlink > 65535)
1484 fp->fa_nlink = 65535;
1486 fp->fa_nlink = txdr_unsigned(vap->va_nlink);
1487 fp->fa_uid = txdr_unsigned(vap->va_uid);
1488 fp->fa_gid = txdr_unsigned(vap->va_gid);
1489 if (nfsd->nd_flag & ND_NFSV3) {
1490 fp->fa_type = vtonfsv3_type(vap->va_type);
1491 fp->fa_mode = vtonfsv3_mode(vap->va_mode);
1492 txdr_hyper(vap->va_size, &fp->fa3_size);
1493 txdr_hyper(vap->va_bytes, &fp->fa3_used);
1494 fp->fa3_rdev.specdata1 = txdr_unsigned(vap->va_rmajor);
1495 fp->fa3_rdev.specdata2 = txdr_unsigned(vap->va_rminor);
1496 fp->fa3_fsid.nfsuquad[0] = 0;
1497 fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
1498 txdr_hyper(vap->va_fileid, &fp->fa3_fileid);
1499 txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
1500 txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
1501 txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
1503 fp->fa_type = vtonfsv2_type(vap->va_type);
1504 fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1505 fp->fa2_size = txdr_unsigned(vap->va_size);
1506 fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
1507 if (vap->va_type == VFIFO)
1508 fp->fa2_rdev = 0xffffffff;
1510 fp->fa2_rdev = txdr_unsigned(makeudev(vap->va_rmajor, vap->va_rminor));
1511 fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
1512 fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
1513 fp->fa2_fileid = txdr_unsigned(vap->va_fileid);
1514 txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
1515 txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
1516 txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);