2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1989, 1993
36 * The Regents of the University of California. All rights reserved.
38 * This code is derived from software contributed to Berkeley by
39 * Rick Macklem at The University of Guelph.
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. All advertising materials mentioning features or use of this software
50 * must display the following acknowledgement:
51 * This product includes software developed by the University of
52 * California, Berkeley and its contributors.
53 * 4. Neither the name of the University nor the names of its contributors
54 * may be used to endorse or promote products derived from this software
55 * without specific prior written permission.
57 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * These functions support the macros and help fiddle mbuf chains for
72 * the nfs op functions. They do things like create the rpc header and
73 * copy data between mbuf chains and uio lists.
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/kernel.h>
80 #include <sys/mount.h>
81 #include <sys/vnode.h>
82 #include <sys/nlookup.h>
83 #include <sys/namei.h>
85 #include <sys/socket.h>
87 #include <sys/malloc.h>
88 #include <sys/sysent.h>
89 #include <sys/syscall.h>
91 #include <sys/objcache.h>
94 #include <vm/vm_object.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_zone.h>
101 #include "nfsproto.h"
103 #include "nfsmount.h"
105 #include "xdr_subs.h"
106 #include "nfsm_subs.h"
109 #include <netinet/in.h>
/*
 * Current RPC transaction id (xid) seed; zero until first use
 * (see the zero-xid skip in nfsm_rpchead()).
 */
111 static u_int32_t nfs_xid = 0;
/*
 * NOTE(review): excerpt incomplete -- embedded line numbers jump, so
 * braces/declarations are missing; verify against upstream nfsm_subs.c.
 */
114 * Create the header for an rpc request packet
115 * The hsiz is the size of the rest of the nfs request header.
116 * (just used to decide if a cluster is a good idea)
119 nfsm_reqhead(nfsm_info_t info, struct vnode *vp, u_long procid, int hsiz)
/* Allocate the first mbuf (cluster if hsiz warrants) and init cursors. */
121 info->mb = m_getl(hsiz, MB_WAIT, MT_DATA, 0, NULL);
123 info->mreq = info->mb;
/* bpos tracks the current build position within info->mb. */
124 info->bpos = mtod(info->mb, caddr_t);
/*
 * NOTE(review): excerpt incomplete (source line numbers jump); else-arms,
 * declarations and returns are missing -- compare with upstream.
 */
128 * Build the RPC header and fill in the authorization info.
129 * The authorization string argument is only used when the credentials
130 * come from outside of the kernel.
131 * Returns the head of the mbuf list.
134 nfsm_rpchead(struct ucred *cr, int nmflag, int procid, int auth_type,
135 int auth_len, char *auth_str, int verf_len, char *verf_str,
136 struct mbuf *mrest, int mrest_len, struct mbuf **mbp,
139 struct nfsm_info info;
142 int siz, grpsiz, authsiz, dsiz;
/* Round the auth length up to a 4-byte XDR boundary. */
145 authsiz = nfsm_rndup(auth_len);
146 dsiz = authsiz + 10 * NFSX_UNSIGNED;
147 info.mb = m_getl(dsiz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
148 if (dsiz < MINCLSIZE) {
150 MH_ALIGN(info.mb, dsiz);
152 MH_ALIGN(info.mb, 8 * NFSX_UNSIGNED);
154 info.mb->m_len = info.mb->m_pkthdr.len = 0;
156 info.bpos = mtod(info.mb, caddr_t);
/* The fixed RPC call header: xid, call dir, rpcvers, prog, vers, proc. */
159 * First the RPC header.
161 tl = nfsm_build(&info, 8 * NFSX_UNSIGNED);
163 /* Get a pretty random xid to start with */
167 * Skip zero xid if it should ever happen.
172 *tl++ = *xidp = txdr_unsigned(nfs_xid);
175 *tl++ = txdr_unsigned(NFS_PROG);
176 if (nmflag & NFSMNT_NFSV3)
177 *tl++ = txdr_unsigned(NFS_VER3);
179 *tl++ = txdr_unsigned(NFS_VER2);
180 if (nmflag & NFSMNT_NFSV3)
181 *tl++ = txdr_unsigned(procid);
/* V2 uses a translation table to map procedure numbers. */
183 *tl++ = txdr_unsigned(nfsv2_procid[procid]);
186 * And then the authorization cred.
188 *tl++ = txdr_unsigned(auth_type);
189 *tl = txdr_unsigned(authsiz);
/* AUTH_UNIX-style cred body: stamp, hostname, uid, gid, group list. */
192 tl = nfsm_build(&info, auth_len);
193 *tl++ = 0; /* stamp ?? */
194 *tl++ = 0; /* NULL hostname */
195 *tl++ = txdr_unsigned(cr->cr_uid);
196 *tl++ = txdr_unsigned(cr->cr_groups[0]);
197 grpsiz = (auth_len >> 2) - 5;
198 *tl++ = txdr_unsigned(grpsiz);
199 for (i = 1; i <= grpsiz; i++)
200 *tl++ = txdr_unsigned(cr->cr_groups[i]);
/* External (out-of-kernel) auth string: copy across mbuf boundaries. */
205 if (M_TRAILINGSPACE(info.mb) == 0) {
206 mb2 = m_getl(siz, MB_WAIT, MT_DATA, 0, NULL);
208 info.mb->m_next = mb2;
210 info.bpos = mtod(info.mb, caddr_t);
212 i = min(siz, M_TRAILINGSPACE(info.mb));
213 bcopy(auth_str, info.bpos, i);
/* Zero-pad the auth string out to the XDR 4-byte boundary. */
219 if ((siz = (nfsm_rndup(auth_len) - auth_len)) > 0) {
220 for (i = 0; i < siz; i++)
222 info.mb->m_len += siz;
228 * And the verifier...
230 tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
232 *tl++ = txdr_unsigned(RPCAUTH_KERB4);
233 *tl = txdr_unsigned(verf_len);
236 if (M_TRAILINGSPACE(info.mb) == 0) {
237 mb2 = m_getl(siz, MB_WAIT, MT_DATA,
240 info.mb->m_next = mb2;
242 info.bpos = mtod(info.mb, caddr_t);
244 i = min(siz, M_TRAILINGSPACE(info.mb));
245 bcopy(verf_str, info.bpos, i);
/* Pad the verifier to a 4-byte boundary as well. */
251 if ((siz = (nfsm_rndup(verf_len) - verf_len)) > 0) {
252 for (i = 0; i < siz; i++)
254 info.mb->m_len += siz;
257 *tl++ = txdr_unsigned(RPCAUTH_NULL);
/* Append caller's request body and fix up the packet header length. */
260 info.mb->m_next = mrest;
261 info.mreq->m_pkthdr.len = authsiz + 10 * NFSX_UNSIGNED + mrest_len;
262 info.mreq->m_pkthdr.rcvif = NULL;
/*
 * Reserve 'bytes' of contiguous space at the current build position,
 * appending a fresh mbuf when the current one lacks trailing space.
 * (excerpt incomplete: source line numbers jump)
 */
268 nfsm_build(nfsm_info_t info, int bytes)
273 if (bytes > M_TRAILINGSPACE(info->mb)) {
274 MGET(mb2, MB_WAIT, MT_DATA);
/* A request to build more than fits in one plain mbuf is fatal. */
276 panic("build > MLEN");
277 info->mb->m_next = mb2;
280 info->bpos = mtod(info->mb, caddr_t);
283 info->mb->m_len += bytes;
/*
 * Return a pointer to 'bytes' of contiguous reply data at the current
 * dissect position, using nfsm_disct() for the hard (split-mbuf) case.
 * (excerpt incomplete: source line numbers jump)
 */
290 * If NULL returned caller is expected to abort with an EBADRPC error.
291 * Caller will usually use the NULLOUT macro.
294 nfsm_dissect(nfsm_info_t info, int bytes)
/* n = bytes remaining in the current mbuf past dpos. */
301 n = mtod(info->md, caddr_t) + info->md->m_len - info->dpos;
306 error = nfsm_disct(&info->md, &info->dpos, bytes, n, &cp2);
/*
 * Marshal the vnode's NFS file handle into the request being built.
 * V3 path emits length + handle (via nfsm_strtmbuf for the overflow
 * case); the fallback emits a fixed-size V2 handle.
 * (excerpt incomplete: source line numbers jump)
 */
320 * Caller is expected to abort if non-zero error is returned.
323 nfsm_fhtom(nfsm_info_t info, struct vnode *vp)
331 n = nfsm_rndup(VTONFS(vp)->n_fhsize) + NFSX_UNSIGNED;
332 if (n <= M_TRAILINGSPACE(info->mb)) {
333 tl = nfsm_build(info, n);
334 *tl++ = txdr_unsigned(VTONFS(vp)->n_fhsize);
/* Pre-zero the final word so XDR round-up padding is clean. */
335 *(tl + ((n >> 2) - 2)) = 0;
336 bcopy((caddr_t)VTONFS(vp)->n_fhp,(caddr_t)tl,
337 VTONFS(vp)->n_fhsize);
339 } else if ((error = nfsm_strtmbuf(&info->mb, &info->bpos,
340 (caddr_t)VTONFS(vp)->n_fhp,
341 VTONFS(vp)->n_fhsize)) != 0) {
/* V2: fixed-length file handle, no length prefix. */
346 cp = nfsm_build(info, NFSX_V2FH);
347 bcopy(VTONFS(vp)->n_fhp, cp, NFSX_V2FH);
/*
 * Server-side: marshal a raw fhandle_t into a reply -- V3 with a
 * length prefix, V2 as a bare fixed-size handle.
 * (excerpt incomplete: source line numbers jump)
 */
354 nfsm_srvfhtom(nfsm_info_t info, fhandle_t *fhp)
359 tl = nfsm_build(info, NFSX_UNSIGNED + NFSX_V3FH);
360 *tl++ = txdr_unsigned(NFSX_V3FH);
361 bcopy(fhp, tl, NFSX_V3FH);
363 tl = nfsm_build(info, NFSX_V2FH);
364 bcopy(fhp, tl, NFSX_V2FH);
/*
 * Emit a V3 post-op file handle (presence flag + length + handle).
 * (excerpt incomplete: source line numbers jump)
 */
369 nfsm_srvpostop_fh(nfsm_info_t info, fhandle_t *fhp)
373 tl = nfsm_build(info, 2 * NFSX_UNSIGNED + NFSX_V3FH);
375 *tl++ = txdr_unsigned(NFSX_V3FH);
376 bcopy(fhp, tl, NFSX_V3FH);
/*
 * Parse an optional file handle + attributes from a reply, producing a
 * vnode via nfs_nget(). *gotvpp reports whether attributes arrived.
 * (excerpt incomplete: source line numbers jump; several branches missing)
 */
380 * Caller is expected to abort if non-zero error is returned.
382 * NOTE: (*vpp) may be loaded with a valid vnode even if (*gotvpp)
383 * winds up 0. The caller is responsible for dealing with (*vpp).
386 nfsm_mtofh(nfsm_info_t info, struct vnode *dvp, struct vnode **vpp, int *gotvpp)
388 struct nfsnode *ttnp;
395 tl = nfsm_dissect(info, NFSX_UNSIGNED);
398 *gotvpp = fxdr_unsigned(int, *tl);
/* Fetch the handle and look up/create the corresponding nfsnode. */
403 NEGATIVEOUT(ttfhsize = nfsm_getfh(info, &ttfhp));
404 error = nfs_nget(dvp->v_mount, ttfhp, ttfhsize, &ttnp);
413 tl = nfsm_dissect(info, NFSX_UNSIGNED);
417 *gotvpp = fxdr_unsigned(int, *tl);
/* Attributes present but no vnode to load them into: skip them. */
418 } else if (fxdr_unsigned(int, *tl)) {
419 error = nfsm_adv(info, NFSX_V3FATTR);
425 error = nfsm_loadattr(info, *vpp, NULL);
/*
 * Dissect a (V3 length-prefixed) file handle out of the reply; returns
 * the handle length, rejecting sizes outside (0, NFSX_V3FHMAX].
 * (excerpt incomplete: source line numbers jump)
 */
432 * Caller is expected to abort with EBADRPC if a negative length is returned.
435 nfsm_getfh(nfsm_info_t info, nfsfh_t **fhpp)
442 tl = nfsm_dissect(info, NFSX_UNSIGNED);
445 if ((n = fxdr_unsigned(int, *tl)) <= 0 || n > NFSX_V3FHMAX) {
453 *fhpp = nfsm_dissect(info, nfsm_rndup(n));
/*
 * Thin wrapper: load reply attributes into the vnode's attribute cache.
 * (excerpt incomplete: source line numbers jump)
 */
460 * Caller is expected to abort if a non-zero error is returned.
463 nfsm_loadattr(nfsm_info_t info, struct vnode *vp, struct vattr *vap)
467 error = nfs_loadattrcache(vp, &info->md, &info->dpos, vap, 0);
/*
 * Parse a V3 post-op attribute: presence flag into *attrp, then cache
 * the attributes (lflags passed through to nfs_loadattrcache()).
 * (excerpt incomplete: source line numbers jump)
 */
477 * Caller is expected to abort if a non-zero error is returned.
480 nfsm_postop_attr(nfsm_info_t info, struct vnode *vp, int *attrp, int lflags)
485 tl = nfsm_dissect(info, NFSX_UNSIGNED);
488 *attrp = fxdr_unsigned(int, *tl);
490 error = nfs_loadattrcache(vp, &info->md, &info->dpos,
/*
 * Parse V3 weak cache consistency data: optional pre-op attrs (compare
 * the 'before' mtime against our cached n_mtime, flag NRMODIFIED on
 * mismatch), then the post-op attrs.
 * (excerpt incomplete: source line numbers jump)
 */
503 * Caller is expected to abort if a non-zero error is returned.
506 nfsm_wcc_data(nfsm_info_t info, struct vnode *vp, int *attrp)
513 tl = nfsm_dissect(info, NFSX_UNSIGNED);
516 if (*tl == nfs_true) {
/* pre-op size/mtime/ctime = 6 XDR words. */
517 tl = nfsm_dissect(info, 6 * NFSX_UNSIGNED);
521 ttretf = (VTONFS(vp)->n_mtime ==
522 fxdr_unsigned(u_int32_t, *(tl + 2)));
/* Someone else changed the file: mark for revalidation. */
524 VTONFS(vp)->n_flag |= NRMODIFIED;
526 error = nfsm_postop_attr(info, vp, &ttattrf,
527 NFS_LATTR_NOSHRINK|NFS_LATTR_NOMTIMECHECK);
531 error = nfsm_postop_attr(info, vp, &ttattrf,
/*
 * NOTE(review): the leading comment below appears to describe the wcc
 * machinery; the code that follows is nfsm_v3attrbuild(). Excerpt is
 * incomplete (source line numbers jump) -- else-arms are missing.
 */
544 * This function updates the attribute cache based on data returned in the
545 * NFS reply for NFS RPCs that modify the target file. If the RPC succeeds
546 * a 'before' and 'after' mtime is returned that allows us to determine if
547 * the new mtime attribute represents our modification or someone else's
550 * The flag argument returns non-0 if the original times matched, zero if
551 * they did not match. NRMODIFIED is automatically set if the before time
552 * does not match the original n_mtime, and n_mtime is automatically updated
553 * to the new after time (by nfsm_postop_attr()).
555 * If full is true, set all fields, otherwise just set mode and time fields
558 nfsm_v3attrbuild(nfsm_info_t info, struct vattr *vap, int full)
/* Each settable attribute is a boolean "follows" word + optional value. */
562 if (vap->va_mode != (mode_t)VNOVAL) {
563 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
565 *tl = txdr_unsigned(vap->va_mode);
567 tl = nfsm_build(info, NFSX_UNSIGNED);
570 if (full && vap->va_uid != (uid_t)VNOVAL) {
571 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
573 *tl = txdr_unsigned(vap->va_uid);
575 tl = nfsm_build(info, NFSX_UNSIGNED);
578 if (full && vap->va_gid != (gid_t)VNOVAL) {
579 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
581 *tl = txdr_unsigned(vap->va_gid);
583 tl = nfsm_build(info, NFSX_UNSIGNED);
586 if (full && vap->va_size != VNOVAL) {
587 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
589 txdr_hyper(vap->va_size, tl);
591 tl = nfsm_build(info, NFSX_UNSIGNED);
/* atime: client-supplied time vs server time vs leave unchanged. */
594 if (vap->va_atime.tv_sec != VNOVAL) {
595 if (vap->va_atime.tv_sec != time_second) {
596 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
597 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
598 txdr_nfsv3time(&vap->va_atime, tl);
600 tl = nfsm_build(info, NFSX_UNSIGNED);
601 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
604 tl = nfsm_build(info, NFSX_UNSIGNED);
605 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
/* mtime: same three-way choice as atime. */
607 if (vap->va_mtime.tv_sec != VNOVAL) {
608 if (vap->va_mtime.tv_sec != time_second) {
609 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
610 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
611 txdr_nfsv3time(&vap->va_mtime, tl);
613 tl = nfsm_build(info, NFSX_UNSIGNED);
614 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
617 tl = nfsm_build(info, NFSX_UNSIGNED);
618 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
/*
 * Dissect an XDR string length and bounds-check it against maxlen.
 * (excerpt incomplete: source line numbers jump)
 */
623 * Caller is expected to abort with EBADRPC if a negative length is returned.
626 nfsm_strsiz(nfsm_info_t info, int maxlen)
631 tl = nfsm_dissect(info, NFSX_UNSIGNED);
634 len = fxdr_unsigned(int32_t, *tl);
635 if (len < 0 || len > maxlen)
/*
 * Server-side string-length dissect with error reporting via *errorp.
 * (excerpt incomplete: source line numbers jump)
 */
641 * Caller is expected to abort if a negative length is returned, but also
642 * call nfsm_reply(0) if -2 is returned.
644 * This function sets *errorp. Caller should not modify the error code.
647 nfsm_srvstrsiz(nfsm_info_t info, int maxlen, int *errorp)
652 tl = nfsm_dissect(info, NFSX_UNSIGNED);
657 len = fxdr_unsigned(int32_t,*tl);
658 if (len > maxlen || len <= 0) {
/*
 * Server-side filename-length dissect; flags NFSERR_NAMETOL for names
 * longer than NFS_MAXNAMLEN.
 * (excerpt incomplete: source line numbers jump)
 */
666 * Caller is expected to abort if a negative length is returned, but also
667 * call nfsm_reply(0) if -2 is returned.
669 * This function sets *errorp. Caller should not modify the error code.
672 nfsm_srvnamesiz(nfsm_info_t info, int *errorp)
677 tl = nfsm_dissect(info, NFSX_UNSIGNED);
684 * In this case if *errorp is not EBADRPC and we are NFSv3,
685 * nfsm_reply() will not return a negative number. But all
686 * call cases assume len is valid so we really do want
689 len = fxdr_unsigned(int32_t,*tl);
690 if (len > NFS_MAXNAMLEN)
691 *errorp = NFSERR_NAMETOL;
/*
 * Copy 'len' bytes of reply mbuf data into a uio scatter/gather list.
 * (excerpt incomplete: source line numbers jump)
 */
700 * Caller is expected to abort if a non-zero error is returned.
703 nfsm_mtouio(nfsm_info_t info, struct uio *uiop, int len)
708 (error = nfsm_mbuftouio(&info->md, uiop, len, &info->dpos)) != 0) {
/*
 * Copy 'len' bytes of reply mbuf data into a bio's buffer.
 * (excerpt incomplete: source line numbers jump)
 */
717 * Caller is expected to abort if a non-zero error is returned.
720 nfsm_mtobio(nfsm_info_t info, struct bio *bio, int len)
725 (error = nfsm_mbuftobio(&info->md, bio, len, &info->dpos)) != 0) {
/*
 * Copy 'len' bytes from a uio into the request mbuf chain being built.
 * (excerpt incomplete: source line numbers jump)
 */
734 * Caller is expected to abort if a non-zero error is returned.
737 nfsm_uiotom(nfsm_info_t info, struct uio *uiop, int len)
741 if ((error = nfsm_uiotombuf(uiop, &info->mb, len, &info->bpos)) != 0) {
/*
 * Synchronous request: fill in the state-machine fields and run
 * nfs_request() from SETUP all the way to DONE.
 * (excerpt incomplete: source line numbers jump)
 */
750 * Caller is expected to abort if a negative value is returned. This
751 * function sets *errorp. Caller should not modify the error code.
753 * We load up the remaining info fields and run the request state
754 * machine until it is done.
756 * This call runs the entire state machine and does not return until
757 * the command is complete.
760 nfsm_request(nfsm_info_t info, struct vnode *vp, int procnum,
761 thread_t td, struct ucred *cred, int *errorp)
763 info->state = NFSM_STATE_SETUP;
764 info->procnum = procnum;
769 info->nmp = VFSTONFS(vp->v_mount);
771 *errorp = nfs_request(info, NFSM_STATE_SETUP, NFSM_STATE_DONE);
/* Strip the RETERR flag bit from the returned error code. */
773 if ((*errorp & NFSERR_RETERR) == 0)
775 *errorp &= ~NFSERR_RETERR;
/*
 * Asynchronous request: kick the state machine through the initial
 * transmission; completion is delivered via the bio. On early failure
 * the bio's buf is flagged B_ERROR.
 * (excerpt incomplete: source line numbers jump)
 */
781 * This call starts the state machine through the initial transmission.
782 * Completion is via the bio. The info structure must have installed
785 * If we are unable to do the initial tx we generate the bio completion
789 nfsm_request_bio(nfsm_info_t info, struct vnode *vp, int procnum,
790 thread_t td, struct ucred *cred)
795 info->state = NFSM_STATE_SETUP;
796 info->procnum = procnum;
800 info->nmp = VFSTONFS(vp->v_mount);
802 error = nfs_request(info, NFSM_STATE_SETUP, NFSM_STATE_WAITREPLY);
/* EINPROGRESS is the expected async result; anything else aborts. */
803 if (error != EINPROGRESS) {
804 kprintf("nfsm_request_bio: early abort %d\n", error);
805 bp = info->bio->bio_buf;
807 bp->b_flags |= B_ERROR;
/*
 * Marshal an XDR opaque/string (length + data + pad) into the request,
 * rejecting lengths over maxlen with ENAMETOOLONG.
 * (excerpt incomplete: source line numbers jump)
 */
814 * Caller is expected to abort if a non-zero error is returned.
817 nfsm_strtom(nfsm_info_t info, const void *data, int len, int maxlen)
826 return(ENAMETOOLONG);
828 n = nfsm_rndup(len) + NFSX_UNSIGNED;
829 if (n <= M_TRAILINGSPACE(info->mb)) {
830 tl = nfsm_build(info, n);
831 *tl++ = txdr_unsigned(len);
/* Pre-zero the final word so XDR round-up padding is clean. */
832 *(tl + ((n >> 2) - 2)) = 0;
833 bcopy(data, tl, len);
/* Doesn't fit contiguously: fall back to the multi-mbuf path. */
836 error = nfsm_strtmbuf(&info->mb, &info->bpos, data, len);
/*
 * Server-side: record the request status and generate the RPC reply
 * header via nfs_rephead().
 * (excerpt incomplete: source line numbers jump; control flow between
 * the error checks is missing -- verify against upstream.)
 */
846 * Caller is expected to abort if a negative value is returned. This
847 * function sets *errorp. Caller should not modify the error code.
850 nfsm_reply(nfsm_info_t info,
851 struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
852 int siz, int *errorp)
854 nfsd->nd_repstat = *errorp;
855 if (*errorp && !(nfsd->nd_flag & ND_NFSV3))
857 nfs_rephead(siz, nfsd, slp, *errorp, &info->mreq,
858 &info->mb, &info->bpos);
859 if (info->mrep != NULL) {
863 if (*errorp && (!(nfsd->nd_flag & ND_NFSV3) || *errorp == EBADRPC)) {
/*
 * Like nfsm_reply() but takes the error by value; used for write replies.
 * (excerpt incomplete: source line numbers jump)
 */
871 nfsm_writereply(nfsm_info_t info,
872 struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
875 nfsd->nd_repstat = error;
876 if (error && !(info->v3))
878 nfs_rephead(siz, nfsd, slp, error, &info->mreq, &info->mb, &info->bpos);
/*
 * Advance the dissect position by 'len' bytes, crossing mbufs via
 * nfs_adv() when the current mbuf doesn't have enough data left.
 * (excerpt incomplete: source line numbers jump)
 */
882 * Caller is expected to abort if a non-zero error is returned.
885 nfsm_adv(nfsm_info_t info, int len)
890 n = mtod(info->md, caddr_t) + info->md->m_len - info->dpos;
894 } else if ((error = nfs_adv(&info->md, &info->dpos, len, n)) != 0) {
/*
 * Server-side: dissect the file handle from an incoming request.
 * V3 handles carry a length word (0 or NFSX_V3FH accepted; zero-length
 * yields a zeroed handle).
 * (excerpt incomplete: source line numbers jump; V2 path not visible)
 */
902 * Caller is expected to abort if a negative length is returned, but also
903 * call nfsm_reply(0) if -2 is returned.
905 * This function sets *errorp. Caller should not modify the error code.
908 nfsm_srvmtofh(nfsm_info_t info, struct nfsrv_descript *nfsd,
909 fhandle_t *fhp, int *errorp)
914 if (nfsd->nd_flag & ND_NFSV3) {
915 tl = nfsm_dissect(info, NFSX_UNSIGNED);
920 fhlen = fxdr_unsigned(int, *tl);
921 if (fhlen != 0 && fhlen != NFSX_V3FH) {
929 tl = nfsm_dissect(info, fhlen);
934 bcopy(tl, fhp, fhlen);
936 bzero(fhp, NFSX_V3FH);
/*
 * Helper for readdir-style builders: finalize the current cluster's
 * length, append a fresh MCLBYTES cluster, and reset the *bp/*be
 * (cursor/end) window into it.
 * (excerpt incomplete: source line numbers jump)
 */
942 _nfsm_clget(nfsm_info_t info, struct mbuf **mp1, struct mbuf **mp2,
943 char **bp, char **be)
946 if (*mp1 == info->mb)
947 (*mp1)->m_len += *bp - info->bpos;
948 *mp1 = m_getcl(MB_WAIT, MT_DATA, 0);
949 (*mp1)->m_len = MCLBYTES;
950 (*mp2)->m_next = *mp1;
952 *bp = mtod(*mp1, caddr_t);
953 *be = *bp + (*mp1)->m_len;
/*
 * Server-side: parse a V3 sattr3 structure into *vap. Each field is an
 * optional (boolean "follows" flagged) value; times may be client
 * supplied, server time, or left unchanged.
 * (excerpt incomplete: source line numbers jump; switch default/break
 * lines missing)
 */
959 nfsm_srvsattr(nfsm_info_t info, struct vattr *vap)
964 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
965 if (*tl == nfs_true) {
966 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
967 vap->va_mode = nfstov_mode(*tl);
969 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
970 if (*tl == nfs_true) {
971 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
972 vap->va_uid = fxdr_unsigned(uid_t, *tl);
974 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
975 if (*tl == nfs_true) {
976 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
977 vap->va_gid = fxdr_unsigned(gid_t, *tl);
979 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
980 if (*tl == nfs_true) {
/* size is a 64-bit hyper: two XDR words. */
981 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
982 vap->va_size = fxdr_hyper(tl);
984 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
985 switch (fxdr_unsigned(int, *tl)) {
986 case NFSV3SATTRTIME_TOCLIENT:
987 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
988 fxdr_nfsv3time(tl, &vap->va_atime);
990 case NFSV3SATTRTIME_TOSERVER:
991 getnanotime(&vap->va_atime);
994 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
995 switch (fxdr_unsigned(int, *tl)) {
996 case NFSV3SATTRTIME_TOCLIENT:
997 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
998 fxdr_nfsv3time(tl, &vap->va_mtime);
1000 case NFSV3SATTRTIME_TOSERVER:
1001 getnanotime(&vap->va_mtime);
/*
 * Copy 'siz' bytes from the mbuf chain into the uio's iovecs, then
 * advance past the XDR round-up padding.
 * (excerpt incomplete: source line numbers jump; loop structure and
 * error paths missing -- verify against upstream.)
 */
1009 * copies mbuf chain to the uio scatter/gather list
1012 nfsm_mbuftouio(struct mbuf **mrep, struct uio *uiop, int siz, caddr_t *dpos)
1014 char *mbufcp, *uiocp;
1015 int xfer, left, len;
1022 len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
/* rem = XDR pad bytes to skip after the payload. */
1023 rem = nfsm_rndup(siz)-siz;
1025 if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
1027 left = uiop->uio_iov->iov_len;
1028 uiocp = uiop->uio_iov->iov_base;
1037 mbufcp = mtod(mp, caddr_t);
1040 xfer = (left > len) ? len : left;
/* iov_op, when set, overrides the plain copy (kernel-internal hook). */
1043 if (uiop->uio_iov->iov_op != NULL)
1044 (*(uiop->uio_iov->iov_op))
1045 (mbufcp, uiocp, xfer);
1048 if (uiop->uio_segflg == UIO_SYSSPACE)
1049 bcopy(mbufcp, uiocp, xfer);
1051 copyout(mbufcp, uiocp, xfer);
1056 uiop->uio_offset += xfer;
1057 uiop->uio_resid -= xfer;
1059 if (uiop->uio_iov->iov_len <= siz) {
1063 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + uiosiz;
1064 uiop->uio_iov->iov_len -= uiosiz;
1072 error = nfs_adv(mrep, dpos, rem, len);
/*
 * Copy 'size' bytes from the mbuf chain into the bio's data buffer,
 * then advance past the XDR round-up padding.
 * (excerpt incomplete: source line numbers jump)
 */
1080 * copies mbuf chain to the bio buffer
1083 nfsm_mbuftobio(struct mbuf **mrep, struct bio *bio, int size, caddr_t *dpos)
1085 struct buf *bp = bio->bio_buf;
1096 len = mtod(mp, caddr_t) + mp->m_len - mbufcp;
1097 rem = nfsm_rndup(size) - size;
1099 bio_left = bp->b_bcount;
1100 bio_cp = bp->b_data;
1107 mbufcp = mtod(mp, caddr_t);
1110 if ((xfer = len) > size)
1113 if (xfer > bio_left)
1115 bcopy(mbufcp, bio_cp, xfer);
1118 * Not enough buffer space in the bio.
1132 error = nfs_adv(mrep, dpos, rem, len);
/*
 * Copy 'siz' bytes from a single-iovec uio into the mbuf chain being
 * built, allocating clusters for large transfers and zero-padding to
 * the XDR boundary at the end.
 * (excerpt incomplete: source line numbers jump; loop structure missing)
 */
1140 * copies a uio scatter/gather list to an mbuf chain.
1141 * NOTE: can ony handle iovcnt == 1
1144 nfsm_uiotombuf(struct uio *uiop, struct mbuf **mq, int siz, caddr_t *bpos)
1147 struct mbuf *mp, *mp2;
1148 int xfer, left, mlen;
1150 boolean_t getcluster;
1154 if (uiop->uio_iovcnt != 1)
1155 panic("nfsm_uiotombuf: iovcnt != 1");
/* Use clusters when the payload is large enough to warrant them. */
1158 if (siz >= MINCLSIZE)
1162 rem = nfsm_rndup(siz) - siz;
1165 left = uiop->uio_iov->iov_len;
1166 uiocp = uiop->uio_iov->iov_base;
1171 mlen = M_TRAILINGSPACE(mp);
1174 mp = m_getcl(MB_WAIT, MT_DATA, 0);
1176 mp = m_get(MB_WAIT, MT_DATA);
1180 mlen = M_TRAILINGSPACE(mp);
1182 xfer = (left > mlen) ? mlen : left;
1185 if (uiop->uio_iov->iov_op != NULL)
1186 (*(uiop->uio_iov->iov_op))
1187 (uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1190 if (uiop->uio_segflg == UIO_SYSSPACE)
1191 bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1193 copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1197 uiop->uio_offset += xfer;
1198 uiop->uio_resid -= xfer;
1200 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + uiosiz;
1201 uiop->uio_iov->iov_len -= uiosiz;
/* Zero-fill the XDR pad, spilling into a fresh mbuf if needed. */
1205 if (rem > M_TRAILINGSPACE(mp)) {
1206 MGET(mp, MB_WAIT, MT_DATA);
1210 cp = mtod(mp, caddr_t)+mp->m_len;
1211 for (left = 0; left < rem; left++)
1216 *bpos = mtod(mp, caddr_t)+mp->m_len;
/*
 * Make the next 'siz' bytes of the dissect stream contiguous, pulling
 * bytes forward from following mbufs (possibly into a freshly inserted
 * mbuf) when the data straddles a boundary.
 * (excerpt incomplete: source line numbers jump; loop/exit code missing)
 */
1222 * Help break down an mbuf chain by setting the first siz bytes contiguous
1223 * pointed to by returned val.
1224 * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
1225 * cases. (The macros use the vars. dpos and dpos2)
1228 nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, caddr_t *cp2)
1230 struct mbuf *mp, *mp2;
1236 *mdp = mp = mp->m_next;
1240 *dposp = mtod(mp, caddr_t);
1245 } else if (mp->m_next == NULL) {
/* Request exceeds a single mbuf's capacity: cannot linearize. */
1247 } else if (siz > MHLEN) {
1248 panic("nfs S too big");
1250 MGET(mp2, MB_WAIT, MT_DATA);
1251 mp2->m_next = mp->m_next;
1255 *cp2 = p = mtod(mp, caddr_t);
1256 bcopy(*dposp, p, left); /* Copy what was left */
1260 /* Loop around copying up the siz2 bytes */
1264 xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
1266 bcopy(mtod(mp2, caddr_t), p, xfer);
1268 mp2->m_data += xfer;
1277 *dposp = mtod(mp2, caddr_t);
/*
 * Advance (*mdp, *dposp) forward by 'offs' bytes across mbuf boundaries.
 * (excerpt incomplete: source line numbers jump; walk loop missing)
 */
1283 * Advance the position in the mbuf chain.
1286 nfs_adv(struct mbuf **mdp, caddr_t *dposp, int offs, int left)
1301 *dposp = mtod(m, caddr_t)+offs;
/*
 * Multi-mbuf path for marshalling an XDR string: emit the length word,
 * fill the current mbuf's trailing space, then append mbufs until the
 * whole (rounded-up, zero-padded) payload is copied.
 * (excerpt incomplete: source line numbers jump; loop/exit code missing)
 */
1306 * Copy a string into mbufs for the hard cases...
1309 nfsm_strtmbuf(struct mbuf **mb, char **bpos, const char *cp, long siz)
1311 struct mbuf *m1 = NULL, *m2;
1312 long left, xfer, len, tlen;
1318 left = M_TRAILINGSPACE(m2);
1320 tl = ((u_int32_t *)(*bpos));
1321 *tl++ = txdr_unsigned(siz);
1323 left -= NFSX_UNSIGNED;
1324 m2->m_len += NFSX_UNSIGNED;
1326 bcopy(cp, (caddr_t) tl, left);
1333 /* Loop around adding mbufs */
1337 m1 = m_getl(siz, MB_WAIT, MT_DATA, 0, &msize);
1341 tl = mtod(m1, u_int32_t *);
/* Length word not yet emitted: put it at the head of this mbuf. */
1344 *tl++ = txdr_unsigned(siz);
1345 m1->m_len -= NFSX_UNSIGNED;
1346 tlen = NFSX_UNSIGNED;
1349 if (siz < m1->m_len) {
1350 len = nfsm_rndup(siz);
/* Zero the round-up word before copying the tail fragment. */
1353 *(tl+(xfer>>2)) = 0;
1355 xfer = len = m1->m_len;
1357 bcopy(cp, (caddr_t) tl, xfer);
1358 m1->m_len = len+tlen;
1363 *bpos = mtod(m1, caddr_t)+m1->m_len;
/*
 * Trim 'len' bytes from the tail of the chain, then null-fill the last
 * 'nul' bytes so the chain stays padded to a long boundary.
 * (excerpt incomplete: source line numbers jump; count bookkeeping and
 * truncation statements missing -- verify against upstream.)
 */
1368 * A fiddled version of m_adj() that ensures null fill to a long
1369 * boundary and only trims off the back end
1372 nfsm_adj(struct mbuf *mp, int len, int nul)
1379 * Trim from tail. Scan the mbuf chain,
1380 * calculating its length and finding the last mbuf.
1381 * If the adjustment only affects this mbuf, then just
1382 * adjust and return. Otherwise, rescan and truncate
1383 * after the remaining size.
1389 if (m->m_next == NULL)
/* Fast path: the whole trim fits inside the final mbuf. */
1393 if (m->m_len > len) {
1396 cp = mtod(m, caddr_t)+m->m_len-nul;
1397 for (i = 0; i < nul; i++)
1406 * Correct length for chain is "count".
1407 * Find the mbuf with last data, adjust its length,
1408 * and toss data from remaining mbufs on chain.
1410 for (m = mp; m; m = m->m_next) {
1411 if (m->m_len >= count) {
1414 cp = mtod(m, caddr_t)+m->m_len-nul;
1415 for (i = 0; i < nul; i++)
/* Empty out any mbufs that follow the new last-data mbuf. */
1422 for (m = m->m_next;m;m = m->m_next)
/*
 * Server-side: emit V3 wcc_data -- optional pre-op size/mtime/ctime
 * followed by the post-op attributes.
 * (excerpt incomplete: source line numbers jump)
 */
1427 * Make these functions instead of macros, so that the kernel text size
1428 * doesn't get too big...
1431 nfsm_srvwcc_data(nfsm_info_t info, struct nfsrv_descript *nfsd,
1432 int before_ret, struct vattr *before_vap,
1433 int after_ret, struct vattr *after_vap)
1438 * before_ret is 0 if before_vap is valid, non-zero if it isn't.
1441 tl = nfsm_build(info, NFSX_UNSIGNED);
/* Pre-op attrs present: size (hyper) + mtime + ctime = 7 words total. */
1444 tl = nfsm_build(info, 7 * NFSX_UNSIGNED);
1446 txdr_hyper(before_vap->va_size, tl);
1448 txdr_nfsv3time(&(before_vap->va_mtime), tl);
1450 txdr_nfsv3time(&(before_vap->va_ctime), tl);
1452 nfsm_srvpostop_attr(info, nfsd, after_ret, after_vap);
/*
 * Server-side: emit V3 post-op attributes -- a presence flag, and when
 * after_ret indicates valid attrs, a full fattr3 via nfsm_srvfattr().
 * (excerpt incomplete: source line numbers jump)
 */
1456 nfsm_srvpostop_attr(nfsm_info_t info, struct nfsrv_descript *nfsd,
1457 int after_ret, struct vattr *after_vap)
1459 struct nfs_fattr *fp;
1463 tl = nfsm_build(info, NFSX_UNSIGNED);
1466 tl = nfsm_build(info, NFSX_UNSIGNED + NFSX_V3FATTR);
1468 fp = (struct nfs_fattr *)tl;
1469 nfsm_srvfattr(nfsd, after_vap, fp);
/*
 * Server-side: translate a vattr into the on-the-wire nfs_fattr,
 * choosing V3 or V2 field layout from nfsd->nd_flag.
 * (excerpt incomplete: source line numbers jump)
 */
1474 nfsm_srvfattr(struct nfsrv_descript *nfsd, struct vattr *vap,
1475 struct nfs_fattr *fp)
1478 * NFS seems to truncate nlink to 16 bits, don't let it overflow.
1480 if (vap->va_nlink > 65535)
1481 fp->fa_nlink = 65535;
1483 fp->fa_nlink = txdr_unsigned(vap->va_nlink);
1484 fp->fa_uid = txdr_unsigned(vap->va_uid);
1485 fp->fa_gid = txdr_unsigned(vap->va_gid);
1486 if (nfsd->nd_flag & ND_NFSV3) {
1487 fp->fa_type = vtonfsv3_type(vap->va_type);
1488 fp->fa_mode = vtonfsv3_mode(vap->va_mode);
1489 txdr_hyper(vap->va_size, &fp->fa3_size);
1490 txdr_hyper(vap->va_bytes, &fp->fa3_used);
1491 fp->fa3_rdev.specdata1 = txdr_unsigned(vap->va_rmajor);
1492 fp->fa3_rdev.specdata2 = txdr_unsigned(vap->va_rminor);
1493 fp->fa3_fsid.nfsuquad[0] = 0;
1494 fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
1495 txdr_hyper(vap->va_fileid, &fp->fa3_fileid);
1496 txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
1497 txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
1498 txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
1500 fp->fa_type = vtonfsv2_type(vap->va_type);
1501 fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1502 fp->fa2_size = txdr_unsigned(vap->va_size);
1503 fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
/* V2 encodes FIFOs with an all-ones rdev. */
1504 if (vap->va_type == VFIFO)
1505 fp->fa2_rdev = 0xffffffff;
1507 fp->fa2_rdev = txdr_unsigned(makeudev(vap->va_rmajor, vap->va_rminor));
1508 fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
1509 fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
1510 fp->fa2_fileid = txdr_unsigned(vap->va_fileid);
1511 txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
1512 txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
1513 txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);