2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1989, 1993
36 * The Regents of the University of California. All rights reserved.
38 * This code is derived from software contributed to Berkeley by
39 * Rick Macklem at The University of Guelph.
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67 * These functions support the macros and help fiddle mbuf chains for
68 * the nfs op functions. They do things like create the rpc header and
69 * copy data between mbuf chains and uio lists.
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
76 #include <sys/mount.h>
77 #include <sys/vnode.h>
79 #include <sys/socket.h>
81 #include <sys/malloc.h>
82 #include <sys/sysent.h>
83 #include <sys/syscall.h>
85 #include <sys/objcache.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_extern.h>
99 #include "nfsm_subs.h"
102 #include <netinet/in.h>
/* Monotonic RPC transaction-id seed; advanced with atomic_fetchadd_int()
 * in nfsm_rpchead() so concurrent requests get unique XIDs. */
104 static u_int32_t nfs_xid = 0;
107 * Create the header for an rpc request packet
108 * The hsiz is the size of the rest of the nfs request header.
109 * (just used to decide if a cluster is a good idea)
/*
 * NOTE(review): fragmentary extraction -- return-type line, braces and the
 * return statement are not visible here; code left byte-identical.
 * Allocates the initial request mbuf (m_getl sizes mbuf vs cluster by hsiz)
 * and primes info->mreq / info->mb / info->bpos for subsequent nfsm_build().
 */
112 nfsm_reqhead(nfsm_info_t info, struct vnode *vp, u_long procid, int hsiz)
114 info->mb = m_getl(hsiz, M_WAITOK, MT_DATA, 0, NULL);
116 info->mreq = info->mb;
117 info->bpos = mtod(info->mb, caddr_t);
121 * Build the RPC header and fill in the authorization info.
122 * The authorization string argument is only used when the credentials
123 * come from outside of the kernel.
124 * Returns the head of the mbuf list.
/*
 * NOTE(review): fragmentary extraction -- several declaration lines, else
 * branches and the trailing return are missing from this view; the visible
 * code is left byte-identical.
 *
 * Layout emitted (all XDR via txdr_unsigned): xid, [rpc-call fields],
 * program, version (V3 or V2 by NFSMNT_NFSV3), procedure (raw for V3,
 * translated through nfsv2_procid[] for V2), then the AUTH_UNIX-style cred
 * (stamp, hostname, uid, gid, gid list), optional external auth_str, the
 * verifier (RPCAUTH_KERB4 when verf_str is supplied, RPCAUTH_NULL
 * otherwise), and finally mrest is chained on and m_pkthdr.len fixed up.
 */
127 nfsm_rpchead(struct ucred *cr, int nmflag, int procid, int auth_type,
128 int auth_len, char *auth_str, int verf_len, char *verf_str,
129 struct mbuf *mrest, int mrest_len, struct mbuf **mbp,
132 struct nfsm_info info;
136 int siz, grpsiz, authsiz, dsiz;
139 authsiz = nfsm_rndup(auth_len);
140 dsiz = authsiz + 10 * NFSX_UNSIGNED;
141 info.mb = m_getl(dsiz, M_WAITOK, MT_DATA, M_PKTHDR, NULL);
/* Align so the header area sits at the tail of the mbuf, leaving leading
 * space for lower-layer headers. */
142 if (dsiz < MINCLSIZE) {
144 MH_ALIGN(info.mb, dsiz);
146 MH_ALIGN(info.mb, 8 * NFSX_UNSIGNED);
148 info.mb->m_len = info.mb->m_pkthdr.len = 0;
150 info.bpos = mtod(info.mb, caddr_t);
153 * First the RPC header.
155 tl = nfsm_build(&info, 8 * NFSX_UNSIGNED);
157 /* Get a pretty random xid to start with */
162 xid = atomic_fetchadd_int(&nfs_xid, 1);
165 *tl++ = *xidp = txdr_unsigned(xid);
168 *tl++ = txdr_unsigned(NFS_PROG);
169 if (nmflag & NFSMNT_NFSV3)
170 *tl++ = txdr_unsigned(NFS_VER3);
172 *tl++ = txdr_unsigned(NFS_VER2);
173 if (nmflag & NFSMNT_NFSV3)
174 *tl++ = txdr_unsigned(procid);
176 *tl++ = txdr_unsigned(nfsv2_procid[procid]);
179 * And then the authorization cred.
181 *tl++ = txdr_unsigned(auth_type);
182 *tl = txdr_unsigned(authsiz);
185 tl = nfsm_build(&info, auth_len);
186 *tl++ = 0; /* stamp ?? */
187 *tl++ = 0; /* NULL hostname */
188 *tl++ = txdr_unsigned(cr->cr_uid);
189 *tl++ = txdr_unsigned(cr->cr_groups[0]);
/* auth_len is 5 fixed words plus the group list; recover the group count. */
190 grpsiz = (auth_len >> 2) - 5;
191 *tl++ = txdr_unsigned(grpsiz);
192 for (i = 1; i <= grpsiz; i++)
193 *tl++ = txdr_unsigned(cr->cr_groups[i]);
/* External (out-of-kernel) credential string: copy piecewise, growing the
 * chain whenever the current mbuf runs out of trailing space. */
198 if (M_TRAILINGSPACE(info.mb) == 0) {
199 mb2 = m_getl(siz, M_WAITOK, MT_DATA, 0, NULL);
201 info.mb->m_next = mb2;
203 info.bpos = mtod(info.mb, caddr_t);
205 i = min(siz, M_TRAILINGSPACE(info.mb));
206 bcopy(auth_str, info.bpos, i);
/* Zero-pad the credential out to an XDR 4-byte boundary. */
212 if ((siz = (nfsm_rndup(auth_len) - auth_len)) > 0) {
213 for (i = 0; i < siz; i++)
215 info.mb->m_len += siz;
221 * And the verifier...
223 tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
225 *tl++ = txdr_unsigned(RPCAUTH_KERB4);
226 *tl = txdr_unsigned(verf_len);
229 if (M_TRAILINGSPACE(info.mb) == 0) {
230 mb2 = m_getl(siz, M_WAITOK, MT_DATA,
233 info.mb->m_next = mb2;
235 info.bpos = mtod(info.mb, caddr_t);
237 i = min(siz, M_TRAILINGSPACE(info.mb));
238 bcopy(verf_str, info.bpos, i);
/* Zero-pad the verifier out to an XDR 4-byte boundary. */
244 if ((siz = (nfsm_rndup(verf_len) - verf_len)) > 0) {
245 for (i = 0; i < siz; i++)
247 info.mb->m_len += siz;
250 *tl++ = txdr_unsigned(RPCAUTH_NULL);
/* Append the caller-supplied request body and finalize the packet header. */
253 info.mb->m_next = mrest;
254 info.mreq->m_pkthdr.len = authsiz + 10 * NFSX_UNSIGNED + mrest_len;
255 info.mreq->m_pkthdr.rcvif = NULL;
/*
 * Reserve 'bytes' of contiguous space at the current build position,
 * growing the chain with a fresh mbuf when trailing space is short.
 * Panics if a single reservation exceeds MLEN.  Returns a pointer into
 * the mbuf data area (return statement not visible in this fragment).
 */
261 nfsm_build(nfsm_info_t info, int bytes)
266 if (bytes > M_TRAILINGSPACE(info->mb)) {
267 MGET(mb2, M_WAITOK, MT_DATA);
269 panic("build > MLEN");
270 info->mb->m_next = mb2;
273 info->bpos = mtod(info->mb, caddr_t);
276 info->mb->m_len += bytes;
283 * If NULL returned caller is expected to abort with an EBADRPC error.
284 * Caller will usually use the NULLOUT macro.
/*
 * Return a contiguous pointer to the next 'bytes' of reply data, pulling
 * data together across mbufs via nfsm_disct() when the current mbuf does
 * not hold enough (fragment: fast-path and return lines not visible).
 */
287 nfsm_dissect(nfsm_info_t info, int bytes)
295 * Check for missing reply packet. This typically occurs if there
296 * is a soft termination w/too many retries.
298 if (info->md == NULL) {
307 * Otherwise any error will be due to the packet format
/* n = bytes remaining in the current dissect mbuf. */
309 n = mtod(info->md, caddr_t) + info->md->m_len - info->dpos;
314 error = nfsm_disct(&info->md, &info->dpos, bytes, n, &cp2);
328 * Caller is expected to abort if non-zero error is returned.
/*
 * Encode the file handle of vp into the request: NFSv3 path emits a
 * counted, padded handle (inline when it fits in the current mbuf,
 * otherwise through nfsm_strtmbuf()); the NFSv2 path copies the fixed
 * NFSX_V2FH handle.  (Fragment: v2/v3 branch framing not visible.)
 */
331 nfsm_fhtom(nfsm_info_t info, struct vnode *vp)
339 n = nfsm_rndup(VTONFS(vp)->n_fhsize) + NFSX_UNSIGNED;
340 if (n <= M_TRAILINGSPACE(info->mb)) {
341 tl = nfsm_build(info, n);
342 *tl++ = txdr_unsigned(VTONFS(vp)->n_fhsize);
/* Pre-zero the last word so the XDR pad bytes are clean. */
343 *(tl + ((n >> 2) - 2)) = 0;
344 bcopy((caddr_t)VTONFS(vp)->n_fhp,(caddr_t)tl,
345 VTONFS(vp)->n_fhsize);
347 } else if ((error = nfsm_strtmbuf(&info->mb, &info->bpos,
348 (caddr_t)VTONFS(vp)->n_fhp,
349 VTONFS(vp)->n_fhsize)) != 0) {
354 cp = nfsm_build(info, NFSX_V2FH);
355 bcopy(VTONFS(vp)->n_fhp, cp, NFSX_V2FH);
/*
 * Server-side file handle encode: NFSv3 writes a length word plus the
 * NFSX_V3FH handle; NFSv2 writes the fixed NFSX_V2FH handle.
 * (Fragment: the v2/v3 if/else framing is not visible.)
 */
362 nfsm_srvfhtom(nfsm_info_t info, fhandle_t *fhp)
367 tl = nfsm_build(info, NFSX_UNSIGNED + NFSX_V3FH);
368 *tl++ = txdr_unsigned(NFSX_V3FH);
369 bcopy(fhp, tl, NFSX_V3FH);
371 tl = nfsm_build(info, NFSX_V2FH);
372 bcopy(fhp, tl, NFSX_V2FH);
/*
 * Emit an NFSv3 post-op file handle: handle-follows flag word (not visible
 * in this fragment), the handle length, then the NFSX_V3FH handle bytes.
 */
377 nfsm_srvpostop_fh(nfsm_info_t info, fhandle_t *fhp)
381 tl = nfsm_build(info, 2 * NFSX_UNSIGNED + NFSX_V3FH);
383 *tl++ = txdr_unsigned(NFSX_V3FH);
384 bcopy(fhp, tl, NFSX_V3FH);
388 * Caller is expected to abort if non-zero error is returned.
390 * NOTE: (*vpp) may be loaded with a valid vnode even if (*gotvpp)
391 * winds up 0. The caller is responsible for dealing with (*vpp).
/*
 * Decode an optional post-op file handle + attributes from the reply:
 * reads the handle-follows flag, fetches the handle via nfsm_getfh() and
 * resolves it to a vnode with nfs_nget(), then handles the attributes
 * (loading them, or skipping NFSX_V3FATTR when the vnode is absent).
 * (Fragment: v2/v3 branching and error paths are only partially visible.)
 */
394 nfsm_mtofh(nfsm_info_t info, struct vnode *dvp, struct vnode **vpp, int *gotvpp)
396 struct nfsnode *ttnp;
403 tl = nfsm_dissect(info, NFSX_UNSIGNED);
406 *gotvpp = fxdr_unsigned(int, *tl);
411 NEGATIVEOUT(ttfhsize = nfsm_getfh(info, &ttfhp));
412 error = nfs_nget(dvp->v_mount, ttfhp, ttfhsize, &ttnp, NULL);
421 tl = nfsm_dissect(info, NFSX_UNSIGNED);
425 *gotvpp = fxdr_unsigned(int, *tl);
426 } else if (fxdr_unsigned(int, *tl)) {
/* Attributes present but no vnode to load them into: skip them. */
427 error = nfsm_adv(info, NFSX_V3FATTR);
433 error = nfsm_loadattr(info, *vpp, NULL);
440 * Caller is expected to abort with EBADRPC if a negative length is returned.
/*
 * Decode a counted NFSv3 file handle: read the length word, validate it
 * against (0, NFSX_V3FHMAX], and point *fhpp at the rounded-up handle data
 * in place.  (Fragment: v2 branch and return lines not visible.)
 */
443 nfsm_getfh(nfsm_info_t info, nfsfh_t **fhpp)
450 tl = nfsm_dissect(info, NFSX_UNSIGNED);
453 if ((n = fxdr_unsigned(int, *tl)) <= 0 || n > NFSX_V3FHMAX) {
461 *fhpp = nfsm_dissect(info, nfsm_rndup(n));
468 * Caller is expected to abort if a non-zero error is returned.
/* Thin wrapper: load reply attributes into the attribute cache (and
 * optionally *vap) via nfs_loadattrcache(). */
471 nfsm_loadattr(nfsm_info_t info, struct vnode *vp, struct vattr *vap)
475 error = nfs_loadattrcache(vp, &info->md, &info->dpos, vap, 0);
485 * Caller is expected to abort if a non-zero error is returned.
/*
 * Decode NFSv3 post-op attributes: read the attributes-follow flag into
 * *attrp and, when set, load the attributes into the cache with the
 * caller-supplied lflags.  (Fragment: conditional framing not visible.)
 */
488 nfsm_postop_attr(nfsm_info_t info, struct vnode *vp, int *attrp, int lflags)
493 tl = nfsm_dissect(info, NFSX_UNSIGNED);
496 *attrp = fxdr_unsigned(int, *tl);
498 error = nfs_loadattrcache(vp, &info->md, &info->dpos,
511 * Caller is expected to abort if a non-zero error is returned.
/*
 * Decode NFSv3 weak cache consistency data: when the "before" attributes
 * are present, compare the before-mtime against the cached n_mtime
 * (setting NRMODIFIED on mismatch), then decode the post-op attributes
 * with NOSHRINK/NOMTIMECHECK so n_mtime is updated to the after time.
 * (Fragment: branch framing and *attrp handling partially missing.)
 */
514 nfsm_wcc_data(nfsm_info_t info, struct vnode *vp, int *attrp)
521 tl = nfsm_dissect(info, NFSX_UNSIGNED);
524 if (*tl == nfs_true) {
525 tl = nfsm_dissect(info, 6 * NFSX_UNSIGNED);
529 ttretf = (VTONFS(vp)->n_mtime ==
530 fxdr_unsigned(u_int32_t, *(tl + 2)));
532 VTONFS(vp)->n_flag |= NRMODIFIED;
534 error = nfsm_postop_attr(info, vp, &ttattrf,
535 NFS_LATTR_NOSHRINK|NFS_LATTR_NOMTIMECHECK);
539 error = nfsm_postop_attr(info, vp, &ttattrf,
552 * This function updates the attribute cache based on data returned in the
553 * NFS reply for NFS RPCs that modify the target file. If the RPC succeeds
554 * a 'before' and 'after' mtime is returned that allows us to determine if
555 * the new mtime attribute represents our modification or someone else's
558 * The flag argument returns non-0 if the original times matched, zero if
559 * they did not match. NRMODIFIED is automatically set if the before time
560 * does not match the original n_mtime, and n_mtime is automatically updated
561 * to the new after time (by nfsm_postop_attr()).
563 * If full is true, set all fields, otherwise just set mode and time fields
/*
 * Build an NFSv3 sattr3 structure: for each field (mode, uid, gid, size,
 * atime, mtime) emit a boolean "set it" discriminator followed by the
 * value, or just the FALSE/DONTCHANGE word when the vattr field is VNOVAL.
 * Times equal to the current second are sent as TOSERVER so the server
 * stamps them.  (Fragment: TRUE/FALSE discriminator stores and else
 * framing are missing from this view.)
 */
566 nfsm_v3attrbuild(nfsm_info_t info, struct vattr *vap, int full)
570 if (vap->va_mode != (mode_t)VNOVAL) {
571 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
573 *tl = txdr_unsigned(vap->va_mode);
575 tl = nfsm_build(info, NFSX_UNSIGNED);
578 if (full && vap->va_uid != (uid_t)VNOVAL) {
579 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
581 *tl = txdr_unsigned(vap->va_uid);
583 tl = nfsm_build(info, NFSX_UNSIGNED);
586 if (full && vap->va_gid != (gid_t)VNOVAL) {
587 tl = nfsm_build(info, 2 * NFSX_UNSIGNED);
589 *tl = txdr_unsigned(vap->va_gid);
591 tl = nfsm_build(info, NFSX_UNSIGNED);
594 if (full && vap->va_size != VNOVAL) {
595 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
597 txdr_hyper(vap->va_size, tl);
599 tl = nfsm_build(info, NFSX_UNSIGNED);
602 if (vap->va_atime.tv_sec != VNOVAL) {
603 if (vap->va_atime.tv_sec != time_second) {
604 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
605 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
606 txdr_nfsv3time(&vap->va_atime, tl);
608 tl = nfsm_build(info, NFSX_UNSIGNED);
609 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
612 tl = nfsm_build(info, NFSX_UNSIGNED);
613 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
615 if (vap->va_mtime.tv_sec != VNOVAL) {
616 if (vap->va_mtime.tv_sec != time_second) {
617 tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
618 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
619 txdr_nfsv3time(&vap->va_mtime, tl);
621 tl = nfsm_build(info, NFSX_UNSIGNED);
622 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
625 tl = nfsm_build(info, NFSX_UNSIGNED);
626 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
631 * Caller is expected to abort with EBADRPC if a negative length is returned.
/* Decode a string length word and range-check it against [0, maxlen].
 * (Fragment: error/return lines not visible.) */
634 nfsm_strsiz(nfsm_info_t info, int maxlen)
639 tl = nfsm_dissect(info, NFSX_UNSIGNED);
642 len = fxdr_unsigned(int32_t, *tl);
643 if (len < 0 || len > maxlen)
649 * Caller is expected to abort if a negative length is returned, but also
650 * call nfsm_reply(0) if -2 is returned.
652 * This function sets *errorp. Caller should not modify the error code.
/* Server-side string length decode: rejects lengths outside (0, maxlen]
 * by setting *errorp.  (Fragment: error assignment/return not visible.) */
655 nfsm_srvstrsiz(nfsm_info_t info, int maxlen, int *errorp)
660 tl = nfsm_dissect(info, NFSX_UNSIGNED);
665 len = fxdr_unsigned(int32_t,*tl);
666 if (len > maxlen || len <= 0) {
674 * Caller is expected to abort if a negative length is returned, but also
675 * call nfsm_reply(0) if -2 is returned.
677 * This function sets *errorp. Caller should not modify the error code.
/* Server-side filename length decode: lengths above NFS_MAXNAMLEN become
 * NFSERR_NAMETOL.  (Fragment: other validation/return lines not visible.) */
680 nfsm_srvnamesiz(nfsm_info_t info, int *errorp)
685 tl = nfsm_dissect(info, NFSX_UNSIGNED);
692 * In this case if *errorp is not EBADRPC and we are NFSv3,
693 * nfsm_reply() will not return a negative number. But all
694 * call cases assume len is valid so we really do want
697 len = fxdr_unsigned(int32_t,*tl);
698 if (len > NFS_MAXNAMLEN)
699 *errorp = NFSERR_NAMETOL;
708 * Caller is expected to abort if a non-zero error is returned.
/* Copy 'len' bytes from the reply mbuf chain into a uio via
 * nfsm_mbuftouio(). */
711 nfsm_mtouio(nfsm_info_t info, struct uio *uiop, int len)
716 (error = nfsm_mbuftouio(&info->md, uiop, len, &info->dpos)) != 0) {
725 * Caller is expected to abort if a non-zero error is returned.
/* Copy 'len' bytes from the reply mbuf chain into a bio buffer via
 * nfsm_mbuftobio(). */
728 nfsm_mtobio(nfsm_info_t info, struct bio *bio, int len)
733 (error = nfsm_mbuftobio(&info->md, bio, len, &info->dpos)) != 0) {
742 * Caller is expected to abort if a non-zero error is returned.
/* Copy 'len' bytes from a uio into the request mbuf chain via
 * nfsm_uiotombuf(). */
745 nfsm_uiotom(nfsm_info_t info, struct uio *uiop, int len)
749 error = nfsm_uiotombuf(uiop, &info->mb, len, &info->bpos);
/* Copy 'len' bytes starting at offset 'off' of a bio buffer into the
 * request mbuf chain via nfsm_biotombuf(). */
759 nfsm_biotom(nfsm_info_t info, struct bio *bio, int off, int len)
763 error = nfsm_biotombuf(bio, &info->mb, off, len, &info->bpos);
773 * Caller is expected to abort if a negative value is returned. This
774 * function sets *errorp. Caller should not modify the error code.
776 * We load up the remaining info fields and run the request state
777 * machine until it is done.
779 * This call runs the entire state machine and does not return until
780 * the command is complete.
/*
 * Synchronous request driver: fill in state/procnum/nmp and run
 * nfs_request() from SETUP to DONE.  The NFSERR_RETERR bit in *errorp is
 * stripped before the caller sees the code.  (Fragment: td/cred field
 * assignments and return lines not visible.)
 */
783 nfsm_request(nfsm_info_t info, struct vnode *vp, int procnum,
784 thread_t td, struct ucred *cred, int *errorp)
786 info->state = NFSM_STATE_SETUP;
787 info->procnum = procnum;
792 info->nmp = VFSTONFS(vp->v_mount);
794 *errorp = nfs_request(info, NFSM_STATE_SETUP, NFSM_STATE_DONE);
796 if ((*errorp & NFSERR_RETERR) == 0)
798 *errorp &= ~NFSERR_RETERR;
804 * This call starts the state machine through the initial transmission.
805 * Completion is via the bio. The info structure must have installed
808 * If we are unable to do the initial tx we generate the bio completion
/*
 * Asynchronous request driver: run nfs_request() only through the initial
 * transmit (SETUP -> WAITREPLY); completion is delivered via the bio.
 * Anything other than EINPROGRESS is an early abort -- flag the buf with
 * B_ERROR (and B_INVAL for unrecoverable EIO).  (Fragment: td/cred
 * assignments and the biodone path are not visible.)
 */
812 nfsm_request_bio(nfsm_info_t info, struct vnode *vp, int procnum,
813 thread_t td, struct ucred *cred)
818 info->state = NFSM_STATE_SETUP;
819 info->procnum = procnum;
823 info->nmp = VFSTONFS(vp->v_mount);
825 error = nfs_request(info, NFSM_STATE_SETUP, NFSM_STATE_WAITREPLY);
826 if (error != EINPROGRESS) {
827 kprintf("nfsm_request_bio: early abort %d\n", error);
828 bp = info->bio->bio_buf;
830 bp->b_flags |= B_ERROR;
831 if (error == EIO) /* unrecoverable */
832 bp->b_flags |= B_INVAL;
840 * Caller is expected to abort if a non-zero error is returned.
/*
 * Encode a counted, XDR-padded string into the request: ENAMETOOLONG if
 * len exceeds maxlen; inline copy when the rounded size fits the current
 * mbuf, otherwise defer to nfsm_strtmbuf().  (Fragment: length check and
 * return framing partially missing.)
 */
843 nfsm_strtom(nfsm_info_t info, const void *data, int len, int maxlen)
852 return(ENAMETOOLONG);
854 n = nfsm_rndup(len) + NFSX_UNSIGNED;
855 if (n <= M_TRAILINGSPACE(info->mb)) {
856 tl = nfsm_build(info, n);
857 *tl++ = txdr_unsigned(len);
/* Pre-zero the final word so XDR pad bytes are clean. */
858 *(tl + ((n >> 2) - 2)) = 0;
859 bcopy(data, tl, len);
862 error = nfsm_strtmbuf(&info->mb, &info->bpos, data, len);
872 * Caller is expected to abort if a negative value is returned. This
873 * function sets *errorp. Caller should not modify the error code.
/*
 * Build a server reply header via nfs_rephead(), recording *errorp in
 * nd_repstat first.  For non-V3 (or EBADRPC) errors the reply carries the
 * error itself and the caller is signalled to stop further encoding.
 * (Fragment: siz adjustment and return lines not visible.)
 */
876 nfsm_reply(nfsm_info_t info,
877 struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
878 int siz, int *errorp)
880 nfsd->nd_repstat = *errorp;
881 if (*errorp && !(nfsd->nd_flag & ND_NFSV3))
883 nfs_rephead(siz, nfsd, slp, *errorp, &info->mreq,
884 &info->mb, &info->bpos);
885 if (info->mrep != NULL) {
889 if (*errorp && (!(nfsd->nd_flag & ND_NFSV3) || *errorp == EBADRPC)) {
/*
 * Like nfsm_reply() but for the write-gather path: record the error in
 * nd_repstat and build the reply head.  (Fragment: parameter list tail
 * and the v2 error-size adjustment are not visible.)
 */
897 nfsm_writereply(nfsm_info_t info,
898 struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
901 nfsd->nd_repstat = error;
902 if (error && !(info->v3))
904 nfs_rephead(siz, nfsd, slp, error, &info->mreq, &info->mb, &info->bpos);
908 * Caller is expected to abort if a non-zero error is returned.
/* Skip 'len' bytes of reply data: fast path within the current mbuf,
 * otherwise advance across the chain via nfs_adv(). */
911 nfsm_adv(nfsm_info_t info, int len)
916 n = mtod(info->md, caddr_t) + info->md->m_len - info->dpos;
920 } else if ((error = nfs_adv(&info->md, &info->dpos, len, n)) != 0) {
928 * Caller is expected to abort if a negative length is returned, but also
929 * call nfsm_reply(0) if -2 is returned.
931 * This function sets *errorp. Caller should not modify the error code.
/*
 * Server-side file handle decode: NFSv3 reads a length word that must be
 * 0 or NFSX_V3FH; a zero-length handle zeroes *fhp, otherwise the handle
 * bytes are copied out.  (Fragment: the v2 path and the error settings
 * are not visible.)
 */
934 nfsm_srvmtofh(nfsm_info_t info, struct nfsrv_descript *nfsd,
935 fhandle_t *fhp, int *errorp)
940 if (nfsd->nd_flag & ND_NFSV3) {
941 tl = nfsm_dissect(info, NFSX_UNSIGNED);
946 fhlen = fxdr_unsigned(int, *tl);
947 if (fhlen != 0 && fhlen != NFSX_V3FH) {
955 tl = nfsm_dissect(info, fhlen);
960 bcopy(tl, fhp, fhlen);
962 bzero(fhp, NFSX_V3FH);
/*
 * Helper for the readdir/cluster loops: close out the current cluster
 * (syncing m_len with the build pointer when it is the info mbuf), append
 * a fresh MCLBYTES cluster, and reset the *bp/*be window pointers.
 * (Fragment: surrounding conditional framing not visible.)
 */
968 _nfsm_clget(nfsm_info_t info, struct mbuf **mp1, struct mbuf **mp2,
969 char **bp, char **be)
972 if (*mp1 == info->mb)
973 (*mp1)->m_len += *bp - info->bpos;
974 *mp1 = m_getcl(M_WAITOK, MT_DATA, 0);
975 (*mp1)->m_len = MCLBYTES;
976 (*mp2)->m_next = *mp1;
978 *bp = mtod(*mp1, caddr_t);
979 *be = *bp + (*mp1)->m_len;
/*
 * Decode an NFSv3 sattr3 from a request into *vap: each of mode, uid,
 * gid and size is guarded by a boolean discriminator; atime and mtime use
 * the TOCLIENT (explicit time follows) / TOSERVER (stamp with
 * getnanotime()) discriminators.  NULLOUT aborts on short packets.
 * (Fragment: switch break statements and default cases not visible.)
 */
985 nfsm_srvsattr(nfsm_info_t info, struct vattr *vap)
990 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
991 if (*tl == nfs_true) {
992 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
993 vap->va_mode = nfstov_mode(*tl);
995 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
996 if (*tl == nfs_true) {
997 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
998 vap->va_uid = fxdr_unsigned(uid_t, *tl);
1000 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1001 if (*tl == nfs_true) {
1002 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1003 vap->va_gid = fxdr_unsigned(gid_t, *tl);
1005 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1006 if (*tl == nfs_true) {
1007 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
1008 vap->va_size = fxdr_hyper(tl);
1010 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1011 switch (fxdr_unsigned(int, *tl)) {
1012 case NFSV3SATTRTIME_TOCLIENT:
1013 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
1014 fxdr_nfsv3time(tl, &vap->va_atime);
1016 case NFSV3SATTRTIME_TOSERVER:
1017 getnanotime(&vap->va_atime);
1020 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1021 switch (fxdr_unsigned(int, *tl)) {
1022 case NFSV3SATTRTIME_TOCLIENT:
1023 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
1024 fxdr_nfsv3time(tl, &vap->va_mtime);
1026 case NFSV3SATTRTIME_TOSERVER:
1027 getnanotime(&vap->va_mtime);
1035 * copies mbuf chain to the uio scatter/gather list
/*
 * Walk the mbuf chain and the uio iovec list in parallel, copying 'siz'
 * bytes: bcopy for UIO_SYSSPACE, copyout for user space, or an iov_op
 * callback when installed.  Afterwards skip the XDR round-up padding via
 * nfs_adv().  (Fragment: loop framing, EBADRPC checks and iovec-advance
 * branches are partially missing.)
 */
1038 nfsm_mbuftouio(struct mbuf **mrep, struct uio *uiop, int siz, caddr_t *dpos)
1040 char *mbufcp, *uiocp;
1041 int xfer, left, len;
1048 len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
1049 rem = nfsm_rndup(siz)-siz;
1051 if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
1053 left = uiop->uio_iov->iov_len;
1054 uiocp = uiop->uio_iov->iov_base;
1063 mbufcp = mtod(mp, caddr_t);
1066 xfer = (left > len) ? len : left;
1069 if (uiop->uio_iov->iov_op != NULL)
1070 (*(uiop->uio_iov->iov_op))
1071 (mbufcp, uiocp, xfer);
1074 if (uiop->uio_segflg == UIO_SYSSPACE)
1075 bcopy(mbufcp, uiocp, xfer);
1077 copyout(mbufcp, uiocp, xfer);
1082 uiop->uio_offset += xfer;
1083 uiop->uio_resid -= xfer;
1085 if (uiop->uio_iov->iov_len <= siz) {
1089 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + uiosiz;
1090 uiop->uio_iov->iov_len -= uiosiz;
/* Skip the XDR pad bytes that follow the data. */
1098 error = nfs_adv(mrep, dpos, rem, len);
1106 * copies mbuf chain to the bio buffer
/*
 * Copy 'size' bytes from the mbuf chain into bio->bio_buf->b_data,
 * bounded by b_bcount, then skip the XDR round-up padding.  (Fragment:
 * loop framing and the short-buffer handling are partially missing.)
 */
1109 nfsm_mbuftobio(struct mbuf **mrep, struct bio *bio, int size, caddr_t *dpos)
1111 struct buf *bp = bio->bio_buf;
1122 len = mtod(mp, caddr_t) + mp->m_len - mbufcp;
1123 rem = nfsm_rndup(size) - size;
1125 bio_left = bp->b_bcount;
1126 bio_cp = bp->b_data;
1133 mbufcp = mtod(mp, caddr_t);
1136 if ((xfer = len) > size)
1139 if (xfer > bio_left)
1141 bcopy(mbufcp, bio_cp, xfer);
1144 * Not enough buffer space in the bio.
/* Skip the XDR pad bytes that follow the data. */
1158 error = nfs_adv(mrep, dpos, rem, len);
1166 * copies a uio scatter/gather list to an mbuf chain.
1167 * NOTE: can ony handle iovcnt == 1
/*
 * Copy 'siz' bytes from a single-iovec uio into the build mbuf chain,
 * allocating clusters (siz >= MINCLSIZE) or plain mbufs as trailing space
 * runs out; copies via bcopy/copyin/iov_op by segment type, then
 * zero-fills the XDR round-up bytes and updates *bpos.  (Fragment: chain
 * linkage and loop framing are partially missing.)
 */
1170 nfsm_uiotombuf(struct uio *uiop, struct mbuf **mq, int siz, caddr_t *bpos)
1173 struct mbuf *mp, *mp2;
1174 int xfer, left, mlen;
1176 boolean_t getcluster;
1180 if (uiop->uio_iovcnt != 1)
1181 panic("nfsm_uiotombuf: iovcnt != 1");
1184 if (siz >= MINCLSIZE)
1188 rem = nfsm_rndup(siz) - siz;
1191 left = uiop->uio_iov->iov_len;
1192 uiocp = uiop->uio_iov->iov_base;
1197 mlen = M_TRAILINGSPACE(mp);
1200 mp = m_getcl(M_WAITOK, MT_DATA, 0);
1202 mp = m_get(M_WAITOK, MT_DATA);
1206 mlen = M_TRAILINGSPACE(mp);
1208 xfer = (left > mlen) ? mlen : left;
1211 if (uiop->uio_iov->iov_op != NULL)
1212 (*(uiop->uio_iov->iov_op))
1213 (uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1216 if (uiop->uio_segflg == UIO_SYSSPACE)
1217 bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1219 copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
1223 uiop->uio_offset += xfer;
1224 uiop->uio_resid -= xfer;
1226 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + uiosiz;
1227 uiop->uio_iov->iov_len -= uiosiz;
/* Zero-fill the XDR round-up bytes, growing the chain if needed. */
1231 if (rem > M_TRAILINGSPACE(mp)) {
1232 MGET(mp, M_WAITOK, MT_DATA);
1236 cp = mtod(mp, caddr_t)+mp->m_len;
1237 for (left = 0; left < rem; left++)
1242 *bpos = mtod(mp, caddr_t)+mp->m_len;
/*
 * Copy 'siz' bytes starting at offset 'off' of a bio's buffer into the
 * build mbuf chain -- the bio analogue of nfsm_uiotombuf(), with the same
 * cluster-vs-mbuf allocation policy and XDR round-up zero-fill.
 * (Fragment: chain linkage and loop framing are partially missing.)
 */
1248 nfsm_biotombuf(struct bio *bio, struct mbuf **mq, int off,
1249 int siz, caddr_t *bpos)
1251 struct buf *bp = bio->bio_buf;
1252 struct mbuf *mp, *mp2;
1257 boolean_t getcluster;
1260 if (siz >= MINCLSIZE)
1264 rem = nfsm_rndup(siz) - siz;
1267 bio_cp = bp->b_data + off;
1271 mlen = M_TRAILINGSPACE(mp);
1274 mp = m_getcl(M_WAITOK, MT_DATA, 0);
1276 mp = m_get(M_WAITOK, MT_DATA);
1280 mlen = M_TRAILINGSPACE(mp);
1282 xfer = (bio_left < mlen) ? bio_left : mlen;
1283 bcopy(bio_cp, mtod(mp, caddr_t) + mp->m_len, xfer);
/* Zero-fill the XDR round-up bytes, growing the chain if needed. */
1289 if (rem > M_TRAILINGSPACE(mp)) {
1290 MGET(mp, M_WAITOK, MT_DATA);
1294 cp = mtod(mp, caddr_t) + mp->m_len;
1295 for (mlen = 0; mlen < rem; mlen++)
1300 *bpos = mtod(mp, caddr_t) + mp->m_len;
1307 * Help break down an mbuf chain by setting the first siz bytes contiguous
1308 * pointed to by returned val.
1309 * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
1310 * cases. (The macros use the vars. dpos and dpos2)
/*
 * Make 'siz' bytes contiguous at *cp2: skip fully-consumed mbufs; when
 * the remaining bytes span mbufs, splice in a fresh mbuf (panic if siz >
 * MHLEN), copy the 'left' residue, then pull the remaining siz2 bytes
 * forward from successor mbufs.  (Fragment: several copy-loop and
 * bookkeeping lines are missing; code left byte-identical.)
 */
1313 nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, caddr_t *cp2)
1315 struct mbuf *mp, *mp2;
1321 *mdp = mp = mp->m_next;
1325 *dposp = mtod(mp, caddr_t);
1330 } else if (mp->m_next == NULL) {
1332 } else if (siz > MHLEN) {
1333 panic("nfs S too big");
1335 MGET(mp2, M_WAITOK, MT_DATA);
1336 mp2->m_next = mp->m_next;
1340 *cp2 = p = mtod(mp, caddr_t);
1341 bcopy(*dposp, p, left); /* Copy what was left */
1345 /* Loop around copying up the siz2 bytes */
1349 xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
1351 bcopy(mtod(mp2, caddr_t), p, xfer);
1353 mp2->m_data += xfer;
1362 *dposp = mtod(mp2, caddr_t);
1368 * Advance the position in the mbuf chain.
/* Move the dissect position forward 'offs' bytes, hopping mbufs as needed
 * ('left' = bytes remaining in the current mbuf).  (Fragment: the walk
 * loop and EBADRPC path are not visible.) */
1371 nfs_adv(struct mbuf **mdp, caddr_t *dposp, int offs, int left)
1386 *dposp = mtod(m, caddr_t)+offs;
1391 * Copy a string into mbufs for the hard cases...
1394 nfsm_strtmbuf(struct mbuf **mb, char **bpos, const char *cp, long siz)
1396 struct mbuf *m1 = NULL, *m2;
1397 long left, xfer, len, tlen;
1403 left = M_TRAILINGSPACE(m2);
1405 tl = ((u_int32_t *)(*bpos));
1406 *tl++ = txdr_unsigned(siz);
1408 left -= NFSX_UNSIGNED;
1409 m2->m_len += NFSX_UNSIGNED;
1411 bcopy(cp, (caddr_t) tl, left);
1418 /* Loop around adding mbufs */
1422 m1 = m_getl(siz, M_WAITOK, MT_DATA, 0, &msize);
1426 tl = mtod(m1, u_int32_t *);
1429 *tl++ = txdr_unsigned(siz);
1430 m1->m_len -= NFSX_UNSIGNED;
1431 tlen = NFSX_UNSIGNED;
1434 if (siz < m1->m_len) {
1435 len = nfsm_rndup(siz);
1438 *(tl+(xfer>>2)) = 0;
1440 xfer = len = m1->m_len;
1442 bcopy(cp, (caddr_t) tl, xfer);
1443 m1->m_len = len+tlen;
1448 *bpos = mtod(m1, caddr_t)+m1->m_len;
1453 * A fiddled version of m_adj() that ensures null fill to a long
1454 * boundary and only trims off the back end
1457 nfsm_adj(struct mbuf *mp, int len, int nul)
1464 * Trim from tail. Scan the mbuf chain,
1465 * calculating its length and finding the last mbuf.
1466 * If the adjustment only affects this mbuf, then just
1467 * adjust and return. Otherwise, rescan and truncate
1468 * after the remaining size.
1474 if (m->m_next == NULL)
1478 if (m->m_len > len) {
1481 cp = mtod(m, caddr_t)+m->m_len-nul;
1482 for (i = 0; i < nul; i++)
1491 * Correct length for chain is "count".
1492 * Find the mbuf with last data, adjust its length,
1493 * and toss data from remaining mbufs on chain.
1495 for (m = mp; m; m = m->m_next) {
1496 if (m->m_len >= count) {
1499 cp = mtod(m, caddr_t)+m->m_len-nul;
1500 for (i = 0; i < nul; i++)
1507 for (m = m->m_next;m;m = m->m_next)
1512 * Make these functions instead of macros, so that the kernel text size
1513 * doesn't get too big...
/*
 * Emit NFSv3 wcc_data: a before-attributes-follow flag, then (when valid)
 * size/mtime/ctime of the pre-op state, followed by post-op attributes
 * via nfsm_srvpostop_attr().  (Fragment: flag-value stores and pointer
 * advances between the txdr calls are not visible.)
 */
1516 nfsm_srvwcc_data(nfsm_info_t info, struct nfsrv_descript *nfsd,
1517 int before_ret, struct vattr *before_vap,
1518 int after_ret, struct vattr *after_vap)
1523 * before_ret is 0 if before_vap is valid, non-zero if it isn't.
1526 tl = nfsm_build(info, NFSX_UNSIGNED);
1529 tl = nfsm_build(info, 7 * NFSX_UNSIGNED);
1531 txdr_hyper(before_vap->va_size, tl);
1533 txdr_nfsv3time(&(before_vap->va_mtime), tl);
1535 txdr_nfsv3time(&(before_vap->va_ctime), tl);
1537 nfsm_srvpostop_attr(info, nfsd, after_ret, after_vap);
/*
 * Emit NFSv3 post-op attributes: an attributes-follow flag word, and when
 * after_ret indicates valid attributes, a full NFSX_V3FATTR fattr filled
 * in by nfsm_srvfattr().  (Fragment: flag stores and branch framing not
 * visible.)
 */
1541 nfsm_srvpostop_attr(nfsm_info_t info, struct nfsrv_descript *nfsd,
1542 int after_ret, struct vattr *after_vap)
1544 struct nfs_fattr *fp;
1548 tl = nfsm_build(info, NFSX_UNSIGNED);
1551 tl = nfsm_build(info, NFSX_UNSIGNED + NFSX_V3FATTR);
1553 fp = (struct nfs_fattr *)tl;
1554 nfsm_srvfattr(nfsd, after_vap, fp);
1559 nfsm_srvfattr(struct nfsrv_descript *nfsd, struct vattr *vap,
1560 struct nfs_fattr *fp)
1563 * NFS seems to truncate nlink to 16 bits, don't let it overflow.
1565 if (vap->va_nlink > 65535)
1566 fp->fa_nlink = 65535;
1568 fp->fa_nlink = txdr_unsigned(vap->va_nlink);
1569 fp->fa_uid = txdr_unsigned(vap->va_uid);
1570 fp->fa_gid = txdr_unsigned(vap->va_gid);
1571 if (nfsd->nd_flag & ND_NFSV3) {
1572 fp->fa_type = vtonfsv3_type(vap->va_type);
1573 fp->fa_mode = vtonfsv3_mode(vap->va_mode);
1574 txdr_hyper(vap->va_size, &fp->fa3_size);
1575 txdr_hyper(vap->va_bytes, &fp->fa3_used);
1576 fp->fa3_rdev.specdata1 = txdr_unsigned(vap->va_rmajor);
1577 fp->fa3_rdev.specdata2 = txdr_unsigned(vap->va_rminor);
1578 fp->fa3_fsid.nfsuquad[0] = 0;
1579 fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
1580 txdr_hyper(vap->va_fileid, &fp->fa3_fileid);
1581 txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
1582 txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
1583 txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
1585 fp->fa_type = vtonfsv2_type(vap->va_type);
1586 fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1587 fp->fa2_size = txdr_unsigned(vap->va_size);
1588 fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
1589 if (vap->va_type == VFIFO)
1590 fp->fa2_rdev = 0xffffffff;
1592 fp->fa2_rdev = txdr_unsigned(makeudev(vap->va_rmajor, vap->va_rminor));
1593 fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
1594 fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
1595 fp->fa2_fileid = txdr_unsigned(vap->va_fileid);
1596 txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
1597 txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
1598 txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);